changeset 10636:b985cbb00e68

8223147: JFR Backport
8199712: Flight Recorder
8203346: JFR: Inconsistent signature of jfr_add_string_constant
8195817: JFR.stop should require name of recording
8195818: JFR.start should increase autogenerated name by one
8195819: Remove recording=x from jcmd JFR.check output
8203921: JFR thread sampling is missing fixes from JDK-8194552
8203929: Limit amount of data for JFR.dump
8203664: JFR start failure after AppCDS archive created with JFR StartFlightRecording
8003209: JFR events for network utilization
8207392: [PPC64] Implement JFR profiling
8202835: jfr/event/os/TestSystemProcess.java fails on missing events
Summary: Backport JFR from JDK11. Initial integration
Reviewed-by: neugens
author apetushkov
date Mon, 12 Aug 2019 18:30:40 +0300
parents c7a3e57fdf4a
children 7d05a422d710
files make/Makefile make/aix/makefiles/trace.make make/bsd/makefiles/buildtree.make make/bsd/makefiles/jfr.make make/bsd/makefiles/rules.make make/bsd/makefiles/top.make make/bsd/makefiles/trace.make make/bsd/makefiles/vm.make make/defs.make make/linux/makefiles/buildtree.make make/linux/makefiles/jfr.make make/linux/makefiles/rules.make make/linux/makefiles/top.make make/linux/makefiles/trace.make make/linux/makefiles/vm.make make/solaris/makefiles/buildtree.make make/solaris/makefiles/jfr.make make/solaris/makefiles/rules.make make/solaris/makefiles/top.make make/solaris/makefiles/trace.make make/solaris/makefiles/vm.make make/windows/build.make make/windows/create_obj_files.sh make/windows/makefiles/compile.make make/windows/makefiles/defs.make make/windows/makefiles/generated.make make/windows/makefiles/jfr.make make/windows/makefiles/rules.make make/windows/makefiles/trace.make make/windows/makefiles/vm.make make/windows/projectfiles/common/Makefile src/cpu/ppc/vm/frame_ppc.cpp src/cpu/ppc/vm/vm_version_ext_ppc.cpp src/cpu/ppc/vm/vm_version_ext_ppc.hpp src/cpu/sparc/vm/vm_version_ext_sparc.cpp src/cpu/sparc/vm/vm_version_ext_sparc.hpp src/cpu/x86/vm/rdtsc_x86.cpp src/cpu/x86/vm/rdtsc_x86.hpp src/cpu/x86/vm/vm_version_ext_x86.cpp src/cpu/x86/vm/vm_version_ext_x86.hpp src/os/aix/vm/os_aix.cpp src/os/aix/vm/os_perf_aix.cpp src/os/bsd/vm/os_bsd.cpp src/os/bsd/vm/os_perf_bsd.cpp src/os/bsd/vm/semaphore_bsd.cpp src/os/bsd/vm/semaphore_bsd.hpp src/os/linux/vm/os_linux.cpp src/os/linux/vm/os_perf_linux.cpp src/os/linux/vm/perfMemory_linux.cpp src/os/posix/vm/os_posix.cpp src/os/posix/vm/os_posix.hpp src/os/posix/vm/semaphore_posix.cpp src/os/posix/vm/semaphore_posix.hpp src/os/solaris/vm/os_perf_solaris.cpp src/os/solaris/vm/os_solaris.cpp src/os/windows/vm/iphlp_interface.cpp src/os/windows/vm/iphlp_interface.hpp src/os/windows/vm/os_perf_windows.cpp src/os/windows/vm/os_windows.cpp src/os/windows/vm/os_windows.hpp src/os/windows/vm/pdh_interface.cpp src/os/windows/vm/pdh_interface.hpp src/os/windows/vm/semaphore_windows.cpp src/os/windows/vm/semaphore_windows.hpp src/os_cpu/linux_ppc/vm/thread_linux_ppc.cpp src/os_cpu/linux_ppc/vm/thread_linux_ppc.hpp src/os_cpu/solaris_x86/vm/os_solaris_x86.inline.hpp src/share/vm/c1/c1_GraphBuilder.cpp src/share/vm/c1/c1_LIRGenerator.cpp src/share/vm/c1/c1_LIRGenerator.hpp src/share/vm/c1/c1_Runtime1.cpp src/share/vm/ci/ciEnv.cpp src/share/vm/ci/ciMethod.hpp src/share/vm/classfile/classFileParser.cpp src/share/vm/classfile/classFileParser.hpp src/share/vm/classfile/classFileStream.cpp src/share/vm/classfile/classFileStream.hpp src/share/vm/classfile/classLoader.cpp src/share/vm/classfile/classLoaderData.cpp src/share/vm/classfile/classLoaderData.hpp src/share/vm/classfile/javaClasses.cpp src/share/vm/classfile/systemDictionary.cpp src/share/vm/classfile/systemDictionary.hpp src/share/vm/classfile/vmSymbols.cpp src/share/vm/classfile/vmSymbols.hpp src/share/vm/code/codeCache.cpp src/share/vm/compiler/compileBroker.cpp src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp src/share/vm/gc_implementation/g1/g1HeapRegionEventSender.cpp src/share/vm/gc_implementation/g1/g1HeapRegionEventSender.hpp src/share/vm/gc_implementation/g1/g1MMUTracker.cpp 
src/share/vm/gc_implementation/g1/g1MarkSweep.cpp src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp src/share/vm/gc_implementation/parNew/parNewGeneration.cpp src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp src/share/vm/gc_implementation/shared/ageTable.cpp src/share/vm/gc_implementation/shared/ageTable.hpp src/share/vm/gc_implementation/shared/ageTableTracer.cpp src/share/vm/gc_implementation/shared/ageTableTracer.hpp src/share/vm/gc_implementation/shared/gcConfiguration.cpp src/share/vm/gc_implementation/shared/gcConfiguration.hpp src/share/vm/gc_implementation/shared/gcTimer.cpp src/share/vm/gc_implementation/shared/gcTrace.cpp src/share/vm/gc_implementation/shared/gcTrace.hpp src/share/vm/gc_implementation/shared/gcTraceSend.cpp src/share/vm/gc_implementation/shared/gcTraceTime.cpp src/share/vm/gc_implementation/shared/objectCountEventSender.cpp src/share/vm/gc_implementation/shared/objectCountEventSender.hpp src/share/vm/gc_interface/allocTracer.cpp src/share/vm/gc_interface/allocTracer.hpp src/share/vm/gc_interface/collectedHeap.cpp src/share/vm/gc_interface/collectedHeap.inline.hpp src/share/vm/jfr/GenerateJfrFiles.java src/share/vm/jfr/dcmd/jfrDcmds.cpp src/share/vm/jfr/dcmd/jfrDcmds.hpp src/share/vm/jfr/instrumentation/jfrEventClassTransformer.cpp src/share/vm/jfr/instrumentation/jfrEventClassTransformer.hpp src/share/vm/jfr/instrumentation/jfrJvmtiAgent.cpp src/share/vm/jfr/instrumentation/jfrJvmtiAgent.hpp src/share/vm/jfr/jfr.cpp src/share/vm/jfr/jfr.cpp~ src/share/vm/jfr/jfr.hpp src/share/vm/jfr/jfr.hpp~ src/share/vm/jfr/jfrEvents.hpp src/share/vm/jfr/jni/jfrGetAllEventClasses.cpp src/share/vm/jfr/jni/jfrGetAllEventClasses.hpp src/share/vm/jfr/jni/jfrJavaCall.cpp src/share/vm/jfr/jni/jfrJavaCall.hpp src/share/vm/jfr/jni/jfrJavaSupport.cpp src/share/vm/jfr/jni/jfrJavaSupport.hpp src/share/vm/jfr/jni/jfrJniMethod.cpp src/share/vm/jfr/jni/jfrJniMethod.hpp src/share/vm/jfr/jni/jfrJniMethodRegistration.cpp src/share/vm/jfr/jni/jfrJniMethodRegistration.hpp src/share/vm/jfr/jni/jfrUpcalls.cpp src/share/vm/jfr/jni/jfrUpcalls.hpp src/share/vm/jfr/leakprofiler/chains/bfsClosure.cpp src/share/vm/jfr/leakprofiler/chains/bfsClosure.hpp src/share/vm/jfr/leakprofiler/chains/bitset.cpp src/share/vm/jfr/leakprofiler/chains/bitset.hpp src/share/vm/jfr/leakprofiler/chains/dfsClosure.cpp src/share/vm/jfr/leakprofiler/chains/dfsClosure.hpp src/share/vm/jfr/leakprofiler/chains/edge.cpp src/share/vm/jfr/leakprofiler/chains/edge.hpp src/share/vm/jfr/leakprofiler/chains/edgeQueue.cpp src/share/vm/jfr/leakprofiler/chains/edgeQueue.hpp src/share/vm/jfr/leakprofiler/chains/edgeStore.cpp src/share/vm/jfr/leakprofiler/chains/edgeStore.hpp src/share/vm/jfr/leakprofiler/chains/edgeUtils.cpp src/share/vm/jfr/leakprofiler/chains/edgeUtils.hpp src/share/vm/jfr/leakprofiler/chains/objectSampleMarker.hpp src/share/vm/jfr/leakprofiler/chains/rootSetClosure.cpp src/share/vm/jfr/leakprofiler/chains/rootSetClosure.hpp src/share/vm/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp src/share/vm/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp src/share/vm/jfr/leakprofiler/checkpoint/objectSampleDescription.cpp 
src/share/vm/jfr/leakprofiler/checkpoint/objectSampleDescription.hpp src/share/vm/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp src/share/vm/jfr/leakprofiler/checkpoint/objectSampleWriter.hpp src/share/vm/jfr/leakprofiler/checkpoint/rootResolver.cpp src/share/vm/jfr/leakprofiler/checkpoint/rootResolver.hpp src/share/vm/jfr/leakprofiler/emitEventOperation.cpp src/share/vm/jfr/leakprofiler/emitEventOperation.hpp src/share/vm/jfr/leakprofiler/leakProfiler.cpp src/share/vm/jfr/leakprofiler/leakProfiler.hpp src/share/vm/jfr/leakprofiler/sampling/objectSample.hpp src/share/vm/jfr/leakprofiler/sampling/objectSampler.cpp src/share/vm/jfr/leakprofiler/sampling/objectSampler.hpp src/share/vm/jfr/leakprofiler/sampling/sampleList.cpp src/share/vm/jfr/leakprofiler/sampling/sampleList.hpp src/share/vm/jfr/leakprofiler/sampling/samplePriorityQueue.cpp src/share/vm/jfr/leakprofiler/sampling/samplePriorityQueue.hpp src/share/vm/jfr/leakprofiler/startOperation.hpp src/share/vm/jfr/leakprofiler/stopOperation.hpp src/share/vm/jfr/leakprofiler/utilities/granularTimer.cpp src/share/vm/jfr/leakprofiler/utilities/granularTimer.hpp src/share/vm/jfr/leakprofiler/utilities/rootType.hpp src/share/vm/jfr/leakprofiler/utilities/saveRestore.cpp src/share/vm/jfr/leakprofiler/utilities/saveRestore.hpp src/share/vm/jfr/leakprofiler/utilities/unifiedOop.hpp src/share/vm/jfr/metadata/jfrSerializer.hpp src/share/vm/jfr/metadata/metadata.xml src/share/vm/jfr/metadata/metadata.xsd src/share/vm/jfr/periodic/jfrNetworkUtilization.cpp src/share/vm/jfr/periodic/jfrNetworkUtilization.hpp src/share/vm/jfr/periodic/jfrOSInterface.cpp src/share/vm/jfr/periodic/jfrOSInterface.hpp src/share/vm/jfr/periodic/jfrPeriodic.cpp src/share/vm/jfr/periodic/jfrThreadCPULoadEvent.cpp src/share/vm/jfr/periodic/jfrThreadCPULoadEvent.hpp src/share/vm/jfr/periodic/jfrThreadDumpEvent.cpp src/share/vm/jfr/periodic/jfrThreadDumpEvent.hpp src/share/vm/jfr/periodic/sampling/jfrCallTrace.cpp src/share/vm/jfr/periodic/sampling/jfrCallTrace.hpp src/share/vm/jfr/periodic/sampling/jfrThreadSampler.cpp src/share/vm/jfr/periodic/sampling/jfrThreadSampler.hpp src/share/vm/jfr/recorder/checkpoint/jfrCheckpointBlob.cpp src/share/vm/jfr/recorder/checkpoint/jfrCheckpointBlob.hpp src/share/vm/jfr/recorder/checkpoint/jfrCheckpointManager.cpp src/share/vm/jfr/recorder/checkpoint/jfrCheckpointManager.hpp src/share/vm/jfr/recorder/checkpoint/jfrCheckpointWriter.cpp src/share/vm/jfr/recorder/checkpoint/jfrCheckpointWriter.hpp src/share/vm/jfr/recorder/checkpoint/jfrMetadataEvent.cpp src/share/vm/jfr/recorder/checkpoint/jfrMetadataEvent.hpp src/share/vm/jfr/recorder/checkpoint/types/jfrThreadGroup.cpp src/share/vm/jfr/recorder/checkpoint/types/jfrThreadGroup.hpp src/share/vm/jfr/recorder/checkpoint/types/jfrThreadState.cpp src/share/vm/jfr/recorder/checkpoint/types/jfrThreadState.hpp src/share/vm/jfr/recorder/checkpoint/types/jfrType.cpp src/share/vm/jfr/recorder/checkpoint/types/jfrType.hpp src/share/vm/jfr/recorder/checkpoint/types/jfrTypeManager.cpp src/share/vm/jfr/recorder/checkpoint/types/jfrTypeManager.hpp src/share/vm/jfr/recorder/checkpoint/types/jfrTypeSet.cpp src/share/vm/jfr/recorder/checkpoint/types/jfrTypeSet.hpp src/share/vm/jfr/recorder/checkpoint/types/jfrTypeSetUtils.cpp src/share/vm/jfr/recorder/checkpoint/types/jfrTypeSetUtils.hpp src/share/vm/jfr/recorder/checkpoint/types/jfrTypeSetWriter.hpp src/share/vm/jfr/recorder/checkpoint/types/traceid/jfrTraceId.cpp src/share/vm/jfr/recorder/checkpoint/types/traceid/jfrTraceId.hpp 
src/share/vm/jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp src/share/vm/jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.inline.hpp src/share/vm/jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.cpp src/share/vm/jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp src/share/vm/jfr/recorder/checkpoint/types/traceid/jfrTraceIdMacros.hpp src/share/vm/jfr/recorder/jfrEventSetting.cpp src/share/vm/jfr/recorder/jfrEventSetting.hpp src/share/vm/jfr/recorder/jfrEventSetting.inline.hpp src/share/vm/jfr/recorder/jfrRecorder.cpp src/share/vm/jfr/recorder/jfrRecorder.hpp src/share/vm/jfr/recorder/repository/jfrChunkSizeNotifier.cpp src/share/vm/jfr/recorder/repository/jfrChunkSizeNotifier.hpp src/share/vm/jfr/recorder/repository/jfrChunkState.cpp src/share/vm/jfr/recorder/repository/jfrChunkState.hpp src/share/vm/jfr/recorder/repository/jfrChunkWriter.cpp src/share/vm/jfr/recorder/repository/jfrChunkWriter.hpp src/share/vm/jfr/recorder/repository/jfrEmergencyDump.cpp src/share/vm/jfr/recorder/repository/jfrEmergencyDump.hpp src/share/vm/jfr/recorder/repository/jfrRepository.cpp src/share/vm/jfr/recorder/repository/jfrRepository.hpp src/share/vm/jfr/recorder/service/jfrEvent.cpp src/share/vm/jfr/recorder/service/jfrEvent.hpp src/share/vm/jfr/recorder/service/jfrMemorySizer.cpp src/share/vm/jfr/recorder/service/jfrMemorySizer.hpp src/share/vm/jfr/recorder/service/jfrOptionSet.cpp src/share/vm/jfr/recorder/service/jfrOptionSet.hpp src/share/vm/jfr/recorder/service/jfrPostBox.cpp src/share/vm/jfr/recorder/service/jfrPostBox.hpp src/share/vm/jfr/recorder/service/jfrRecorderService.cpp src/share/vm/jfr/recorder/service/jfrRecorderService.hpp src/share/vm/jfr/recorder/service/jfrRecorderThread.cpp src/share/vm/jfr/recorder/service/jfrRecorderThread.hpp src/share/vm/jfr/recorder/service/jfrRecorderThreadLoop.cpp src/share/vm/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp src/share/vm/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp src/share/vm/jfr/recorder/storage/jfrBuffer.cpp src/share/vm/jfr/recorder/storage/jfrBuffer.hpp src/share/vm/jfr/recorder/storage/jfrMemorySpace.hpp src/share/vm/jfr/recorder/storage/jfrMemorySpace.inline.hpp src/share/vm/jfr/recorder/storage/jfrMemorySpaceRetrieval.hpp src/share/vm/jfr/recorder/storage/jfrStorage.cpp src/share/vm/jfr/recorder/storage/jfrStorage.hpp src/share/vm/jfr/recorder/storage/jfrStorageControl.cpp src/share/vm/jfr/recorder/storage/jfrStorageControl.hpp src/share/vm/jfr/recorder/storage/jfrStorageUtils.hpp src/share/vm/jfr/recorder/storage/jfrStorageUtils.inline.hpp src/share/vm/jfr/recorder/storage/jfrVirtualMemory.cpp src/share/vm/jfr/recorder/storage/jfrVirtualMemory.hpp src/share/vm/jfr/recorder/stringpool/jfrStringPool.cpp src/share/vm/jfr/recorder/stringpool/jfrStringPool.hpp src/share/vm/jfr/recorder/stringpool/jfrStringPoolBuffer.cpp src/share/vm/jfr/recorder/stringpool/jfrStringPoolBuffer.hpp src/share/vm/jfr/recorder/stringpool/jfrStringPoolWriter.cpp src/share/vm/jfr/recorder/stringpool/jfrStringPoolWriter.hpp src/share/vm/jfr/support/jfrAllocationTracer.cpp src/share/vm/jfr/support/jfrAllocationTracer.hpp src/share/vm/jfr/support/jfrEventClass.cpp src/share/vm/jfr/support/jfrEventClass.hpp src/share/vm/jfr/support/jfrFlush.cpp src/share/vm/jfr/support/jfrFlush.hpp src/share/vm/jfr/support/jfrIntrinsics.hpp src/share/vm/jfr/support/jfrKlassExtension.hpp src/share/vm/jfr/support/jfrStackTraceMark.cpp src/share/vm/jfr/support/jfrStackTraceMark.hpp src/share/vm/jfr/support/jfrThreadExtension.hpp 
src/share/vm/jfr/support/jfrThreadId.hpp src/share/vm/jfr/support/jfrThreadLocal.cpp src/share/vm/jfr/support/jfrThreadLocal.hpp src/share/vm/jfr/support/jfrTraceIdExtension.hpp src/share/vm/jfr/utilities/jfrAllocation.cpp src/share/vm/jfr/utilities/jfrAllocation.hpp src/share/vm/jfr/utilities/jfrBigEndian.hpp src/share/vm/jfr/utilities/jfrDoublyLinkedList.hpp src/share/vm/jfr/utilities/jfrHashtable.hpp src/share/vm/jfr/utilities/jfrIterator.hpp src/share/vm/jfr/utilities/jfrJavaLog.cpp src/share/vm/jfr/utilities/jfrJavaLog.hpp src/share/vm/jfr/utilities/jfrRefCountPointer.hpp src/share/vm/jfr/utilities/jfrResourceManager.hpp src/share/vm/jfr/utilities/jfrSpinlockHelper.hpp src/share/vm/jfr/utilities/jfrTime.cpp src/share/vm/jfr/utilities/jfrTime.hpp src/share/vm/jfr/utilities/jfrTimeConverter.cpp src/share/vm/jfr/utilities/jfrTimeConverter.hpp src/share/vm/jfr/utilities/jfrTryLock.hpp src/share/vm/jfr/utilities/jfrTypes.hpp src/share/vm/jfr/writers/jfrBigEndianWriter.hpp src/share/vm/jfr/writers/jfrEncoders.hpp src/share/vm/jfr/writers/jfrEncoding.hpp src/share/vm/jfr/writers/jfrEventWriterHost.hpp src/share/vm/jfr/writers/jfrEventWriterHost.inline.hpp src/share/vm/jfr/writers/jfrJavaEventWriter.cpp src/share/vm/jfr/writers/jfrJavaEventWriter.hpp src/share/vm/jfr/writers/jfrMemoryWriterHost.hpp src/share/vm/jfr/writers/jfrMemoryWriterHost.inline.hpp src/share/vm/jfr/writers/jfrNativeEventWriter.hpp src/share/vm/jfr/writers/jfrPosition.hpp src/share/vm/jfr/writers/jfrPosition.inline.hpp src/share/vm/jfr/writers/jfrStorageAdapter.hpp src/share/vm/jfr/writers/jfrStorageHost.hpp src/share/vm/jfr/writers/jfrStorageHost.inline.hpp src/share/vm/jfr/writers/jfrStreamWriterHost.hpp src/share/vm/jfr/writers/jfrStreamWriterHost.inline.hpp src/share/vm/jfr/writers/jfrWriterHost.hpp src/share/vm/jfr/writers/jfrWriterHost.inline.hpp src/share/vm/memory/defNewGeneration.cpp src/share/vm/memory/defNewGeneration.hpp src/share/vm/memory/genCollectedHeap.cpp src/share/vm/memory/metaspaceTracer.cpp src/share/vm/memory/referenceProcessor.cpp src/share/vm/oops/arrayKlass.cpp src/share/vm/oops/instanceKlass.hpp src/share/vm/oops/klass.cpp src/share/vm/oops/klass.hpp src/share/vm/oops/method.hpp src/share/vm/opto/bytecodeInfo.cpp src/share/vm/opto/compile.cpp src/share/vm/opto/compile.hpp src/share/vm/opto/library_call.cpp src/share/vm/opto/parse.hpp src/share/vm/opto/superword.hpp src/share/vm/prims/jni.cpp src/share/vm/prims/jvm.cpp src/share/vm/prims/nativeLookup.cpp src/share/vm/prims/unsafe.cpp src/share/vm/runtime/arguments.cpp src/share/vm/runtime/frame.cpp src/share/vm/runtime/frame.hpp src/share/vm/runtime/globals.cpp src/share/vm/runtime/globals.hpp src/share/vm/runtime/handles.cpp src/share/vm/runtime/java.cpp src/share/vm/runtime/mutexLocker.cpp src/share/vm/runtime/mutexLocker.hpp src/share/vm/runtime/objectMonitor.cpp src/share/vm/runtime/objectMonitor.hpp src/share/vm/runtime/os.hpp src/share/vm/runtime/os_perf.hpp src/share/vm/runtime/safepoint.cpp src/share/vm/runtime/safepoint.hpp src/share/vm/runtime/semaphore.hpp src/share/vm/runtime/semaphore.inline.hpp src/share/vm/runtime/sweeper.cpp src/share/vm/runtime/synchronizer.cpp src/share/vm/runtime/thread.cpp src/share/vm/runtime/thread.hpp src/share/vm/runtime/vmStructs.cpp src/share/vm/runtime/vmStructs_trace.hpp src/share/vm/runtime/vmThread.cpp src/share/vm/runtime/vm_operations.cpp src/share/vm/runtime/vm_version.hpp src/share/vm/services/diagnosticArgument.cpp src/share/vm/services/memTracker.hpp src/share/vm/trace/noTraceBackend.hpp 
src/share/vm/trace/trace.dtd src/share/vm/trace/trace.xml src/share/vm/trace/traceBackend.hpp src/share/vm/trace/traceDataTypes.hpp src/share/vm/trace/traceEvent.hpp src/share/vm/trace/traceEventClasses.xsl src/share/vm/trace/traceEventIds.xsl src/share/vm/trace/traceMacros.hpp src/share/vm/trace/traceStream.hpp src/share/vm/trace/traceTime.hpp src/share/vm/trace/traceTypes.xsl src/share/vm/trace/tracetypes.xml src/share/vm/trace/tracing.hpp src/share/vm/trace/xinclude.mod src/share/vm/trace/xsl_util.xsl src/share/vm/utilities/align.hpp src/share/vm/utilities/bitMap.inline.hpp src/share/vm/utilities/globalDefinitions.hpp src/share/vm/utilities/globalDefinitions_gcc.hpp src/share/vm/utilities/globalDefinitions_sparcWorks.hpp src/share/vm/utilities/globalDefinitions_visCPP.hpp src/share/vm/utilities/globalDefinitions_xlc.hpp src/share/vm/utilities/growableArray.hpp src/share/vm/utilities/macros.hpp src/share/vm/utilities/ticks.cpp src/share/vm/utilities/ticks.hpp src/share/vm/utilities/ticks.inline.hpp src/share/vm/utilities/vmError.cpp
diffstat 411 files changed, 46017 insertions(+), 3428 deletions(-)
--- a/make/Makefile	Thu Aug 01 03:44:03 2019 +0100
+++ b/make/Makefile	Mon Aug 12 18:30:40 2019 +0300
@@ -575,6 +575,11 @@
 	$(SED) 's/\(separated by \)[;:]/\1$(PATH_SEP)/g' $< > $@.temp
 	$(MV) $@.temp $@
 
+# Java Flight Recorder
+$(EXPORT_JRE_LIB_DIR)/jdk/jfr/internal/types/metadata.xml: $(HS_SRC_DIR)/share/vm/jfr/metadata/metadata.xml
+	mkdir -p $(basename $@)
+	cp $< $@
+
 #
 # Clean rules
 #
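The rule added above stages the JFR metadata definition into the exported JRE image; the matching EXPORT_LIST entry appears in the make/defs.make hunk further down. A minimal sketch of the pattern, assuming EXPORT_JRE_LIB_DIR and HS_SRC_DIR are provided by defs.make as usual (written here with the $(@D) automatic variable instead of $(basename $@), which additionally creates a harmless ".../metadata" directory):

EXPORT_LIST += $(EXPORT_JRE_LIB_DIR)/jdk/jfr/internal/types/metadata.xml

$(EXPORT_JRE_LIB_DIR)/jdk/jfr/internal/types/metadata.xml: $(HS_SRC_DIR)/share/vm/jfr/metadata/metadata.xml
	mkdir -p $(@D)
	cp $< $@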
--- a/make/aix/makefiles/trace.make	Thu Aug 01 03:44:03 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,120 +0,0 @@
-#
-# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-#
-
-# This makefile (trace.make) is included from the trace.make in the
-# build directories.
-#
-# It knows how to build and run the tools to generate trace files.
-
-include $(GAMMADIR)/make/linux/makefiles/rules.make
-include $(GAMMADIR)/make/altsrc.make
-
-# #########################################################################
-
-HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \
-  echo "true"; else echo "false";\
-  fi)
-
-TOPDIR      = $(shell echo `pwd`)
-GENERATED   = $(TOPDIR)/../generated
-JvmtiOutDir = $(GENERATED)/jvmtifiles
-TraceOutDir   = $(GENERATED)/tracefiles
-
-TraceAltSrcDir = $(HS_ALT_SRC)/share/vm/trace
-TraceSrcDir = $(HS_COMMON_SRC)/share/vm/trace
-
-# set VPATH so make knows where to look for source files
-Src_Dirs_V += $(TraceSrcDir) $(TraceAltSrcDir)
-VPATH += $(Src_Dirs_V:%=%:)
-
-TraceGeneratedNames =     \
-    traceEventClasses.hpp \
-	traceEventIds.hpp     \
-	traceTypes.hpp
-
-ifeq ($(HAS_ALT_SRC), true)
-TraceGeneratedNames +=  \
-	traceRequestables.hpp \
-    traceEventControl.hpp
-
-ifneq ($(INCLUDE_TRACE), false)
-TraceGeneratedNames += traceProducer.cpp
-endif
-
-endif
-
-TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)
-
-XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen
-
-XML_DEPS =  $(TraceSrcDir)/trace.xml  $(TraceSrcDir)/tracetypes.xml \
-	$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod
-ifeq ($(HAS_ALT_SRC), true)
-	XML_DEPS += $(TraceAltSrcDir)/traceevents.xml
-endif
-
-.PHONY: all clean cleanall
-
-# #########################################################################
-
-all: $(TraceGeneratedFiles)
-
-GENERATE_CODE= \
-  $(QUIETLY) echo Generating $@; \
-  $(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@; \
-  test -f $@
-
-$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
-	$(GENERATE_CODE)
-
-$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
-	$(GENERATE_CODE)
-
-ifeq ($(HAS_ALT_SRC), false)
-
-$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
-	$(GENERATE_CODE)
-
-else
-
-$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
-	$(GENERATE_CODE)
-
-$(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS)
-	$(GENERATE_CODE)
-
-$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
-	$(GENERATE_CODE)
-
-$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
-	$(GENERATE_CODE)
-
-endif
-
-# #########################################################################
-
-clean cleanall:
-	rm $(TraceGeneratedFiles)
-
-
--- a/make/bsd/makefiles/buildtree.make	Thu Aug 01 03:44:03 2019 +0100
+++ b/make/bsd/makefiles/buildtree.make	Mon Aug 12 18:30:40 2019 +0300
@@ -48,7 +48,7 @@
 # flags.make	- with macro settings
 # vm.make	- to support making "$(MAKE) -v vm.make" in makefiles
 # adlc.make	-
-# trace.make	- generate tracing event and type definitions
+# jfr.make	- generate jfr event and type definitions
 # jvmti.make	- generate JVMTI bindings from the spec (JSR-163)
 # sa.make	- generate SA jar file and natives
 #
@@ -113,6 +113,10 @@
 endif
 endif
 
+ifeq ($(ENABLE_JFR),false)
+ALWAYS_EXCLUDE_DIRS += -o -name jfr
+endif
+
 # Get things from the platform file.
 COMPILER	= $(shell sed -n 's/^compiler[ 	]*=[ 	]*//p' $(PLATFORM_FILE))
 
@@ -121,7 +125,7 @@
 	$(PLATFORM_DIR)/generated/dependencies \
 	$(PLATFORM_DIR)/generated/adfiles \
 	$(PLATFORM_DIR)/generated/jvmtifiles \
-	$(PLATFORM_DIR)/generated/tracefiles \
+	$(PLATFORM_DIR)/generated/jfrfiles \
 	$(PLATFORM_DIR)/generated/dtracefiles
 
 TARGETS      = debug fastdebug optimized product
@@ -131,7 +135,7 @@
 BUILDTREE_MAKE	= $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make
 
 # dtrace.make is used on BSD versions that implement Dtrace (like MacOS X)
-BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make trace.make sa.make dtrace.make
+BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make jfr.make sa.make dtrace.make
 
 BUILDTREE_VARS	= GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \
 	SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT)
@@ -201,6 +205,12 @@
 
 DATA_MODE = $(DATA_MODE/$(BUILDARCH))
 
+ifeq ($(ENABLE_JFR), true)
+  INCLUDE_JFR = 1
+else
+  INCLUDE_JFR = 0
+endif
+
 flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
 	@echo Creating $@ ...
 	$(QUIETLY) ( \
@@ -280,6 +290,7 @@
 	    echo && \
 	    echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \
 	    echo "SYSDEFS += \$$(HOTSPOT_EXTRA_SYSDEFS)"; \
+	echo && echo "CFLAGS += -DINCLUDE_JFR=$(INCLUDE_JFR)"; \
 	echo; \
 	[ -n "$(SPEC)" ] && \
 	    echo "include $(SPEC)"; \
@@ -348,7 +359,7 @@
 	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
 	) > $@
 
-trace.make: $(BUILDTREE_MAKE)
+jfr.make: $(BUILDTREE_MAKE)
 	@echo Creating $@ ...
 	$(QUIETLY) ( \
 	$(BUILDTREE_COMMENT); \
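The ENABLE_JFR plumbing added to buildtree.make (mirrored below for linux and solaris) reduces to the following; a condensed sketch, assuming ENABLE_JFR is handed down from the top-level build:

# When JFR is disabled, prune src/share/vm/jfr from the source-directory walk...
ifeq ($(ENABLE_JFR), false)
  ALWAYS_EXCLUDE_DIRS += -o -name jfr
endif
# ...and translate the make-level switch into a preprocessor macro
ifeq ($(ENABLE_JFR), true)
  INCLUDE_JFR = 1
else
  INCLUDE_JFR = 0
endif
# emitted into the generated flags.make, so every HotSpot translation unit sees INCLUDE_JFR
CFLAGS += -DINCLUDE_JFR=$(INCLUDE_JFR)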
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/bsd/makefiles/jfr.make	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,92 @@
+#
+# Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2018-2019, Azul Systems, Inc. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# This makefile (jfr.make) is included from the jfr.make in the
+# build directories.
+#
+# It knows how to build and run the tools to generate jfr.
+
+include $(GAMMADIR)/make/bsd/makefiles/rules.make
+
+# #########################################################################
+# Build tools needed for the Jfr source code generation
+
+TOPDIR      = $(shell echo `pwd`)
+GENERATED   = $(TOPDIR)/../generated
+
+JFR_TOOLS_SRCDIR := $(GAMMADIR)/src/share/vm/jfr
+JFR_TOOLS_OUTPUTDIR := $(GENERATED)/tools/jfr
+
+JFR_OUTPUTDIR := $(GENERATED)/jfrfiles
+JFR_SRCDIR := $(GAMMADIR)/src/share/vm/jfr/metadata
+
+METADATA_XML ?= $(JFR_SRCDIR)/metadata.xml
+METADATA_XSD ?= $(JFR_SRCDIR)/metadata.xsd
+
+# Changing these will trigger a rebuild of generated jfr files.
+JFR_DEPS += \
+    $(METADATA_XML) \
+    $(METADATA_XSD) \
+    #
+
+JfrGeneratedNames = \
+	jfrEventClasses.hpp \
+	jfrEventControl.hpp \
+	jfrEventIds.hpp \
+	jfrPeriodic.hpp \
+	jfrTypes.hpp
+
+JfrGenSource = $(JFR_TOOLS_SRCDIR)/GenerateJfrFiles.java
+JfrGenClass = $(JFR_TOOLS_OUTPUTDIR)/build/tools/jfr/GenerateJfrFiles.class
+
+JfrGeneratedFiles = $(JfrGeneratedNames:%=$(JFR_OUTPUTDIR)/%)
+
+.PHONY: all clean cleanall
+
+# #########################################################################
+
+all: $(JfrGeneratedFiles)
+
+$(JfrGenClass): $(JfrGenSource)
+	mkdir -p $(@D)
+	$(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -d $(JFR_TOOLS_OUTPUTDIR) $(JfrGenSource)
+
+$(JFR_OUTPUTDIR)/jfrEventClasses.hpp: $(METADATA_XML) $(METADATA_XSD) $(JfrGenClass)
+	$(QUIETLY) echo Generating $(@F)
+	mkdir -p $(@D)
+	$(QUIETLY) $(REMOTE) $(RUN.JAVA) -cp $(JFR_TOOLS_OUTPUTDIR) build.tools.jfr.GenerateJfrFiles $(METADATA_XML) $(METADATA_XSD) $(JFR_OUTPUTDIR)
+	test -f $@
+
+$(filter-out $(JFR_OUTPUTDIR)/jfrEventClasses.hpp, $(JfrGeneratedFiles)): $(JFR_OUTPUTDIR)/jfrEventClasses.hpp
+
+TARGETS += $(JFR_OUTPUTDIR)/jfrEventClasses.hpp
+
+# #########################################################################
+
+clean cleanall :
+	rm $(JfrGenClass) $(JfrGeneratedFiles)
+
+# #########################################################################
+
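The two recipes in the new jfr.make first compile the in-tree generator and then run it against the metadata descriptors, with jfrEventClasses.hpp acting as the sentinel target for the whole generated set. A standalone sketch of the equivalent steps, using illustrative paths in place of the $(GENERATED) and $(GAMMADIR) variables:

GEN := build/generated

# compile the generator; its package is build.tools.jfr, so the class lands under build/tools/jfr
tools:
	mkdir -p $(GEN)/tools/jfr
	javac -d $(GEN)/tools/jfr src/share/vm/jfr/GenerateJfrFiles.java

# running the generator yields jfrEventClasses.hpp, jfrEventControl.hpp,
# jfrEventIds.hpp, jfrPeriodic.hpp and jfrTypes.hpp in $(GEN)/jfrfiles
jfrfiles: tools
	mkdir -p $(GEN)/jfrfiles
	java -cp $(GEN)/tools/jfr build.tools.jfr.GenerateJfrFiles \
	    src/share/vm/jfr/metadata/metadata.xml \
	    src/share/vm/jfr/metadata/metadata.xsd \
	    $(GEN)/jfrfiles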
--- a/make/bsd/makefiles/rules.make	Thu Aug 01 03:44:03 2019 +0100
+++ b/make/bsd/makefiles/rules.make	Mon Aug 12 18:30:40 2019 +0300
@@ -126,8 +126,8 @@
 RUN.JAR$(MAKE_VERBOSE) += >/dev/null
 
 # Settings for javac
-BOOT_SOURCE_LANGUAGE_VERSION = 6
-BOOT_TARGET_CLASS_VERSION = 6
+BOOT_SOURCE_LANGUAGE_VERSION = 8
+BOOT_TARGET_CLASS_VERSION = 8
 JAVAC_FLAGS = -g -encoding ascii
 BOOTSTRAP_JAVAC_FLAGS = $(JAVAC_FLAGS) -source $(BOOT_SOURCE_LANGUAGE_VERSION) -target $(BOOT_TARGET_CLASS_VERSION)
 
--- a/make/bsd/makefiles/top.make	Thu Aug 01 03:44:03 2019 +0100
+++ b/make/bsd/makefiles/top.make	Mon Aug 12 18:30:40 2019 +0300
@@ -80,7 +80,7 @@
 	@echo All done.
 
 # This is an explicit dependency for the sake of parallel makes.
-vm_build_preliminaries:  checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff trace_stuff sa_stuff dtrace_stuff
+vm_build_preliminaries:  checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff jfr_stuff sa_stuff dtrace_stuff
 	@# We need a null action here, so implicit rules don't get consulted.
 
 $(Cached_plat): $(Plat_File)
@@ -94,9 +94,9 @@
 jvmti_stuff: $(Cached_plat) $(adjust-mflags)
 	@$(MAKE) -f jvmti.make $(MFLAGS-adjusted)
 
-# generate trace files
-trace_stuff: jvmti_stuff $(Cached_plat) $(adjust-mflags)
-	@$(MAKE) -f trace.make $(MFLAGS-adjusted)
+# generate JFR files
+jfr_stuff: $(Cached_plat) $(adjust-mflags)
+	@$(MAKE) -f jfr.make $(MFLAGS-adjusted)
 
 ifeq ($(OS_VENDOR), Darwin)
 # generate dtrace header files
--- a/make/bsd/makefiles/trace.make	Thu Aug 01 03:44:03 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,121 +0,0 @@
-#
-# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-#
-
-# This makefile (trace.make) is included from the trace.make in the
-# build directories.
-#
-# It knows how to build and run the tools to generate trace files.
-
-include $(GAMMADIR)/make/bsd/makefiles/rules.make
-include $(GAMMADIR)/make/altsrc.make
-
-# #########################################################################
-
-HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \
-  echo "true"; else echo "false";\
-  fi)
-
-TOPDIR      = $(shell echo `pwd`)
-GENERATED   = $(TOPDIR)/../generated
-JvmtiOutDir = $(GENERATED)/jvmtifiles
-TraceOutDir   = $(GENERATED)/tracefiles
-
-TraceAltSrcDir = $(HS_ALT_SRC)/share/vm/trace
-TraceSrcDir = $(HS_COMMON_SRC)/share/vm/trace
-
-# set VPATH so make knows where to look for source files
-Src_Dirs_V += $(TraceSrcDir) $(TraceAltSrcDir)
-VPATH += $(Src_Dirs_V:%=%:)
-
-TraceGeneratedNames =     \
-    traceEventClasses.hpp \
-	traceEventIds.hpp     \
-	traceTypes.hpp
-
-ifeq ($(HAS_ALT_SRC), true)
-TraceGeneratedNames +=  \
-	traceRequestables.hpp \
-    traceEventControl.hpp
-
-ifneq ($(INCLUDE_TRACE), false)
-TraceGeneratedNames += traceProducer.cpp
-endif
-
-endif
-
-
-TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)
-
-XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen
-
-XML_DEPS =  $(TraceSrcDir)/trace.xml  $(TraceSrcDir)/tracetypes.xml \
-	$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod
-ifeq ($(HAS_ALT_SRC), true)
-	XML_DEPS += $(TraceAltSrcDir)/traceevents.xml
-endif
-
-.PHONY: all clean cleanall
-
-# #########################################################################
-
-all: $(TraceGeneratedFiles)
-
-GENERATE_CODE= \
-  $(QUIETLY) echo Generating $@; \
-  $(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@; \
-  test -f $@
-
-$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
-	$(GENERATE_CODE)
-
-$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
-	$(GENERATE_CODE)
-
-ifeq ($(HAS_ALT_SRC), false)
-
-$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
-	$(GENERATE_CODE)
-
-else
-
-$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
-	$(GENERATE_CODE)
-
-$(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS)
-	$(GENERATE_CODE)
-
-$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
-	$(GENERATE_CODE)
-
-$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
-	$(GENERATE_CODE)
-
-endif
-
-# #########################################################################
-
-
-clean cleanall:
-	rm $(TraceGeneratedFiles)
-
--- a/make/bsd/makefiles/vm.make	Thu Aug 01 03:44:03 2019 +0100
+++ b/make/bsd/makefiles/vm.make	Mon Aug 12 18:30:40 2019 +0300
@@ -52,7 +52,7 @@
 # Src_Dirs_V is everything in src/share/vm/*, plus the right os/*/vm and cpu/*/vm
 # The adfiles directory contains ad_<arch>.[ch]pp.
 # The jvmtifiles directory contains jvmti*.[ch]pp
-Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles $(GENERATED)/tracefiles
+Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles $(GENERATED)/jfrfiles
 VPATH += $(Src_Dirs_V:%=%:)
 
 # set INCLUDES for C preprocessor.
@@ -164,24 +164,21 @@
   LIBJVM_DIZ         = lib$(JVM).diz
 endif
 
+ifeq ($(ENABLE_JFR),false)
+EXCLUDE_JFR_PATHS:= -o -name jfr -prune
+endif
 SPECIAL_PATHS:=adlc c1 gc_implementation opto shark libadt
 
 SOURCE_PATHS=\
   $(shell find $(HS_COMMON_SRC)/share/vm/* -type d \! \
-      \( -name DUMMY $(foreach dir,$(SPECIAL_PATHS),-o -name $(dir)) \))
+      \( -name DUMMY $(foreach dir,$(SPECIAL_PATHS),-o -name $(dir)) $(EXCLUDE_JFR_PATHS) \))
 SOURCE_PATHS+=$(HS_COMMON_SRC)/os/$(Platform_os_family)/vm
 SOURCE_PATHS+=$(HS_COMMON_SRC)/os/posix/vm
 SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm
 SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm
 
 CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path))
-CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/tracefiles
-
-ifneq ($(INCLUDE_TRACE), false)
-CORE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \
-  find $(HS_ALT_SRC)/share/vm/jfr -type d; \
-  fi)
-endif
+CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/jfrfiles
 
 COMPILER1_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1)
 COMPILER1_PATHS += $(HS_COMMON_SRC)/share/vm/c1
--- a/make/defs.make	Thu Aug 01 03:44:03 2019 +0100
+++ b/make/defs.make	Mon Aug 12 18:30:40 2019 +0300
@@ -371,5 +371,9 @@
 EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/$(JDK_INCLUDE_SUBDIR)/jni_md.h
 EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jmm.h
 
+ifeq ($(ENABLE_JFR), true)
+EXPORT_LIST += $(EXPORT_JRE_LIB_DIR)/jdk/jfr/internal/types/metadata.xml
+endif
+
 .PHONY: $(HS_ALT_MAKE)/defs.make
 
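With the defs.make hunk above, the metadata.xml export only exists when JFR is enabled, matching the rule added to make/Makefile at the top of this changeset. A hypothetical way to drive the two configurations by hand, for illustration only (in 8u the flag normally comes from the configure/spec layer rather than the command line):

# hypothetical convenience targets, not part of the changeset
jfr-on:
	$(MAKE) product ENABLE_JFR=true

jfr-off:
	$(MAKE) product ENABLE_JFR=false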
--- a/make/linux/makefiles/buildtree.make	Thu Aug 01 03:44:03 2019 +0100
+++ b/make/linux/makefiles/buildtree.make	Mon Aug 12 18:30:40 2019 +0300
@@ -48,7 +48,7 @@
 # flags.make	- with macro settings
 # vm.make	- to support making "$(MAKE) -v vm.make" in makefiles
 # adlc.make	-
-# trace.make	- generate tracing event and type definitions
+# jfr.make	- generate jfr event and type definitions
 # jvmti.make	- generate JVMTI bindings from the spec (JSR-163)
 # sa.make	- generate SA jar file and natives
 #
@@ -112,6 +112,10 @@
 endif
 endif
 
+ifeq ($(ENABLE_JFR),false)
+ALWAYS_EXCLUDE_DIRS += -o -name jfr
+endif
+
 # Get things from the platform file.
 COMPILER	= $(shell sed -n 's/^compiler[ 	]*=[ 	]*//p' $(PLATFORM_FILE))
 
@@ -119,7 +123,7 @@
 	$(PLATFORM_DIR)/generated/dependencies \
 	$(PLATFORM_DIR)/generated/adfiles \
 	$(PLATFORM_DIR)/generated/jvmtifiles \
-	$(PLATFORM_DIR)/generated/tracefiles
+	$(PLATFORM_DIR)/generated/jfrfiles
 
 TARGETS      = debug fastdebug optimized product
 SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS))
@@ -127,7 +131,7 @@
 # For dependencies and recursive makes.
 BUILDTREE_MAKE	= $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make
 
-BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make trace.make sa.make
+BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make jfr.make sa.make
 
 BUILDTREE_VARS	= GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \
 	SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT)
@@ -200,6 +204,13 @@
 
 DATA_MODE = $(DATA_MODE/$(BUILDARCH))
 
+ifeq ($(ENABLE_JFR), true)
+  INCLUDE_JFR = 1
+else
+  INCLUDE_JFR = 0
+endif
+
+
 flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
 	@echo Creating $@ ...
 	$(QUIETLY) ( \
@@ -279,8 +290,7 @@
 	    echo && \
 	    echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \
 	    echo "SYSDEFS += \$$(HOTSPOT_EXTRA_SYSDEFS)"; \
-	[ -n "$(INCLUDE_TRACE)" ] && \
-	    echo && echo "INCLUDE_TRACE = $(INCLUDE_TRACE)"; \
+	echo && echo "CFLAGS += -DINCLUDE_JFR=$(INCLUDE_JFR)"; \
 	echo; \
 	[ -n "$(SPEC)" ] && \
 	    echo "include $(SPEC)"; \
@@ -349,7 +359,7 @@
 	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
 	) > $@
 
-trace.make: $(BUILDTREE_MAKE)
+jfr.make: $(BUILDTREE_MAKE)
 	@echo Creating $@ ...
 	$(QUIETLY) ( \
 	$(BUILDTREE_COMMENT); \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/linux/makefiles/jfr.make	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,92 @@
+#
+# Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2018-2019, Azul Systems, Inc. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# This makefile (jfr.make) is included from the jfr.make in the
+# build directories.
+#
+# It knows how to build and run the tools to generate jfr.
+
+include $(GAMMADIR)/make/linux/makefiles/rules.make
+
+# #########################################################################
+# Build tools needed for the Jfr source code generation
+
+TOPDIR      = $(shell echo `pwd`)
+GENERATED   = $(TOPDIR)/../generated
+
+JFR_TOOLS_SRCDIR := $(GAMMADIR)/src/share/vm/jfr
+JFR_TOOLS_OUTPUTDIR := $(GENERATED)/tools/jfr
+
+JFR_OUTPUTDIR := $(GENERATED)/jfrfiles
+JFR_SRCDIR := $(GAMMADIR)/src/share/vm/jfr/metadata
+
+METADATA_XML ?= $(JFR_SRCDIR)/metadata.xml
+METADATA_XSD ?= $(JFR_SRCDIR)/metadata.xsd
+
+# Changing these will trigger a rebuild of generated jfr files.
+JFR_DEPS += \
+    $(METADATA_XML) \
+    $(METADATA_XSD) \
+    #
+
+JfrGeneratedNames = \
+	jfrEventClasses.hpp \
+	jfrEventControl.hpp \
+	jfrEventIds.hpp \
+	jfrPeriodic.hpp \
+	jfrTypes.hpp
+
+JfrGenSource = $(JFR_TOOLS_SRCDIR)/GenerateJfrFiles.java
+JfrGenClass = $(JFR_TOOLS_OUTPUTDIR)/build/tools/jfr/GenerateJfrFiles.class
+
+JfrGeneratedFiles = $(JfrGeneratedNames:%=$(JFR_OUTPUTDIR)/%)
+
+.PHONY: all clean cleanall
+
+# #########################################################################
+
+all: $(JfrGeneratedFiles)
+
+$(JfrGenClass): $(JfrGenSource)
+	mkdir -p $(@D)
+	$(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -d $(JFR_TOOLS_OUTPUTDIR) $(JfrGenSource)
+
+$(JFR_OUTPUTDIR)/jfrEventClasses.hpp: $(METADATA_XML) $(METADATA_XSD) $(JfrGenClass)
+	$(QUIETLY) echo Generating $(@F)
+	mkdir -p $(@D)
+	$(QUIETLY) $(REMOTE) $(RUN.JAVA) -cp $(JFR_TOOLS_OUTPUTDIR) build.tools.jfr.GenerateJfrFiles $(METADATA_XML) $(METADATA_XSD) $(JFR_OUTPUTDIR)
+	test -f $@
+
+$(filter-out $(JFR_OUTPUTDIR)/jfrEventClasses.hpp, $(JfrGeneratedFiles)): $(JFR_OUTPUTDIR)/jfrEventClasses.hpp
+
+TARGETS += $(JFR_OUTPUTDIR)/jfrEventClasses.hpp
+
+# #########################################################################
+
+clean cleanall :
+	rm $(JfrGenClass) $(JfrGeneratedFiles)
+
+# #########################################################################
+
--- a/make/linux/makefiles/rules.make	Thu Aug 01 03:44:03 2019 +0100
+++ b/make/linux/makefiles/rules.make	Mon Aug 12 18:30:40 2019 +0300
@@ -126,8 +126,8 @@
 RUN.JAR$(MAKE_VERBOSE) += >/dev/null
 
 # Settings for javac
-BOOT_SOURCE_LANGUAGE_VERSION = 6
-BOOT_TARGET_CLASS_VERSION = 6
+BOOT_SOURCE_LANGUAGE_VERSION = 8
+BOOT_TARGET_CLASS_VERSION = 8
 JAVAC_FLAGS = -g -encoding ascii
 BOOTSTRAP_JAVAC_FLAGS = $(JAVAC_FLAGS) -source $(BOOT_SOURCE_LANGUAGE_VERSION) -target $(BOOT_TARGET_CLASS_VERSION)
 
--- a/make/linux/makefiles/top.make	Thu Aug 01 03:44:03 2019 +0100
+++ b/make/linux/makefiles/top.make	Mon Aug 12 18:30:40 2019 +0300
@@ -80,7 +80,7 @@
 	@echo All done.
 
 # This is an explicit dependency for the sake of parallel makes.
-vm_build_preliminaries:  checks $(Cached_plat) $(AD_Files_If_Required) trace_stuff jvmti_stuff sa_stuff
+vm_build_preliminaries:  checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff jfr_stuff sa_stuff
 	@# We need a null action here, so implicit rules don't get consulted.
 
 $(Cached_plat): $(Plat_File)
@@ -94,9 +94,9 @@
 jvmti_stuff: $(Cached_plat) $(adjust-mflags)
 	@$(MAKE) -f jvmti.make $(MFLAGS-adjusted)
 
-# generate trace files
-trace_stuff: jvmti_stuff $(Cached_plat) $(adjust-mflags)
-	@$(MAKE) -f trace.make $(MFLAGS-adjusted)
+# generate JFR stuff
+jfr_stuff:  $(Cached_plat) $(adjust-mflags)
+	@$(MAKE) -f jfr.make $(MFLAGS-adjusted)
 
 # generate SA jar files and native header
 sa_stuff:
--- a/make/linux/makefiles/trace.make	Thu Aug 01 03:44:03 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,120 +0,0 @@
-#
-# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-#
-
-# This makefile (trace.make) is included from the trace.make in the
-# build directories.
-#
-# It knows how to build and run the tools to generate trace files.
-
-include $(GAMMADIR)/make/linux/makefiles/rules.make
-include $(GAMMADIR)/make/altsrc.make
-
-# #########################################################################
-
-HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \
-  echo "true"; else echo "false";\
-  fi)
-
-TOPDIR      = $(shell echo `pwd`)
-GENERATED   = $(TOPDIR)/../generated
-JvmtiOutDir = $(GENERATED)/jvmtifiles
-TraceOutDir   = $(GENERATED)/tracefiles
-
-TraceAltSrcDir = $(HS_ALT_SRC)/share/vm/trace
-TraceSrcDir = $(HS_COMMON_SRC)/share/vm/trace
-
-# set VPATH so make knows where to look for source files
-Src_Dirs_V += $(TraceSrcDir) $(TraceAltSrcDir)
-VPATH += $(Src_Dirs_V:%=%:)
-
-TraceGeneratedNames =     \
-    traceEventClasses.hpp \
-	traceEventIds.hpp     \
-	traceTypes.hpp
-
-ifeq ($(HAS_ALT_SRC), true)
-TraceGeneratedNames +=  \
-	traceRequestables.hpp \
-    traceEventControl.hpp
-
-ifneq ($(INCLUDE_TRACE), false)
-TraceGeneratedNames += traceProducer.cpp
-endif
-
-endif
-
-TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)
-
-XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen
-
-XML_DEPS =  $(TraceSrcDir)/trace.xml  $(TraceSrcDir)/tracetypes.xml \
-	$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod
-ifeq ($(HAS_ALT_SRC), true)
-	XML_DEPS += $(TraceAltSrcDir)/traceevents.xml
-endif
-
-.PHONY: all clean cleanall
-
-# #########################################################################
-
-all: $(TraceGeneratedFiles)
-
-GENERATE_CODE= \
-  $(QUIETLY) echo Generating $@; \
-  $(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@; \
-  test -f $@
-
-$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
-	$(GENERATE_CODE)
-
-$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
-	$(GENERATE_CODE)
-
-ifeq ($(HAS_ALT_SRC), false)
-
-$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
-	$(GENERATE_CODE)
-
-else
-
-$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
-	$(GENERATE_CODE)
-
-$(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS)
-	$(GENERATE_CODE)
-
-$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
-	$(GENERATE_CODE)
-
-$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
-	$(GENERATE_CODE)
-
-endif
-
-# #########################################################################
-
-clean cleanall:
-	rm $(TraceGeneratedFiles)
-
-
--- a/make/linux/makefiles/vm.make	Thu Aug 01 03:44:03 2019 +0100
+++ b/make/linux/makefiles/vm.make	Mon Aug 12 18:30:40 2019 +0300
@@ -54,7 +54,7 @@
 # Src_Dirs_V is everything in src/share/vm/*, plus the right os/*/vm and cpu/*/vm
 # The adfiles directory contains ad_<arch>.[ch]pp.
 # The jvmtifiles directory contains jvmti*.[ch]pp
-Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles $(GENERATED)/tracefiles
+Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles $(GENERATED)/jfrfiles
 VPATH += $(Src_Dirs_V:%=%:)
 
 # set INCLUDES for C preprocessor.
@@ -149,24 +149,21 @@
 LIBJVM_DEBUGINFO   = lib$(JVM).debuginfo
 LIBJVM_DIZ         = lib$(JVM).diz
 
+ifeq ($(ENABLE_JFR),false)
+EXCLUDE_JFR_PATHS:= -o -name jfr -prune
+endif
 SPECIAL_PATHS:=adlc c1 gc_implementation opto shark libadt
 
 SOURCE_PATHS=\
   $(shell find $(HS_COMMON_SRC)/share/vm/* -type d \! \
-      \( -name DUMMY $(foreach dir,$(SPECIAL_PATHS),-o -name $(dir)) \))
+      \( -name DUMMY $(foreach dir,$(SPECIAL_PATHS),-o -name $(dir)) $(EXCLUDE_JFR_PATHS) \))
 SOURCE_PATHS+=$(HS_COMMON_SRC)/os/$(Platform_os_family)/vm
 SOURCE_PATHS+=$(HS_COMMON_SRC)/os/posix/vm
 SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm
 SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm
 
 CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path))
-CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/tracefiles
-
-ifneq ($(INCLUDE_TRACE), false)
-CORE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \
-  find $(HS_ALT_SRC)/share/vm/jfr -type d; \
-  fi)
-endif
+CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/jfrfiles
 
 COMPILER1_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1)
 COMPILER1_PATHS += $(HS_COMMON_SRC)/share/vm/c1
--- a/make/solaris/makefiles/buildtree.make	Thu Aug 01 03:44:03 2019 +0100
+++ b/make/solaris/makefiles/buildtree.make	Mon Aug 12 18:30:40 2019 +0300
@@ -48,7 +48,7 @@
 # flags.make	- with macro settings
 # vm.make	- to support making "$(MAKE) -v vm.make" in makefiles
 # adlc.make	-
-# trace.make	- generate tracing event and type definitions
+# jfr.make	- generate jfr event and type definitions
 # jvmti.make	- generate JVMTI bindings from the spec (JSR-163)
 # sa.make	- generate SA jar file and natives
 #
@@ -103,6 +103,10 @@
 endif
 endif
 
+ifeq ($(ENABLE_JFR),false)
+ALWAYS_EXCLUDE_DIRS += -o -name jfr
+endif
+
 # Get things from the platform file.
 COMPILER	= $(shell sed -n 's/^compiler[ 	]*=[ 	]*//p' $(PLATFORM_FILE))
 
@@ -110,7 +114,7 @@
 	$(PLATFORM_DIR)/generated/dependencies \
 	$(PLATFORM_DIR)/generated/adfiles \
 	$(PLATFORM_DIR)/generated/jvmtifiles \
-	$(PLATFORM_DIR)/generated/tracefiles
+	$(PLATFORM_DIR)/generated/jfrfiles
 
 TARGETS      = debug fastdebug optimized product
 SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS))
@@ -118,7 +122,7 @@
 # For dependencies and recursive makes.
 BUILDTREE_MAKE	= $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make
 
-BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make trace.make sa.make
+BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make jfr.make sa.make
 
 BUILDTREE_VARS	= GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \
 	ARCH=$(ARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT)
@@ -190,6 +194,12 @@
 
 DATA_MODE = $(DATA_MODE/$(BUILDARCH))
 
+ifeq ($(ENABLE_JFR), true)
+  INCLUDE_JFR = 1
+else
+  INCLUDE_JFR = 0
+endif
+
 flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
 	@echo Creating $@ ...
 	$(QUIETLY) ( \
@@ -269,9 +279,10 @@
 	    echo && \
 	    echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \
 	    echo "SYSDEFS += \$$(HOTSPOT_EXTRA_SYSDEFS)"; \
+            echo && echo "CFLAGS += -DINCLUDE_JFR=$(INCLUDE_JFR)"; \
 	echo; \
-	[ -n "$(INCLUDE_TRACE)" ] && \
-	    echo && echo "INCLUDE_TRACE = $(INCLUDE_TRACE)"; \
+	[ -n "$(INCLUDE_JFR)" ] && \
+	    echo && echo "INCLUDE_JFR = $(INCLUDE_JFR)"; \
 	[ -n "$(SPEC)" ] && \
 	    echo "include $(SPEC)"; \
 	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(VARIANT).make"; \
@@ -339,7 +350,7 @@
 	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
 	) > $@
 
-trace.make: $(BUILDTREE_MAKE)
+jfr.make: $(BUILDTREE_MAKE)
 	@echo Creating $@ ...
 	$(QUIETLY) ( \
 	$(BUILDTREE_COMMENT); \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/solaris/makefiles/jfr.make	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,92 @@
+#
+# Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2018-2019, Azul Systems, Inc. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# This makefile (jfr.make) is included from the jfr.make in the
+# build directories.
+#
+# It knows how to build and run the tools to generate jfr.
+
+include $(GAMMADIR)/make/solaris/makefiles/rules.make
+
+# #########################################################################
+# Build tools needed for the Jfr source code generation
+
+TOPDIR      = $(shell echo `pwd`)
+GENERATED   = $(TOPDIR)/../generated
+
+JFR_TOOLS_SRCDIR := $(GAMMADIR)/src/share/vm/jfr
+JFR_TOOLS_OUTPUTDIR := $(GENERATED)/tools/jfr
+
+JFR_OUTPUTDIR := $(GENERATED)/jfrfiles
+JFR_SRCDIR := $(GAMMADIR)/src/share/vm/jfr/metadata
+
+METADATA_XML ?= $(JFR_SRCDIR)/metadata.xml
+METADATA_XSD ?= $(JFR_SRCDIR)/metadata.xsd
+
+# Changing these will trigger a rebuild of generated jfr files.
+JFR_DEPS += \
+    $(METADATA_XML) \
+    $(METADATA_XSD) \
+    #
+
+JfrGeneratedNames = \
+	jfrEventClasses.hpp \
+	jfrEventControl.hpp \
+	jfrEventIds.hpp \
+	jfrPeriodic.hpp \
+	jfrTypes.hpp
+
+JfrGenSource = $(JFR_TOOLS_SRCDIR)/GenerateJfrFiles.java
+JfrGenClass = $(JFR_TOOLS_OUTPUTDIR)/build/tools/jfr/GenerateJfrFiles.class
+
+JfrGeneratedFiles = $(JfrGeneratedNames:%=$(JFR_OUTPUTDIR)/%)
+
+.PHONY: all clean cleanall
+
+# #########################################################################
+
+all: $(JfrGeneratedFiles)
+
+$(JfrGenClass): $(JfrGenSource)
+	mkdir -p $(@D)
+	$(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -d $(JFR_TOOLS_OUTPUTDIR) $(JfrGenSource)
+
+$(JFR_OUTPUTDIR)/jfrEventClasses.hpp: $(METADATA_XML) $(METADATA_XSD) $(JfrGenClass)
+	$(QUIETLY) echo Generating $(@F)
+	mkdir -p $(@D)
+	$(QUIETLY) $(REMOTE) $(RUN.JAVA) -cp $(JFR_TOOLS_OUTPUTDIR) build.tools.jfr.GenerateJfrFiles $(METADATA_XML) $(METADATA_XSD) $(JFR_OUTPUTDIR)
+	test -f $@
+
+$(filter-out $(JFR_OUTPUTDIR)/jfrEventClasses.hpp, $(JfrGeneratedFiles)): $(JFR_OUTPUTDIR)/jfrEventClasses.hpp
+
+TARGETS += $(JFR_OUTPUTDIR)/jfrEventClasses.hpp
+
+# #########################################################################
+
+clean cleanall :
+	rm $(JfrGenClass) $(JfrGeneratedFiles)
+
+# #########################################################################
+
--- a/make/solaris/makefiles/rules.make	Thu Aug 01 03:44:03 2019 +0100
+++ b/make/solaris/makefiles/rules.make	Mon Aug 12 18:30:40 2019 +0300
@@ -118,8 +118,8 @@
 RUN.JAR$(MAKE_VERBOSE) += >/dev/null
 
 # Settings for javac
-BOOT_SOURCE_LANGUAGE_VERSION = 6
-BOOT_TARGET_CLASS_VERSION = 6
+BOOT_SOURCE_LANGUAGE_VERSION = 8
+BOOT_TARGET_CLASS_VERSION = 8
 JAVAC_FLAGS = -g -encoding ascii
 BOOTSTRAP_JAVAC_FLAGS = $(JAVAC_FLAGS) -source $(BOOT_SOURCE_LANGUAGE_VERSION) -target $(BOOT_TARGET_CLASS_VERSION)
 
--- a/make/solaris/makefiles/top.make	Thu Aug 01 03:44:03 2019 +0100
+++ b/make/solaris/makefiles/top.make	Mon Aug 12 18:30:40 2019 +0300
@@ -73,7 +73,7 @@
 	@echo All done.
 
 # This is an explicit dependency for the sake of parallel makes.
-vm_build_preliminaries:  checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff trace_stuff sa_stuff
+vm_build_preliminaries:  checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff jfr_stuff sa_stuff
 	@# We need a null action here, so implicit rules don't get consulted.
 
 $(Cached_plat): $(Plat_File)
@@ -87,9 +87,9 @@
 jvmti_stuff: $(Cached_plat) $(adjust-mflags)
 	@$(MAKE) -f jvmti.make $(MFLAGS-adjusted)
 
-# generate trace files 
-trace_stuff: jvmti_stuff $(Cached_plat) $(adjust-mflags)
-	@$(MAKE) -f trace.make $(MFLAGS-adjusted)
+# generate JFR files
+jfr_stuff:  $(Cached_plat) $(adjust-mflags)
+	@$(MAKE) -f jfr.make $(MFLAGS-adjusted)
 
 # generate SA jar files and native header
 sa_stuff:
--- a/make/solaris/makefiles/trace.make	Thu Aug 01 03:44:03 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,120 +0,0 @@
-#
-# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-#
-
-# This makefile (trace.make) is included from the trace.make in the
-# build directories.
-#
-# It knows how to build and run the tools to generate trace files.
-
-include $(GAMMADIR)/make/solaris/makefiles/rules.make
-include $(GAMMADIR)/make/altsrc.make
-
-# #########################################################################
-
-HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \
-  echo "true"; else echo "false";\
-  fi)
-
-TOPDIR      = $(shell echo `pwd`)
-GENERATED   = $(TOPDIR)/../generated
-JvmtiOutDir = $(GENERATED)/jvmtifiles
-TraceOutDir   = $(GENERATED)/tracefiles
-
-TraceAltSrcDir = $(HS_ALT_SRC)/share/vm/trace
-TraceSrcDir = $(HS_COMMON_SRC)/share/vm/trace
-
-# set VPATH so make knows where to look for source files
-Src_Dirs_V += $(TraceSrcDir) $(TraceAltSrcDir)
-VPATH += $(Src_Dirs_V:%=%:)
-
-TraceGeneratedNames =     \
-    traceEventClasses.hpp \
-	traceEventIds.hpp     \
-	traceTypes.hpp
-
-ifeq ($(HAS_ALT_SRC), true)
-TraceGeneratedNames +=  \
-	traceRequestables.hpp \
-    traceEventControl.hpp
-
-ifneq ($(INCLUDE_TRACE), false)
-  TraceGeneratedNames += traceProducer.cpp
-endif
-
-endif
-
-TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)
-
-XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen
-
-XML_DEPS =  $(TraceSrcDir)/trace.xml  $(TraceSrcDir)/tracetypes.xml \
-	$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod
-ifeq ($(HAS_ALT_SRC), true)
-	XML_DEPS += $(TraceAltSrcDir)/traceevents.xml
-endif
-
-.PHONY: all clean cleanall
-
-# #########################################################################
-
-all: $(TraceGeneratedFiles)
-
-GENERATE_CODE= \
-  $(QUIETLY) echo Generating $@; \
-  $(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@; \
-  test -f $@
-
-$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
-	$(GENERATE_CODE)
-
-$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
-	$(GENERATE_CODE)
-
-ifeq ($(HAS_ALT_SRC), false)
-
-$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
-	$(GENERATE_CODE)
-
-else
-
-$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
-	$(GENERATE_CODE)
-
-$(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS)
-	$(GENERATE_CODE)
-
-$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
-	$(GENERATE_CODE)
-
-$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
-	$(GENERATE_CODE)
-
-endif
-
-# #########################################################################
-
-clean cleanall:
-	rm $(TraceGeneratedFiles)
-
-
--- a/make/solaris/makefiles/vm.make	Thu Aug 01 03:44:03 2019 +0100
+++ b/make/solaris/makefiles/vm.make	Mon Aug 12 18:30:40 2019 +0300
@@ -48,7 +48,7 @@
 # Src_Dirs_V is everything in src/share/vm/*, plus the right os/*/vm and cpu/*/vm
 # The adfiles directory contains ad_<arch>.[ch]pp.
 # The jvmtifiles directory contains jvmti*.[ch]pp
-Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles $(GENERATED)/tracefiles
+Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles $(GENERATED)/jfrfiles
 VPATH += $(Src_Dirs_V:%=%:)
 
 # set INCLUDES for C preprocessor
@@ -144,6 +144,8 @@
 
 LIBS += -lkstat
 
+LIBS += -lrt
+
 # By default, link the *.o into the library, not the executable.
 LINK_INTO$(LINK_INTO) = LIBJVM
 
@@ -161,25 +163,21 @@
 
 LIBJVM_DEBUGINFO   = lib$(JVM).debuginfo
 LIBJVM_DIZ         = lib$(JVM).diz
-
+ifeq ($(ENABLE_JFR),false)
+EXCLUDE_JFR_PATHS:= -o -name jfr -prune
+endif
 SPECIAL_PATHS:=adlc c1 dist gc_implementation opto shark libadt
 
 SOURCE_PATHS=\
   $(shell find $(HS_COMMON_SRC)/share/vm/* -type d \! \
-      \( -name DUMMY $(foreach dir,$(SPECIAL_PATHS),-o -name $(dir)) \))
+      \( -name DUMMY $(foreach dir,$(SPECIAL_PATHS),-o -name $(dir)) $(EXCLUDE_JFR_PATHS) \))
 SOURCE_PATHS+=$(HS_COMMON_SRC)/os/$(Platform_os_family)/vm
 SOURCE_PATHS+=$(HS_COMMON_SRC)/os/posix/vm
 SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm
 SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm
 
 CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path))
-CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/tracefiles
-
-ifneq ($(INCLUDE_TRACE), false)
-CORE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \
-  find $(HS_ALT_SRC)/share/vm/jfr -type d; \
-  fi)
-endif
+CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/jfrfiles
 
 COMPILER1_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1)
 COMPILER1_PATHS += $(HS_COMMON_SRC)/share/vm/c1
--- a/make/windows/build.make	Thu Aug 01 03:44:03 2019 +0100
+++ b/make/windows/build.make	Mon Aug 12 18:30:40 2019 +0300
@@ -302,6 +302,7 @@
 	@ echo LD=$(LD)						>> $@
 	@ echo MT=$(MT)						>> $@
 	@ echo RC=$(RC)						>> $@
+	@ echo ENABLE_JFR=$(ENABLE_JFR)				>> $@
 	@ sh $(WorkSpace)/make/windows/get_msc_ver.sh		>> $@
 	@ if "$(ENABLE_FULL_DEBUG_SYMBOLS)" NEQ "" echo ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS) >> $@
 	@ if "$(ZIP_DEBUGINFO_FILES)" NEQ "" echo ZIP_DEBUGINFO_FILES=$(ZIP_DEBUGINFO_FILES) >> $@
--- a/make/windows/create_obj_files.sh	Thu Aug 01 03:44:03 2019 +0100
+++ b/make/windows/create_obj_files.sh	Mon Aug 12 18:30:40 2019 +0300
@@ -57,8 +57,8 @@
 COMMONSRC=${WorkSpace}/${COMMONSRC_REL}
 ALTSRC=${WorkSpace}/${ALTSRC_REL}
 
-BASE_PATHS="`if [ -d ${ALTSRC}/share/vm ]; then $FIND ${ALTSRC}/share/vm ! -name vm -prune -type d \! \( -name adlc -o -name c1 -o -name gc_implementation -o -name opto -o -name shark -o -name libadt \); fi`"
-BASE_PATHS="${BASE_PATHS} ` $FIND ${COMMONSRC}/share/vm ! -name vm -prune -type d \! \( -name adlc -o -name c1 -o -name gc_implementation -o -name opto -o -name shark -o -name libadt \)`"
+BASE_PATHS="`if [ -d ${ALTSRC}/share/vm ]; then $FIND ${ALTSRC}/share/vm ! -name vm -prune -type d \! \( -name adlc -o -name c1 -o -name gc_implementation -o -name opto -o -name shark -o -name libadt -o -name jfr \); fi`"
+BASE_PATHS="${BASE_PATHS} ` $FIND ${COMMONSRC}/share/vm ! -name vm -prune -type d \! \( -name adlc -o -name c1 -o -name gc_implementation -o -name opto -o -name shark -o -name libadt -o -name jfr \)`"
 
 for sd in \
     share/vm/gc_implementation/shared \
@@ -71,10 +71,10 @@
   BASE_PATHS="${BASE_PATHS} ${COMMONSRC}/${sd}"
 done
 
-BASE_PATHS="${BASE_PATHS} ${GENERATED}/jvmtifiles ${GENERATED}/tracefiles"
+BASE_PATHS="${BASE_PATHS} ${GENERATED}/jvmtifiles ${GENERATED}/jfrfiles"
 
-if [ -d "${ALTSRC}/share/vm/jfr/buffers" ]; then
-  BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/buffers"
+if [ "$ENABLE_JFR" = "true" ]; then
+BASE_PATHS="${BASE_PATHS} `$FIND ${COMMONSRC}/share/vm/jfr -type d`"
 fi
 
 BASE_PATHS="${BASE_PATHS} ${COMMONSRC}/share/vm/prims/wbtestmethods"
--- a/make/windows/makefiles/compile.make	Thu Aug 01 03:44:03 2019 +0100
+++ b/make/windows/makefiles/compile.make	Mon Aug 12 18:30:40 2019 +0300
@@ -314,6 +314,13 @@
 
 CXX_FLAGS = $(CXX_FLAGS) $(MP_FLAG)
 
+!if "$(ENABLE_JFR)" == "true"
+INCLUDE_JFR=1
+!else
+INCLUDE_JFR=0
+!endif
+CXX_FLAGS=$(CXX_FLAGS) /D INCLUDE_JFR=$(INCLUDE_JFR)
+
 # If NO_OPTIMIZATIONS is defined in the environment, turn everything off
 !ifdef NO_OPTIMIZATIONS
 PRODUCT_OPT_OPTION   = $(DEBUG_OPT_OPTION)
@@ -357,4 +364,3 @@
 !if "$(MFC_DEBUG)" == "true"
 RC_FLAGS = $(RC_FLAGS) /D "_DEBUG"
 !endif
-
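
As a rough illustration (not part of this change), the INCLUDE_JFR define added above is the kind of switch shared code can key off to compile JFR support in or out; the JFR_ONLY helper shown here is an assumed convenience wrapper, not something introduced by this patch:

    // Hypothetical sketch: gate JFR-specific code on the INCLUDE_JFR define
    // passed down by the makefile above (the JFR_ONLY name is an assumption).
    #if INCLUDE_JFR
    #define JFR_ONLY(code) code
    #else
    #define JFR_ONLY(code)
    #endif

    // Usage: the wrapped statement compiles to nothing when JFR is excluded.
    // JFR_ONLY(Jfr::on_thread_start(thread);)
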
--- a/make/windows/makefiles/defs.make	Thu Aug 01 03:44:03 2019 +0100
+++ b/make/windows/makefiles/defs.make	Mon Aug 12 18:30:40 2019 +0300
@@ -306,3 +306,6 @@
     MAKE_ARGS += MT="$(subst /,\\,$(MT))"
   endif
 endif
+
+MAKE_ARGS += ENABLE_JFR=$(ENABLE_JFR)
+
--- a/make/windows/makefiles/generated.make	Thu Aug 01 03:44:03 2019 +0100
+++ b/make/windows/makefiles/generated.make	Mon Aug 12 18:30:40 2019 +0300
@@ -30,9 +30,9 @@
 JvmtiOutDir=jvmtifiles
 !include $(WorkSpace)/make/windows/makefiles/jvmti.make
 
-# Pick up rules for building trace
-TraceOutDir=tracefiles
-!include $(WorkSpace)/make/windows/makefiles/trace.make
+# Pick up rules for building JFR
+JfrOutDir=jfrfiles
+!include $(WorkSpace)/make/windows/makefiles/jfr.make
 
 # Pick up rules for building SA
 !include $(WorkSpace)/make/windows/makefiles/sa.make
@@ -40,9 +40,9 @@
 AdlcOutDir=adfiles
 
 !if ("$(Variant)" == "compiler2") || ("$(Variant)" == "tiered")
-default:: $(AdlcOutDir)/ad_$(Platform_arch_model).cpp $(AdlcOutDir)/dfa_$(Platform_arch_model).cpp $(JvmtiGeneratedFiles) $(TraceGeneratedFiles) buildobjfiles
+default:: $(AdlcOutDir)/ad_$(Platform_arch_model).cpp $(AdlcOutDir)/dfa_$(Platform_arch_model).cpp $(JvmtiGeneratedFiles) $(JfrGeneratedFiles) buildobjfiles
 !else
-default:: $(JvmtiGeneratedFiles) $(TraceGeneratedFiles) buildobjfiles
+default:: $(JvmtiGeneratedFiles) $(JfrGeneratedFiles) buildobjfiles
 !endif
 
 buildobjfiles:
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/windows/makefiles/jfr.make	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,81 @@
+#
+# Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2018-2019, Azul Systems, Inc. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+#
+
+# This makefile (jfr.make) is included from the jfr.make in the
+# build directories.
+#
+# It knows how to build and run the tools to generate the JFR files.
+
+!include $(WorkSpace)/make/windows/makefiles/rules.make
+
+# #########################################################################
+# Build tools needed for the Jfr source code generation
+
+GENERATED   = ../generated
+
+JFR_TOOLS_SRCDIR = $(WorkSpace)/src/share/vm/jfr
+JFR_TOOLS_OUTPUTDIR = $(GENERATED)/tools/jfr
+
+JFR_OUTPUTDIR = $(GENERATED)/jfrfiles
+JFR_SRCDIR = $(WorkSpace)/src/share/vm/jfr/metadata
+
+METADATA_XML = $(JFR_SRCDIR)/metadata.xml
+METADATA_XSD = $(JFR_SRCDIR)/metadata.xsd
+
+# Changing these will trigger a rebuild of generated jfr files.
+JFR_DEPS = $(METADATA_XML) \
+	$(METADATA_XSD)
+
+JfrGeneratedFiles = \
+	$(JFR_OUTPUTDIR)/jfrEventControl.hpp \
+	$(JFR_OUTPUTDIR)/jfrEventIds.hpp \
+	$(JFR_OUTPUTDIR)/jfrPeriodic.hpp \
+	$(JFR_OUTPUTDIR)/jfrTypes.hpp
+
+JfrGenSource = $(JFR_TOOLS_SRCDIR)/GenerateJfrFiles.java
+JfrGenClass = $(JFR_TOOLS_OUTPUTDIR)/build/tools/jfr/GenerateJfrFiles.class
+
+.PHONY: all cleanall
+
+# #########################################################################
+
+all: $(JfrGeneratedFiles)
+
+$(JfrGenClass): $(JfrGenSource)
+	mkdir -p $(@D)
+	$(COMPILE_JAVAC) -d $(JFR_TOOLS_OUTPUTDIR) $(JfrGenSource)
+
+$(JFR_OUTPUTDIR)/jfrEventClasses.hpp: $(METADATA_XML) $(METADATA_XSD) $(JfrGenClass)
+	echo Generating $(@F)
+	mkdir -p $(@D)
+	$(RUN_JAVA) -cp $(JFR_TOOLS_OUTPUTDIR) build.tools.jfr.GenerateJfrFiles $(METADATA_XML) $(METADATA_XSD) $(JFR_OUTPUTDIR)
+	test -f $@
+
+$(JfrGeneratedFiles): $(JFR_OUTPUTDIR)/jfrEventClasses.hpp
+
+# #########################################################################
+
+cleanall :
+	rm $(JfrGenClass) $(JfrGeneratedFiles) $(JFR_OUTPUTDIR)/jfrEventClasses.hpp
--- a/make/windows/makefiles/rules.make	Thu Aug 01 03:44:03 2019 +0100
+++ b/make/windows/makefiles/rules.make	Mon Aug 12 18:30:40 2019 +0300
@@ -44,8 +44,8 @@
 !endif
 
 # Settings for javac
-BOOT_SOURCE_LANGUAGE_VERSION=6
-BOOT_TARGET_CLASS_VERSION=6
+BOOT_SOURCE_LANGUAGE_VERSION=8
+BOOT_TARGET_CLASS_VERSION=8
 JAVAC_FLAGS=-g -encoding ascii
 BOOTSTRAP_JAVAC_FLAGS=$(JAVAC_FLAGS) -source $(BOOT_SOURCE_LANGUAGE_VERSION) -target $(BOOT_TARGET_CLASS_VERSION)
 
--- a/make/windows/makefiles/trace.make	Thu Aug 01 03:44:03 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,120 +0,0 @@
-#
-# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-#
-
-# This makefile (trace.make) is included from the trace.make in the
-# build directories.
-#
-# It knows how to build and run the tools to generate trace files.
-
-!include $(WorkSpace)/make/windows/makefiles/rules.make
-
-# #########################################################################
-
-
-TraceAltSrcDir = $(WorkSpace)/src/closed/share/vm/trace
-TraceSrcDir = $(WorkSpace)/src/share/vm/trace
-
-TraceGeneratedNames =     \
-    traceEventClasses.hpp \
-    traceEventIds.hpp     \
-    traceTypes.hpp
-
-!if EXISTS($(TraceAltSrcDir))
-TraceGeneratedNames = $(TraceGeneratedNames) \
-    traceRequestables.hpp \
-    traceEventControl.hpp \
-    traceProducer.cpp
-!endif
-
-
-#Note: TraceGeneratedFiles must be kept in sync with TraceGeneratedNames by hand.
-#Should be equivalent to "TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)"
-TraceGeneratedFiles = \
-    $(TraceOutDir)/traceEventClasses.hpp \
-	$(TraceOutDir)/traceEventIds.hpp     \
-	$(TraceOutDir)/traceTypes.hpp
-
-!if EXISTS($(TraceAltSrcDir))
-TraceGeneratedFiles = $(TraceGeneratedFiles) \
-	$(TraceOutDir)/traceRequestables.hpp \
-    $(TraceOutDir)/traceEventControl.hpp \
-	$(TraceOutDir)/traceProducer.cpp
-!endif
-
-XSLT = $(QUIETLY) $(REMOTE) $(RUN_JAVA) -classpath $(JvmtiOutDir) jvmtiGen
-
-XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \
-    $(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod
-
-!if EXISTS($(TraceAltSrcDir))
-XML_DEPS = $(XML_DEPS) $(TraceAltSrcDir)/traceevents.xml
-!endif
-
-.PHONY: all clean cleanall
-
-# #########################################################################
-
-default::
-	@if not exist $(TraceOutDir) mkdir $(TraceOutDir)
-
-$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
-	@echo Generating $@
-	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceEventIds.xsl -OUT $(TraceOutDir)/traceEventIds.hpp
-
-$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
-	@echo Generating $@
-	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceTypes.xsl -OUT $(TraceOutDir)/traceTypes.hpp
-
-!if !EXISTS($(TraceAltSrcDir))
-
-$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
-	@echo Generating OpenJDK $@
-	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceEventClasses.xsl -OUT $(TraceOutDir)/traceEventClasses.hpp
-
-!else
-
-$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
-	@echo Generating AltSrc $@
-	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceEventClasses.xsl -OUT $(TraceOutDir)/traceEventClasses.hpp
-
-$(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS)
-	@echo Generating AltSrc $@
-	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceProducer.xsl -OUT $(TraceOutDir)/traceProducer.cpp
-
-$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
-	@echo Generating AltSrc $@
-	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceRequestables.xsl -OUT $(TraceOutDir)/traceRequestables.hpp
-
-$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
-	@echo Generating AltSrc $@
-	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceEventControl.xsl -OUT $(TraceOutDir)/traceEventControl.hpp
-
-!endif
-
-# #########################################################################
-
-cleanall :
-	rm $(TraceGeneratedFiles)
-
-
--- a/make/windows/makefiles/vm.make	Thu Aug 01 03:44:03 2019 +0100
+++ b/make/windows/makefiles/vm.make	Mon Aug 12 18:30:40 2019 +0300
@@ -145,7 +145,7 @@
 VM_PATH=../generated
 VM_PATH=$(VM_PATH);../generated/adfiles
 VM_PATH=$(VM_PATH);../generated/jvmtifiles
-VM_PATH=$(VM_PATH);../generated/tracefiles
+VM_PATH=$(VM_PATH);../generated/jfrfiles
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/c1
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/compiler
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/code
@@ -173,11 +173,6 @@
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/cpu/$(Platform_arch)/vm
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/opto
 
-!if exists($(ALTSRC)\share\vm\jfr)
-VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr
-VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/buffers
-!endif
-
 VM_PATH={$(VM_PATH)}
 
 # Special case files not using precompiled header files.
@@ -209,6 +204,12 @@
 bytecodeInterpreterWithChecks.obj: ..\generated\jvmtifiles\bytecodeInterpreterWithChecks.cpp
         $(CXX) $(CXX_FLAGS) $(CXX_DONT_USE_PCH) /c ..\generated\jvmtifiles\bytecodeInterpreterWithChecks.cpp
 
+iphlp_interface.obj: $(WorkSpace)\src\os\windows\vm\iphlp_interface.cpp
+        $(CXX) $(CXX_FLAGS) $(CXX_DONT_USE_PCH) /c $(WorkSpace)\src\os\windows\vm\iphlp_interface.cpp
+
+os_perf_windows.obj: $(WorkSpace)\src\os\windows\vm\os_perf_windows.cpp
+        $(CXX) $(CXX_FLAGS) $(CXX_DONT_USE_PCH) /c $(WorkSpace)\src\os\windows\vm\os_perf_windows.cpp
+
 # Default rules for the Virtual Machine
 {$(COMMONSRC)\share\vm\c1}.cpp.obj::
         $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
@@ -384,13 +385,79 @@
 {..\generated\jvmtifiles}.cpp.obj::
         $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
-{..\generated\tracefiles}.cpp.obj::
+{..\generated\jfrfiles}.cpp.obj::
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
+
+{$(COMMONSRC)\share\vm\jfr}.cpp.obj::
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
+
+{$(COMMONSRC)\share\vm\jfr\dcmd}.cpp.obj::
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
+
+{$(COMMONSRC)\share\vm\jfr\instrumentation}.cpp.obj::
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
+
+{$(COMMONSRC)\share\vm\jfr\jni}.cpp.obj::
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
+
+{$(COMMONSRC)\share\vm\jfr\leakprofiler}.cpp.obj::
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
+
+{$(COMMONSRC)\share\vm\jfr\leakprofiler\chains}.cpp.obj::
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
+
+{$(COMMONSRC)\share\vm\jfr\leakprofiler\checkpoint}.cpp.obj::
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
+
+{$(COMMONSRC)\share\vm\jfr\leakprofiler\sampling}.cpp.obj::
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
+
+{$(COMMONSRC)\share\vm\jfr\leakprofiler\utilities}.cpp.obj::
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
+
+{$(COMMONSRC)\share\vm\jfr\metadata}.cpp.obj::
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
+
+{$(COMMONSRC)\share\vm\jfr\periodic}.cpp.obj::
         $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
-{$(ALTSRC)\share\vm\jfr}.cpp.obj::
+{$(COMMONSRC)\share\vm\jfr\periodic\sampling}.cpp.obj::
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
+
+{$(COMMONSRC)\share\vm\jfr\recorder}.cpp.obj::
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
+
+{$(COMMONSRC)\share\vm\jfr\recorder\checkpoint}.cpp.obj::
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
+
+{$(COMMONSRC)\share\vm\jfr\recorder\checkpoint\types}.cpp.obj::
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
+
+{$(COMMONSRC)\share\vm\jfr\recorder\checkpoint\types\traceid}.cpp.obj::
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
+
+{$(COMMONSRC)\share\vm\jfr\recorder\repository}.cpp.obj::
         $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
-{$(ALTSRC)\share\vm\jfr\buffers}.cpp.obj::
+{$(COMMONSRC)\share\vm\jfr\recorder\service}.cpp.obj::
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
+
+{$(COMMONSRC)\share\vm\jfr\recorder\stacktrace}.cpp.obj::
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
+
+{$(COMMONSRC)\share\vm\jfr\recorder\storage}.cpp.obj::
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
+
+{$(COMMONSRC)\share\vm\jfr\recorder\stringpool}.cpp.obj::
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
+
+{$(COMMONSRC)\share\vm\jfr\support}.cpp.obj::
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
+
+{$(COMMONSRC)\share\vm\jfr\utilities}.cpp.obj::
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
+
+{$(COMMONSRC)\share\vm\jfr\writers}.cpp.obj::
         $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 default::
--- a/make/windows/projectfiles/common/Makefile	Thu Aug 01 03:44:03 2019 +0100
+++ b/make/windows/projectfiles/common/Makefile	Mon Aug 12 18:30:40 2019 +0300
@@ -61,8 +61,8 @@
 !include $(HOTSPOTWORKSPACE)/make/windows/makefiles/jvmti.make
 
 # Pick up rules for building trace
-TraceOutDir=$(HOTSPOTBUILDSPACE)\$(Variant)\generated\tracefiles
-!include $(HOTSPOTWORKSPACE)/make/windows/makefiles/trace.make
+JfrOutDir=$(HOTSPOTBUILDSPACE)\$(Variant)\generated\jfrfiles
+!include $(HOTSPOTWORKSPACE)/make/windows/makefiles/jfr.make
 
 !if "$(Variant)" == "compiler2"
 # Pick up rules for building adlc
--- a/src/cpu/ppc/vm/frame_ppc.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/cpu/ppc/vm/frame_ppc.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -49,14 +49,134 @@
 
 bool frame::safe_for_sender(JavaThread *thread) {
   bool safe = false;
-  address   cursp = (address)sp();
-  address   curfp = (address)fp();
-  if ((cursp != NULL && curfp != NULL &&
-      (cursp <= thread->stack_base() && cursp >= thread->stack_base() - thread->stack_size())) &&
-      (curfp <= thread->stack_base() && curfp >= thread->stack_base() - thread->stack_size())) {
-      safe = true;
+  address sp = (address)_sp;
+  address fp = (address)_fp;
+  address unextended_sp = (address)_unextended_sp;
+
+  // Consider stack guards when trying to determine "safe" stack pointers
+  static size_t stack_guard_size = os::uses_stack_guard_pages() ?
+    thread->stack_red_zone_size() + thread->stack_yellow_zone_size() : 0;
+  size_t usable_stack_size = thread->stack_size() - stack_guard_size;
+
+  // sp must be within the usable part of the stack (not in guards)
+  bool sp_safe = (sp < thread->stack_base()) &&
+                 (sp >= thread->stack_base() - usable_stack_size);
+
+
+  if (!sp_safe) {
+    return false;
+  }
+
+  // Unextended sp must be within the stack and above or equal sp
+  bool unextended_sp_safe = (unextended_sp < thread->stack_base()) && (unextended_sp >= sp);
+
+  if (!unextended_sp_safe) {
+    return false;
   }
-  return safe;
+
+  // An fp must be within the stack and above (but not equal to) sp.
+  bool fp_safe = (fp <= thread->stack_base()) && (fp > sp);
+  // An interpreter fp must be within the stack and above (but not equal to) sp
+  bool fp_interp_safe = (fp <= thread->stack_base()) &&  (fp > sp) &&
+    ((fp - sp) >= (ijava_state_size + top_ijava_frame_abi_size));
+
+  // We know sp/unextended_sp are safe, only fp is questionable here
+
+  // If the current frame is known to the code cache then we can attempt
+  // to construct the sender and do some validation of it. This goes a long
+  // way toward eliminating issues when we get into frame construction code.
+
+  if (_cb != NULL) {
+    // Entry frame checks
+    if (is_entry_frame()) {
+      // An entry frame must have a valid fp.
+      return fp_safe && is_entry_frame_valid(thread);
+    }
+
+    // Now check if the frame is complete and the test is
+    // reliable. Unfortunately we can only check frame completeness for
+    // runtime stubs and nmethods. Other generic buffer blobs are more
+    // problematic so we just assume they are OK. Adapter blobs never have a
+    // complete frame and are never OK
+    if (!_cb->is_frame_complete_at(_pc)) {
+      if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
+        return false;
+      }
+    }
+
+    // Could just be some random pointer within the codeBlob.
+    if (!_cb->code_contains(_pc)) {
+      return false;
+    }
+
+    if (is_interpreted_frame() && !fp_interp_safe) {
+      return false;
+    }
+
+    abi_minframe* sender_abi = (abi_minframe*) fp;
+    intptr_t* sender_sp = (intptr_t*) fp;
+    address   sender_pc = (address) sender_abi->lr;
+
+    // We must always be able to find a recognizable pc.
+    CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
+    if (sender_blob == NULL) {
+      return false;
+    }
+
+    // Could be a zombie method
+    if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
+      return false;
+    }
+
+    // It should be safe to construct the sender though it might not be valid.
+
+    frame sender(sender_sp, sender_pc);
+
+    // Do we have a valid fp?
+    address sender_fp = (address) sender.fp();
+
+    // sender_fp must be within the stack and above (but not
+    // equal) current frame's fp.
+    if (sender_fp > thread->stack_base() || sender_fp <= fp) {
+        return false;
+    }
+
+    // If the potential sender is the interpreter then we can do some more checking.
+    if (Interpreter::contains(sender_pc)) {
+      return sender.is_interpreted_frame_valid(thread);
+    }
+
+    // Could just be some random pointer within the codeBlob.
+    if (!sender.cb()->code_contains(sender_pc)) {
+      return false;
+    }
+
+    // We should never be able to see an adapter if the current frame is something from code cache.
+    if (sender_blob->is_adapter_blob()) {
+      return false;
+    }
+
+    if (sender.is_entry_frame()) {
+      return sender.is_entry_frame_valid(thread);
+    }
+
+    // Frame size is always greater than zero. If the sender frame size is zero or less,
+    // something is really weird and we better give up.
+    if (sender_blob->frame_size() <= 0) {
+      return false;
+    }
+
+    return true;
+  }
+
+  // Must be a native-compiled frame. Since the sender will try to use fp to
+  // find linkages, it must be safe.
+
+  if (!fp_safe) {
+    return false;
+  }
+
+  return true;
 }
 
 bool frame::is_interpreted_frame() const  {
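
The sanity checks above boil down to bounds tests against the usable (non-guard) part of the thread stack; a minimal standalone sketch, with the helper name assumed for illustration:

    // Minimal sketch of the bounds test used by safe_for_sender() above:
    // a pointer is "safe" if it lies inside the usable stack range.
    static bool within_usable_stack(address p, address stack_base, size_t usable_size) {
      return (p < stack_base) && (p >= stack_base - usable_size);
    }
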
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/vm_version_ext_ppc.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "jvm.h"
+#include "memory/allocation.hpp"
+#include "memory/allocation.inline.hpp"
+#include "runtime/vm_version.hpp"
+#include "vm_version_ext_ppc.hpp"
+
+// VM_Version_Ext statics
+int   VM_Version_Ext::_no_of_threads = 0;
+int   VM_Version_Ext::_no_of_cores = 0;
+int   VM_Version_Ext::_no_of_sockets = 0;
+bool  VM_Version_Ext::_initialized = false;
+char  VM_Version_Ext::_cpu_name[CPU_TYPE_DESC_BUF_SIZE] = {0};
+char  VM_Version_Ext::_cpu_desc[CPU_DETAILED_DESC_BUF_SIZE] = {0};
+
+// get cpu information.
+void VM_Version_Ext::initialize_cpu_information(void) {
+  // do nothing if cpu info has been initialized
+  if (_initialized) {
+    return;
+  }
+
+  _no_of_cores  = os::processor_count();
+  _no_of_threads = _no_of_cores;
+  _no_of_sockets = _no_of_cores;
+  snprintf(_cpu_name, CPU_TYPE_DESC_BUF_SIZE, "PowerPC POWER%lu", PowerArchitecturePPC64);
+  snprintf(_cpu_desc, CPU_DETAILED_DESC_BUF_SIZE, "PPC %s", cpu_features());
+  _initialized = true;
+}
+
+int VM_Version_Ext::number_of_threads(void) {
+  initialize_cpu_information();
+  return _no_of_threads;
+}
+
+int VM_Version_Ext::number_of_cores(void) {
+  initialize_cpu_information();
+  return _no_of_cores;
+}
+
+int VM_Version_Ext::number_of_sockets(void) {
+  initialize_cpu_information();
+  return _no_of_sockets;
+}
+
+const char* VM_Version_Ext::cpu_name(void) {
+  initialize_cpu_information();
+  char* tmp = NEW_C_HEAP_ARRAY_RETURN_NULL(char, CPU_TYPE_DESC_BUF_SIZE, mtTracing);
+  if (NULL == tmp) {
+    return NULL;
+  }
+  strncpy(tmp, _cpu_name, CPU_TYPE_DESC_BUF_SIZE);
+  return tmp;
+}
+
+const char* VM_Version_Ext::cpu_description(void) {
+  initialize_cpu_information();
+  char* tmp = NEW_C_HEAP_ARRAY_RETURN_NULL(char, CPU_DETAILED_DESC_BUF_SIZE, mtTracing);
+  if (NULL == tmp) {
+    return NULL;
+  }
+  strncpy(tmp, _cpu_desc, CPU_DETAILED_DESC_BUF_SIZE);
+  return tmp;
+}
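
Note that cpu_name() and cpu_description() hand back C-heap buffers tagged mtTracing, so ownership passes to the caller; a hedged caller-side sketch, assuming the usual HotSpot FREE_C_HEAP_ARRAY macro:

    // Hypothetical caller sketch: the returned string is C-heap allocated and
    // owned by the caller, so it must be freed explicitly.
    const char* name = VM_Version_Ext::cpu_name();
    if (name != NULL) {
      // ... record or log the CPU name ...
      FREE_C_HEAP_ARRAY(char, name, mtTracing);
    }
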
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/vm_version_ext_ppc.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_VM_VERSION_EXT_PPC_HPP
+#define CPU_PPC_VM_VM_VERSION_EXT_PPC_HPP
+
+#include "utilities/macros.hpp"
+#include "vm_version_ppc.hpp"
+
+#define CPU_INFO        "cpu_info"
+#define CPU_TYPE        "fpu_type"
+#define CPU_DESCRIPTION "implementation"
+#define CHIP_ID         "chip_id"
+#define CORE_ID         "core_id"
+
+class VM_Version_Ext : public VM_Version {
+ private:
+
+  static const size_t      CPU_TYPE_DESC_BUF_SIZE = 256;
+  static const size_t      CPU_DETAILED_DESC_BUF_SIZE = 4096;
+
+  static int               _no_of_threads;
+  static int               _no_of_cores;
+  static int               _no_of_sockets;
+  static bool              _initialized;
+  static char              _cpu_name[CPU_TYPE_DESC_BUF_SIZE];
+  static char              _cpu_desc[CPU_DETAILED_DESC_BUF_SIZE];
+
+  static void initialize_cpu_information(void);
+
+ public:
+
+  static int number_of_threads(void);
+  static int number_of_cores(void);
+  static int number_of_sockets(void);
+
+  static const char* cpu_name(void);
+  static const char* cpu_description(void);
+};
+
+#endif // CPU_PPC_VM_VM_VERSION_EXT_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/sparc/vm/vm_version_ext_sparc.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "jvm.h"
+#include "memory/allocation.hpp"
+#include "memory/allocation.inline.hpp"
+#include "vm_version_ext_sparc.hpp"
+
+// VM_Version_Ext statics
+int   VM_Version_Ext::_no_of_threads = 0;
+int   VM_Version_Ext::_no_of_cores = 0;
+int   VM_Version_Ext::_no_of_sockets = 0;
+#if defined(SOLARIS)
+kid_t VM_Version_Ext::_kcid = -1;
+#endif
+char  VM_Version_Ext::_cpu_name[CPU_TYPE_DESC_BUF_SIZE] = {0};
+char  VM_Version_Ext::_cpu_desc[CPU_DETAILED_DESC_BUF_SIZE] = {0};
+
+#if defined(SOLARIS)
+// get cpu information. It takes into account whether the kstat chain id
+// has changed and updates the info if necessary.
+bool VM_Version_Ext::initialize_cpu_information(void) {
+
+  int core_id = -1;
+  int chip_id = -1;
+  int len = 0;
+  char* src_string = NULL;
+  kstat_ctl_t* kc = kstat_open();
+  if (!kc) {
+    return false;
+  }
+
+  // check if kstat chain has been updated
+  kid_t kcid = kstat_chain_update(kc);
+  if (kcid == -1) {
+    kstat_close(kc);
+    return false;
+  }
+
+  bool updated = ((kcid > 0) && (kcid != _kcid)) ||
+                 ((kcid == 0) && (_kcid == -1));
+  if (!updated) {
+    kstat_close(kc);
+    return true;
+  }
+
+  // update the cached _kcid
+  _kcid = kcid;
+
+  // find the number of online processors;
+  // for modern processors, this is also the number of
+  // hardware threads.
+  _no_of_threads  = sysconf(_SC_NPROCESSORS_ONLN);
+
+  if (_no_of_threads <= 0 ) {
+    kstat_close(kc);
+    return false;
+  }
+
+  _no_of_cores = 0;
+  _no_of_sockets = 0;
+
+  // loop through the kstat chain
+  kstat_t* ksp = NULL;
+  for (ksp = kc->kc_chain; ksp != NULL; ksp = ksp->ks_next) {
+    // only interested in "cpu_info"
+    if (strcmp(ksp->ks_module, (char*)CPU_INFO) == 0) {
+      if (kstat_read(kc, ksp, NULL) == -1) {
+        kstat_close(kc);
+        return false;
+      }
+      if (ksp->ks_data != NULL) {
+        kstat_named_t* knm = (kstat_named_t *)ksp->ks_data;
+        // loop through the number of fields in each record
+        for (int i = 0; i < ksp->ks_ndata; i++) {
+          // set cpu type if it hasn't been already set
+          if ((strcmp((const char*)&(knm[i].name), CPU_TYPE) == 0) &&
+                     (_cpu_name[0] == '\0')) {
+            if (knm[i].data_type == KSTAT_DATA_STRING) {
+              src_string = (char*)KSTAT_NAMED_STR_PTR(&knm[i]);
+            } else {
+              src_string = (char*)&(knm[i].value.c[0]);
+            }
+            len = strlen(src_string);
+            if (len < CPU_TYPE_DESC_BUF_SIZE) {
+              jio_snprintf(_cpu_name, CPU_TYPE_DESC_BUF_SIZE,
+                                         "%s", src_string);
+            }
+          }
+
+          // set cpu description if it hasn't been already set
+          if ((strcmp((const char*)&(knm[i].name), CPU_DESCRIPTION) == 0) &&
+                      (_cpu_desc[0] == '\0')) {
+            if (knm[i].data_type == KSTAT_DATA_STRING) {
+              src_string = (char*)KSTAT_NAMED_STR_PTR(&knm[i]);
+            } else {
+              src_string = (char*)&(knm[i].value.c[0]);
+            }
+            len = strlen(src_string);
+            if (len < CPU_DETAILED_DESC_BUF_SIZE) {
+              jio_snprintf(_cpu_desc, CPU_DETAILED_DESC_BUF_SIZE,
+                                         "%s", src_string);
+            }
+          }
+
+          // count the number of sockets based on the chip id
+          if (strcmp((const char*)&(knm[i].name), CHIP_ID) == 0) {
+            if (chip_id != knm[i].value.l) {
+              chip_id = knm[i].value.l;
+              _no_of_sockets++;
+            }
+          }
+
+          // count the number of cores based on the core id
+          if (strcmp((const char*)&(knm[i].name), CORE_ID) == 0) {
+            if (core_id != knm[i].value.l) {
+              core_id = knm[i].value.l;
+              _no_of_cores++;
+            }
+          }
+        }
+      }
+    }
+  }
+
+  kstat_close(kc);
+  return true;
+}
+#elif defined(LINUX)
+// get cpu information.
+bool VM_Version_Ext::initialize_cpu_information(void) {
+  // Not yet implemented.
+  return false;
+}
+#endif
+
+int VM_Version_Ext::number_of_threads(void) {
+  initialize_cpu_information();
+  return _no_of_threads;
+}
+
+int VM_Version_Ext::number_of_cores(void) {
+  initialize_cpu_information();
+  return _no_of_cores;
+}
+
+int VM_Version_Ext::number_of_sockets(void) {
+  initialize_cpu_information();
+  return _no_of_sockets;
+}
+
+const char* VM_Version_Ext::cpu_name(void) {
+  if (!initialize_cpu_information()) {
+    return NULL;
+  }
+  char* tmp = NEW_C_HEAP_ARRAY_RETURN_NULL(char, CPU_TYPE_DESC_BUF_SIZE, mtTracing);
+  if (NULL == tmp) {
+    return NULL;
+  }
+  strncpy(tmp, _cpu_name, CPU_TYPE_DESC_BUF_SIZE);
+  return tmp;
+}
+
+const char* VM_Version_Ext::cpu_description(void) {
+  if (!initialize_cpu_information()) {
+    return NULL;
+  }
+  char* tmp = NEW_C_HEAP_ARRAY_RETURN_NULL(char, CPU_DETAILED_DESC_BUF_SIZE, mtTracing);
+  if (NULL == tmp) {
+    return NULL;
+  }
+  strncpy(tmp, _cpu_desc, CPU_DETAILED_DESC_BUF_SIZE);
+  return tmp;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/sparc/vm/vm_version_ext_sparc.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_SPARC_VM_VM_VERSION_EXT_SPARC_HPP
+#define CPU_SPARC_VM_VM_VERSION_EXT_SPARC_HPP
+
+#include "utilities/macros.hpp"
+#include "vm_version_sparc.hpp"
+
+#if defined(SOLARIS)
+#include <kstat.h>
+#include <sys/processor.h>
+#endif
+
+#define CPU_INFO        "cpu_info"
+#define CPU_TYPE        "fpu_type"
+#define CPU_DESCRIPTION "implementation"
+#define CHIP_ID         "chip_id"
+#define CORE_ID         "core_id"
+
+class VM_Version_Ext : public VM_Version {
+ private:
+
+  static const size_t      CPU_TYPE_DESC_BUF_SIZE = 256;
+  static const size_t      CPU_DETAILED_DESC_BUF_SIZE = 4096;
+
+  static int               _no_of_threads;
+  static int               _no_of_cores;
+  static int               _no_of_sockets;
+#if defined(SOLARIS)
+  static kid_t             _kcid;
+#endif
+  static char              _cpu_name[CPU_TYPE_DESC_BUF_SIZE];
+  static char              _cpu_desc[CPU_DETAILED_DESC_BUF_SIZE];
+
+  static bool initialize_cpu_information(void);
+
+ public:
+
+  static int number_of_threads(void);
+  static int number_of_cores(void);
+  static int number_of_sockets(void);
+
+  static const char* cpu_name(void);
+  static const char* cpu_description(void);
+};
+
+#endif // CPU_SPARC_VM_VM_VERSION_EXT_SPARC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/x86/vm/rdtsc_x86.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+//#include "os_linux_x86.inline.hpp"
+#ifdef TARGET_OS_ARCH_linux_x86
+# include "os_linux_x86.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_bsd_x86
+# include "os_bsd_x86.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_windows_x86
+# include "os_windows_x86.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_solaris_x86
+# include "os_solaris_x86.inline.hpp"
+#endif
+
+#include "rdtsc_x86.hpp"
+#include "runtime/thread.inline.hpp"
+#include "vm_version_ext_x86.hpp"
+#include "runtime/os.hpp"
+
+// The following header contains the implementations of rdtsc()
+
+static jlong _epoch = 0;
+static bool rdtsc_elapsed_counter_enabled = false;
+static jlong tsc_frequency = 0;
+
+static jlong set_epoch() {
+  assert(0 == _epoch, "invariant");
+  _epoch = os::rdtsc();
+  return _epoch;
+}
+
+// Base loop to estimate the tick frequency of the tsc counter from user mode.
+// Volatiles and sleep() are used to prevent the compiler from applying optimizations.
+static void do_time_measurements(volatile jlong& time_base,
+                                 volatile jlong& time_fast,
+                                 volatile jlong& time_base_elapsed,
+                                 volatile jlong& time_fast_elapsed) {
+  static const unsigned int FT_SLEEP_MILLISECS = 1;
+  const unsigned int loopcount = 3;
+
+  volatile jlong start = 0;
+  volatile jlong fstart = 0;
+  volatile jlong end = 0;
+  volatile jlong fend = 0;
+
+  // Figure out the difference between rdtsc and os provided timer.
+  // base algorithm adopted from JRockit.
+  for (unsigned int times = 0; times < loopcount; times++) {
+    start = os::elapsed_counter();
+    OrderAccess::fence();
+    fstart = os::rdtsc();
+
+    // use sleep to prevent compiler from optimizing
+    os::sleep(Thread::current(), FT_SLEEP_MILLISECS, true);
+
+    end = os::elapsed_counter();
+    OrderAccess::fence();
+    fend = os::rdtsc();
+
+    time_base += end - start;
+    time_fast += fend - fstart;
+
+    // basis for calculating the os tick start
+    // to fast time tick start offset
+    time_base_elapsed += end;
+    time_fast_elapsed += (fend - _epoch);
+  }
+
+  time_base /= loopcount;
+  time_fast /= loopcount;
+  time_base_elapsed /= loopcount;
+  time_fast_elapsed /= loopcount;
+}
+
+static jlong initialize_frequency() {
+  assert(0 == tsc_frequency, "invariant");
+  assert(0 == _epoch, "invariant");
+  const jlong initial_counter = set_epoch();
+  if (initial_counter == 0) {
+    return 0;
+  }
+  // os time frequency
+  static double os_freq = (double)os::elapsed_frequency();
+  assert(os_freq > 0, "os_elapsed frequency corruption!");
+
+  double tsc_freq = .0;
+  double os_to_tsc_conv_factor = 1.0;
+
+  // if platform supports invariant tsc,
+  // apply higher resolution and granularity for conversion calculations
+  if (VM_Version_Ext::supports_tscinv_ext()) {
+    // for invariant tsc platforms, take the maximum qualified cpu frequency
+    tsc_freq = (double)VM_Version_Ext::maximum_qualified_cpu_frequency();
+    os_to_tsc_conv_factor = tsc_freq / os_freq;
+  } else {
+    // use measurements to estimate
+    // a conversion factor and the tsc frequency
+
+    volatile jlong time_base = 0;
+    volatile jlong time_fast = 0;
+    volatile jlong time_base_elapsed = 0;
+    volatile jlong time_fast_elapsed = 0;
+
+    // do measurements to get base data
+    // on os timer and fast ticks tsc time relation.
+    do_time_measurements(time_base, time_fast, time_base_elapsed, time_fast_elapsed);
+
+    // if invalid measurements, cannot proceed
+    if (time_fast == 0 || time_base == 0) {
+      return 0;
+    }
+
+    os_to_tsc_conv_factor = (double)time_fast / (double)time_base;
+    if (os_to_tsc_conv_factor > 1) {
+      // estimate on tsc counter frequency
+      tsc_freq = os_to_tsc_conv_factor * os_freq;
+    }
+  }
+
+  if ((tsc_freq < 0) || (tsc_freq > 0 && tsc_freq <= os_freq) || (os_to_tsc_conv_factor <= 1)) {
+    // safer to run with normal os time
+    tsc_freq = .0;
+  }
+
+  // frequency of the tsc_counter
+  return (jlong)tsc_freq;
+}
+
+static bool initialize_elapsed_counter() {
+  tsc_frequency = initialize_frequency();
+  return tsc_frequency != 0 && _epoch != 0;
+}
+
+static bool ergonomics() {
+  const bool invtsc_support = Rdtsc::is_supported();
+  if (FLAG_IS_DEFAULT(UseFastUnorderedTimeStamps) && invtsc_support) {
+    FLAG_SET_ERGO(bool, UseFastUnorderedTimeStamps, true);
+  }
+
+  bool ft_enabled = UseFastUnorderedTimeStamps && invtsc_support;
+
+  if (!ft_enabled) {
+    if (UseFastUnorderedTimeStamps && VM_Version::supports_tsc()) {
+      warning("\nThe hardware does not support invariant tsc (INVTSC) register and/or cannot guarantee tsc synchronization between sockets at startup.\n"\
+        "Values returned via rdtsc() are not guaranteed to be accurate, esp. when comparing values from cross sockets reads. Enabling UseFastUnorderedTimeStamps on non-invariant tsc hardware should be considered experimental.\n");
+      ft_enabled = true;
+    }
+  }
+
+  if (!ft_enabled) {
+    // Warn if unable to support command-line flag
+    if (UseFastUnorderedTimeStamps && !VM_Version::supports_tsc()) {
+      warning("Ignoring UseFastUnorderedTimeStamps, hardware does not support normal tsc");
+    }
+  }
+
+  return ft_enabled;
+}
+
+bool Rdtsc::is_supported() {
+  return VM_Version_Ext::supports_tscinv_ext();
+}
+
+bool Rdtsc::is_elapsed_counter_enabled() {
+  return rdtsc_elapsed_counter_enabled;
+}
+
+jlong Rdtsc::frequency() {
+  return tsc_frequency;
+}
+
+jlong Rdtsc::elapsed_counter() {
+  return os::rdtsc() - _epoch;
+}
+
+jlong Rdtsc::epoch() {
+  return _epoch;
+}
+
+jlong Rdtsc::raw() {
+  return os::rdtsc();
+}
+
+bool Rdtsc::initialize() {
+  static bool initialized = false;
+  if (!initialized) {
+    assert(!rdtsc_elapsed_counter_enabled, "invariant");
+    VM_Version_Ext::initialize();
+    assert(0 == tsc_frequency, "invariant");
+    assert(0 == _epoch, "invariant");
+    bool result = initialize_elapsed_counter(); // init hw
+    if (result) {
+      result = ergonomics(); // check logical state
+    }
+    rdtsc_elapsed_counter_enabled = result;
+    initialized = true;
+  }
+  return rdtsc_elapsed_counter_enabled;
+}
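
Given the frequency estimated above, converting raw tsc deltas into wall-clock time is a plain division; a small hedged sketch (helper name assumed):

    // Hypothetical helper: convert elapsed tsc ticks to seconds using the
    // frequency computed by initialize_frequency(); returns 0.0 if disabled.
    static double rdtsc_ticks_to_seconds(jlong ticks) {
      const jlong freq = Rdtsc::frequency();
      return (freq > 0) ? (double)ticks / (double)freq : 0.0;
    }
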
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/x86/vm/rdtsc_x86.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_X86_VM_RDTSC_X86_HPP
+#define CPU_X86_VM_RDTSC_X86_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/macros.hpp"
+
+// Interface to the x86 rdtsc() time counter, if available.
+// Not guaranteed to be synchronized across hardware threads and
+// therefore software threads, and can be updated asynchronously
+// by software. elapsed_counter() can jump backwards
+// as well as jump forward when threads query different cores/sockets.
+// Very much not recommended for general use.
+// INVTSC is a minimal requirement for auto-enablement.
+
+class Rdtsc : AllStatic {
+ public:
+  static jlong elapsed_counter(); // provides quick time stamps
+  static jlong frequency();       // tsc register
+  static bool  is_supported();    // InvariantTSC
+  static jlong raw();             // direct rdtsc() access
+  static bool  is_elapsed_counter_enabled(); // turn off with -XX:-UseFastUnorderedTimeStamps
+  static jlong epoch();
+  static bool  initialize();
+};
+
+#endif // CPU_X86_VM_RDTSC_X86_HPP
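
A hedged usage sketch of this interface, assuming initialization succeeds on invariant-TSC hardware:

    // Hypothetical timing sketch using the Rdtsc interface declared above.
    if (Rdtsc::initialize() && Rdtsc::is_elapsed_counter_enabled()) {
      const jlong start = Rdtsc::elapsed_counter();
      // ... section being timed ...
      const jlong ticks = Rdtsc::elapsed_counter() - start;
      // divide by Rdtsc::frequency() to convert ticks to seconds
    }
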
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/x86/vm/vm_version_ext_x86.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,966 @@
+/*
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jvm.h"
+#include "utilities/macros.hpp"
+#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/java.hpp"
+#include "runtime/stubCodeGenerator.hpp"
+#include "vm_version_ext_x86.hpp"
+
+typedef enum {
+   CPU_FAMILY_8086_8088  = 0,
+   CPU_FAMILY_INTEL_286  = 2,
+   CPU_FAMILY_INTEL_386  = 3,
+   CPU_FAMILY_INTEL_486  = 4,
+   CPU_FAMILY_PENTIUM    = 5,
+   CPU_FAMILY_PENTIUMPRO = 6,    // Same family several models
+   CPU_FAMILY_PENTIUM_4  = 0xF
+} FamilyFlag;
+
+ typedef enum {
+    RDTSCP_FLAG  = 0x08000000, // bit 27
+    INTEL64_FLAG = 0x20000000  // bit 29
+  } _featureExtendedEdxFlag;
+
+#define CPUID_STANDARD_FN   0x0
+#define CPUID_STANDARD_FN_1 0x1
+#define CPUID_STANDARD_FN_4 0x4
+#define CPUID_STANDARD_FN_B 0xb
+
+#define CPUID_EXTENDED_FN   0x80000000
+#define CPUID_EXTENDED_FN_1 0x80000001
+#define CPUID_EXTENDED_FN_2 0x80000002
+#define CPUID_EXTENDED_FN_3 0x80000003
+#define CPUID_EXTENDED_FN_4 0x80000004
+#define CPUID_EXTENDED_FN_7 0x80000007
+#define CPUID_EXTENDED_FN_8 0x80000008
+
+typedef enum {
+   FPU_FLAG     = 0x00000001,
+   VME_FLAG     = 0x00000002,
+   DE_FLAG      = 0x00000004,
+   PSE_FLAG     = 0x00000008,
+   TSC_FLAG     = 0x00000010,
+   MSR_FLAG     = 0x00000020,
+   PAE_FLAG     = 0x00000040,
+   MCE_FLAG     = 0x00000080,
+   CX8_FLAG     = 0x00000100,
+   APIC_FLAG    = 0x00000200,
+   SEP_FLAG     = 0x00000800,
+   MTRR_FLAG    = 0x00001000,
+   PGE_FLAG     = 0x00002000,
+   MCA_FLAG     = 0x00004000,
+   CMOV_FLAG    = 0x00008000,
+   PAT_FLAG     = 0x00010000,
+   PSE36_FLAG   = 0x00020000,
+   PSNUM_FLAG   = 0x00040000,
+   CLFLUSH_FLAG = 0x00080000,
+   DTS_FLAG     = 0x00200000,
+   ACPI_FLAG    = 0x00400000,
+   MMX_FLAG     = 0x00800000,
+   FXSR_FLAG    = 0x01000000,
+   SSE_FLAG     = 0x02000000,
+   SSE2_FLAG    = 0x04000000,
+   SS_FLAG      = 0x08000000,
+   HTT_FLAG     = 0x10000000,
+   TM_FLAG      = 0x20000000
+} FeatureEdxFlag;
+
+static BufferBlob* cpuid_brand_string_stub_blob;
+static const int   cpuid_brand_string_stub_size = 550;
+
+extern "C" {
+  typedef void (*getCPUIDBrandString_stub_t)(void*);
+}
+
+static getCPUIDBrandString_stub_t getCPUIDBrandString_stub = NULL;
+
+class VM_Version_Ext_StubGenerator: public StubCodeGenerator {
+ public:
+
+  VM_Version_Ext_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}
+
+  address generate_getCPUIDBrandString(void) {
+    // Flags to test CPU type.
+    const uint32_t HS_EFL_AC           = 0x40000;
+    const uint32_t HS_EFL_ID           = 0x200000;
+    // Values for when we don't have a CPUID instruction.
+    const int      CPU_FAMILY_SHIFT = 8;
+    const uint32_t CPU_FAMILY_386   = (3 << CPU_FAMILY_SHIFT);
+    const uint32_t CPU_FAMILY_486   = (4 << CPU_FAMILY_SHIFT);
+
+    Label detect_486, cpu486, detect_586, done, ext_cpuid;
+
+    StubCodeMark mark(this, "VM_Version_Ext", "getCPUIDNameInfo_stub");
+#   define __ _masm->
+
+    address start = __ pc();
+
+    //
+    // void getCPUIDBrandString(VM_Version::CpuidInfo* cpuid_info);
+    //
+    // LP64: rcx and rdx are first and second argument registers on Windows
+
+    __ push(rbp);
+#ifdef _LP64
+    __ mov(rbp, c_rarg0); // cpuid_info address
+#else
+    __ movptr(rbp, Address(rsp, 8)); // cpuid_info address
+#endif
+    __ push(rbx);
+    __ push(rsi);
+    __ pushf();          // preserve flags
+    __ pop(rax);
+    __ push(rax);
+    __ mov(rcx, rax);
+    //
+    // if we are unable to change the AC flag, we have a 386
+    //
+    __ xorl(rax, HS_EFL_AC);
+    __ push(rax);
+    __ popf();
+    __ pushf();
+    __ pop(rax);
+    __ cmpptr(rax, rcx);
+    __ jccb(Assembler::notEqual, detect_486);
+
+    __ movl(rax, CPU_FAMILY_386);
+    __ jmp(done);
+
+    //
+    // If we are unable to change the ID flag, we have a 486 which does
+    // not support the "cpuid" instruction.
+    //
+    __ bind(detect_486);
+    __ mov(rax, rcx);
+    __ xorl(rax, HS_EFL_ID);
+    __ push(rax);
+    __ popf();
+    __ pushf();
+    __ pop(rax);
+    __ cmpptr(rcx, rax);
+    __ jccb(Assembler::notEqual, detect_586);
+
+    __ bind(cpu486);
+    __ movl(rax, CPU_FAMILY_486);
+    __ jmp(done);
+
+    //
+    // At this point, we have a chip which supports the "cpuid" instruction
+    //
+    __ bind(detect_586);
+    __ xorl(rax, rax);
+    __ cpuid();
+    __ orl(rax, rax);
+    __ jcc(Assembler::equal, cpu486);   // if cpuid doesn't support an input
+                                        // value of at least 1, we give up and
+                                        // assume a 486
+
+    //
+    // Extended cpuid(0x80000000) for processor brand string detection
+    //
+    __ bind(ext_cpuid);
+    __ movl(rax, CPUID_EXTENDED_FN);
+    __ cpuid();
+    __ cmpl(rax, CPUID_EXTENDED_FN_4);
+    __ jcc(Assembler::below, done);
+
+    //
+    // Extended cpuid(0x80000002)  // first 16 bytes in brand string
+    //
+    __ movl(rax, CPUID_EXTENDED_FN_2);
+    __ cpuid();
+    __ lea(rsi, Address(rbp, in_bytes(VM_Version_Ext::proc_name_0_offset())));
+    __ movl(Address(rsi, 0), rax);
+    __ lea(rsi, Address(rbp, in_bytes(VM_Version_Ext::proc_name_1_offset())));
+    __ movl(Address(rsi, 0), rbx);
+    __ lea(rsi, Address(rbp, in_bytes(VM_Version_Ext::proc_name_2_offset())));
+    __ movl(Address(rsi, 0), rcx);
+    __ lea(rsi, Address(rbp, in_bytes(VM_Version_Ext::proc_name_3_offset())));
+    __ movl(Address(rsi,0), rdx);
+
+    //
+    // Extended cpuid(0x80000003) // next 16 bytes in brand string
+    //
+    __ movl(rax, CPUID_EXTENDED_FN_3);
+    __ cpuid();
+    __ lea(rsi, Address(rbp, in_bytes(VM_Version_Ext::proc_name_4_offset())));
+    __ movl(Address(rsi, 0), rax);
+    __ lea(rsi, Address(rbp, in_bytes(VM_Version_Ext::proc_name_5_offset())));
+    __ movl(Address(rsi, 0), rbx);
+    __ lea(rsi, Address(rbp, in_bytes(VM_Version_Ext::proc_name_6_offset())));
+    __ movl(Address(rsi, 0), rcx);
+    __ lea(rsi, Address(rbp, in_bytes(VM_Version_Ext::proc_name_7_offset())));
+    __ movl(Address(rsi,0), rdx);
+
+    //
+    // Extended cpuid(0x80000004) // last 16 bytes in brand string
+    //
+    __ movl(rax, CPUID_EXTENDED_FN_4);
+    __ cpuid();
+    __ lea(rsi, Address(rbp, in_bytes(VM_Version_Ext::proc_name_8_offset())));
+    __ movl(Address(rsi, 0), rax);
+    __ lea(rsi, Address(rbp, in_bytes(VM_Version_Ext::proc_name_9_offset())));
+    __ movl(Address(rsi, 0), rbx);
+    __ lea(rsi, Address(rbp, in_bytes(VM_Version_Ext::proc_name_10_offset())));
+    __ movl(Address(rsi, 0), rcx);
+    __ lea(rsi, Address(rbp, in_bytes(VM_Version_Ext::proc_name_11_offset())));
+    __ movl(Address(rsi,0), rdx);
+
+    //
+    // return
+    //
+    __ bind(done);
+    __ popf();
+    __ pop(rsi);
+    __ pop(rbx);
+    __ pop(rbp);
+    __ ret(0);
+
+#   undef __
+
+    return start;
+  };
+};
+
+
+// VM_Version_Ext statics
+const size_t VM_Version_Ext::VENDOR_LENGTH = 13;
+const size_t VM_Version_Ext::CPU_EBS_MAX_LENGTH = (3 * 4 * 4 + 1);
+const size_t VM_Version_Ext::CPU_TYPE_DESC_BUF_SIZE = 256;
+const size_t VM_Version_Ext::CPU_DETAILED_DESC_BUF_SIZE = 4096;
+char* VM_Version_Ext::_cpu_brand_string = NULL;
+jlong VM_Version_Ext::_max_qualified_cpu_frequency = 0;
+
+int VM_Version_Ext::_no_of_threads = 0;
+int VM_Version_Ext::_no_of_cores = 0;
+int VM_Version_Ext::_no_of_packages = 0;
+
+void VM_Version_Ext::initialize(void) {
+  ResourceMark rm;
+
+  cpuid_brand_string_stub_blob = BufferBlob::create("getCPUIDBrandString_stub", cpuid_brand_string_stub_size);
+  if (cpuid_brand_string_stub_blob == NULL) {
+    vm_exit_during_initialization("Unable to allocate getCPUIDBrandString_stub");
+  }
+  CodeBuffer c(cpuid_brand_string_stub_blob);
+  VM_Version_Ext_StubGenerator g(&c);
+  getCPUIDBrandString_stub = CAST_TO_FN_PTR(getCPUIDBrandString_stub_t,
+                                   g.generate_getCPUIDBrandString());
+}
+
+const char* VM_Version_Ext::cpu_model_description(void) {
+  uint32_t cpu_family = extended_cpu_family();
+  uint32_t cpu_model = extended_cpu_model();
+  const char* model = NULL;
+
+  if (cpu_family == CPU_FAMILY_PENTIUMPRO) {
+    for (uint32_t i = 0; i <= cpu_model; i++) {
+      model = _model_id_pentium_pro[i];
+      if (model == NULL) {
+        break;
+      }
+    }
+  }
+  return model;
+}
+
+const char* VM_Version_Ext::cpu_brand_string(void) {
+  if (_cpu_brand_string == NULL) {
+    _cpu_brand_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, CPU_EBS_MAX_LENGTH, mtInternal);
+    if (NULL == _cpu_brand_string) {
+      return NULL;
+    }
+    int ret_val = cpu_extended_brand_string(_cpu_brand_string, CPU_EBS_MAX_LENGTH);
+    if (ret_val != OS_OK) {
+      FREE_C_HEAP_ARRAY(char, _cpu_brand_string, mtInternal);
+      _cpu_brand_string = NULL;
+    }
+  }
+  return _cpu_brand_string;
+}
+
+const char* VM_Version_Ext::cpu_brand(void) {
+  const char*  brand  = NULL;
+
+  if ((_cpuid_info.std_cpuid1_ebx.value & 0xFF) > 0) {
+    int brand_num = _cpuid_info.std_cpuid1_ebx.value & 0xFF;
+    brand = _brand_id[0];
+    for (int i = 0; brand != NULL && i <= brand_num; i += 1) {
+      brand = _brand_id[i];
+    }
+  }
+  return brand;
+}
+
+bool VM_Version_Ext::cpu_is_em64t(void) {
+  return ((_cpuid_info.ext_cpuid1_edx.value & INTEL64_FLAG) == INTEL64_FLAG);
+}
+
+bool VM_Version_Ext::is_netburst(void) {
+  return (is_intel() && (extended_cpu_family() == CPU_FAMILY_PENTIUM_4));
+}
+
+bool VM_Version_Ext::supports_tscinv_ext(void) {
+  if (!supports_tscinv_bit()) {
+    return false;
+  }
+
+  if (is_intel()) {
+    return true;
+  }
+
+  if (is_amd()) {
+    return !is_amd_Barcelona();
+  }
+
+  return false;
+}
+
+void VM_Version_Ext::resolve_cpu_information_details(void) {
+
+  // in the future we want to base this information on proper cpu
+  // and cache topology enumeration such as:
+  // Intel 64 Architecture Processor Topology Enumeration
+  // which supports system cpu and cache topology enumeration
+  // either using 2xAPICIDs or initial APICIDs
+
+  // currently only rough cpu information estimates
+  // which will not necessarily reflect the exact configuration of the system
+
+  // this is the number of logical hardware threads
+  // visible to the operating system
+  _no_of_threads = os::processor_count();
+
+  // find out number of threads per cpu package
+  int threads_per_package = threads_per_core() * cores_per_cpu();
+
+  // use amount of threads visible to the process in order to guess number of sockets
+  _no_of_packages = _no_of_threads / threads_per_package;
+
+  // the process might only see a subset of the total number of threads
+  // from a single processor package (virtualization/resource management, for example).
+  // If so, just report a hard 1 as the number of packages.
+  if (0 == _no_of_packages) {
+    _no_of_packages = 1;
+  }
+
+  // estimate the number of cores
+  _no_of_cores = cores_per_cpu() * _no_of_packages;
+}
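As a concrete example of the estimate above (the counts are hypothetical): with 16 logical processors visible to the OS, 2 threads per core and 8 cores per package, threads_per_package is 16, so _no_of_packages becomes 16 / 16 = 1 and _no_of_cores becomes 8 * 1 = 8. If the process is restricted to fewer threads than one package provides, the quotient truncates to 0 and is bumped back up to 1.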
+
+int VM_Version_Ext::number_of_threads(void) {
+  if (_no_of_threads == 0) {
+   resolve_cpu_information_details();
+  }
+  return _no_of_threads;
+}
+
+int VM_Version_Ext::number_of_cores(void) {
+  if (_no_of_cores == 0) {
+    resolve_cpu_information_details();
+  }
+  return _no_of_cores;
+}
+
+int VM_Version_Ext::number_of_sockets(void) {
+  if (_no_of_packages == 0) {
+    resolve_cpu_information_details();
+  }
+  return _no_of_packages;
+}
+
+const char* VM_Version_Ext::cpu_family_description(void) {
+  int cpu_family_id = extended_cpu_family();
+  if (is_amd()) {
+    return _family_id_amd[cpu_family_id];
+  }
+  if (is_intel()) {
+    if (cpu_family_id == CPU_FAMILY_PENTIUMPRO) {
+      return cpu_model_description();
+    }
+    return _family_id_intel[cpu_family_id];
+  }
+  return "Unknown x86";
+}
+
+int VM_Version_Ext::cpu_type_description(char* const buf, size_t buf_len) {
+  assert(buf != NULL, "buffer is NULL!");
+  assert(buf_len >= CPU_TYPE_DESC_BUF_SIZE, "buffer len should at least be == CPU_TYPE_DESC_BUF_SIZE!");
+
+  const char* cpu_type = NULL;
+  const char* x64 = NULL;
+
+  if (is_intel()) {
+    cpu_type = "Intel";
+    x64 = cpu_is_em64t() ? " Intel64" : "";
+  } else if (is_amd()) {
+    cpu_type = "AMD";
+    x64 = cpu_is_em64t() ? " AMD64" : "";
+  } else {
+    cpu_type = "Unknown x86";
+    x64 = cpu_is_em64t() ? " x86_64" : "";
+  }
+
+  jio_snprintf(buf, buf_len, "%s %s%s SSE SSE2%s%s%s%s%s%s%s%s",
+    cpu_type,
+    cpu_family_description(),
+    supports_ht() ? " (HT)" : "",
+    supports_sse3() ? " SSE3" : "",
+    supports_ssse3() ? " SSSE3" : "",
+    supports_sse4_1() ? " SSE4.1" : "",
+    supports_sse4_2() ? " SSE4.2" : "",
+    supports_sse4a() ? " SSE4A" : "",
+    is_netburst() ? " Netburst" : "",
+    is_intel_family_core() ? " Core" : "",
+    x64);
+
+  return OS_OK;
+}
+
+int VM_Version_Ext::cpu_extended_brand_string(char* const buf, size_t buf_len) {
+  assert(buf != NULL, "buffer is NULL!");
+  assert(buf_len >= CPU_EBS_MAX_LENGTH, "buffer len should at least be == CPU_EBS_MAX_LENGTH!");
+  assert(getCPUIDBrandString_stub != NULL, "not initialized");
+
+  // invoke newly generated asm code to fetch CPU Brand String
+  getCPUIDBrandString_stub(&_cpuid_info);
+
+  // fetch results into buffer
+  *((uint32_t*) &buf[0])  = _cpuid_info.proc_name_0;
+  *((uint32_t*) &buf[4])  = _cpuid_info.proc_name_1;
+  *((uint32_t*) &buf[8])  = _cpuid_info.proc_name_2;
+  *((uint32_t*) &buf[12]) = _cpuid_info.proc_name_3;
+  *((uint32_t*) &buf[16]) = _cpuid_info.proc_name_4;
+  *((uint32_t*) &buf[20]) = _cpuid_info.proc_name_5;
+  *((uint32_t*) &buf[24]) = _cpuid_info.proc_name_6;
+  *((uint32_t*) &buf[28]) = _cpuid_info.proc_name_7;
+  *((uint32_t*) &buf[32]) = _cpuid_info.proc_name_8;
+  *((uint32_t*) &buf[36]) = _cpuid_info.proc_name_9;
+  *((uint32_t*) &buf[40]) = _cpuid_info.proc_name_10;
+  *((uint32_t*) &buf[44]) = _cpuid_info.proc_name_11;
+
+  return OS_OK;
+}
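A standalone sketch of what the generated stub and the copy above accomplish, using the GCC/Clang <cpuid.h> intrinsic instead of a generated stub (the intrinsic-based approach is an illustration, not part of this change): extended leaves 0x80000002 through 0x80000004 each return 16 bytes of the 48-byte brand string in eax/ebx/ecx/edx.

// Minimal sketch, assuming GCC or Clang on x86.
#include <cpuid.h>
#include <cstdio>
#include <cstring>

int main() {
  unsigned int regs[12];
  for (unsigned int i = 0; i < 3; i++) {
    // Each leaf fills four 32-bit registers, i.e. 16 bytes of the brand string.
    if (!__get_cpuid(0x80000002 + i, &regs[i * 4], &regs[i * 4 + 1],
                     &regs[i * 4 + 2], &regs[i * 4 + 3])) {
      return 1; // extended brand string leaves not supported
    }
  }
  char brand[49];
  memcpy(brand, regs, sizeof(regs)); // 48 bytes, in register order
  brand[48] = '\0';
  printf("%s\n", brand);
  return 0;
}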
+
+size_t VM_Version_Ext::cpu_write_support_string(char* const buf, size_t buf_len) {
+  guarantee(buf != NULL, "buffer is NULL!");
+  guarantee(buf_len > 0, "buffer len not enough!");
+
+  unsigned int flag = 0;
+  unsigned int fi = 0;
+  size_t       written = 0;
+  const char*  prefix = "";
+
+#define WRITE_TO_BUF(string)                                                          \
+  {                                                                                   \
+    int res = jio_snprintf(&buf[written], buf_len - written, "%s%s", prefix, string); \
+    if (res < 0) {                                                                    \
+      return buf_len - 1;                                                             \
+    }                                                                                 \
+    written += res;                                                                   \
+    if (prefix[0] == '\0') {                                                          \
+      prefix = ", ";                                                                  \
+    }                                                                                 \
+  }
+
+  for (flag = 1, fi = 0; flag <= 0x20000000 ; flag <<= 1, fi++) {
+    if (flag == HTT_FLAG && (((_cpuid_info.std_cpuid1_ebx.value >> 16) & 0xff) <= 1)) {
+      continue; /* no hyperthreading */
+    } else if (flag == SEP_FLAG && (cpu_family() == CPU_FAMILY_PENTIUMPRO && ((_cpuid_info.std_cpuid1_eax.value & 0xff) < 0x33))) {
+      continue; /* no fast system call */
+    }
+    if ((_cpuid_info.std_cpuid1_edx.value & flag) && strlen(_feature_edx_id[fi]) > 0) {
+      WRITE_TO_BUF(_feature_edx_id[fi]);
+    }
+  }
+
+  for (flag = 1, fi = 0; flag <= 0x20000000; flag <<= 1, fi++) {
+    if ((_cpuid_info.std_cpuid1_ecx.value & flag) && strlen(_feature_ecx_id[fi]) > 0) {
+      WRITE_TO_BUF(_feature_ecx_id[fi]);
+    }
+  }
+
+  for (flag = 1, fi = 0; flag <= 0x20000000 ; flag <<= 1, fi++) {
+    if ((_cpuid_info.ext_cpuid1_ecx.value & flag) && strlen(_feature_extended_ecx_id[fi]) > 0) {
+      WRITE_TO_BUF(_feature_extended_ecx_id[fi]);
+    }
+  }
+
+  for (flag = 1, fi = 0; flag <= 0x20000000; flag <<= 1, fi++) {
+    if ((_cpuid_info.ext_cpuid1_edx.value & flag) && strlen(_feature_extended_edx_id[fi]) > 0) {
+      WRITE_TO_BUF(_feature_extended_edx_id[fi]);
+    }
+  }
+
+  if (supports_tscinv_bit()) {
+      WRITE_TO_BUF("Invariant TSC");
+  }
+
+  return written;
+}
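The loops above all follow the same pattern: walk the register one bit at a time and append the table entry for every set bit whose name is non-empty. A compressed standalone illustration of that pattern (the tiny name table and register value are made up for the example):

#include <cstdio>

int main() {
  const char* names[4] = { "FPU", "VME", "", "PSE" }; // "" marks a reserved bit
  unsigned int edx = 0x9;                             // example register: bits 0 and 3 set
  const char* prefix = "";
  for (unsigned int flag = 1, fi = 0; fi < 4; flag <<= 1, fi++) {
    if ((edx & flag) && names[fi][0] != '\0') {
      printf("%s%s", prefix, names[fi]);
      prefix = ", ";
    }
  }
  printf("\n"); // prints "FPU, PSE"
  return 0;
}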
+
+/**
+ * Write a detailed description of the cpu to a given buffer, including
+ * feature set.
+ */
+int VM_Version_Ext::cpu_detailed_description(char* const buf, size_t buf_len) {
+  assert(buf != NULL, "buffer is NULL!");
+  assert(buf_len >= CPU_DETAILED_DESC_BUF_SIZE, "buffer len should at least be == CPU_DETAILED_DESC_BUF_SIZE!");
+
+  static const char* unknown = "<unknown>";
+  char               vendor_id[VENDOR_LENGTH];
+  const char*        family = NULL;
+  const char*        model = NULL;
+  const char*        brand = NULL;
+  int                outputLen = 0;
+
+  family = cpu_family_description();
+  if (family == NULL) {
+    family = unknown;
+  }
+
+  model = cpu_model_description();
+  if (model == NULL) {
+    model = unknown;
+  }
+
+  brand = cpu_brand_string();
+
+  if (brand == NULL) {
+    brand = cpu_brand();
+    if (brand == NULL) {
+      brand = unknown;
+    }
+  }
+
+  *((uint32_t*) &vendor_id[0]) = _cpuid_info.std_vendor_name_0;
+  *((uint32_t*) &vendor_id[4]) = _cpuid_info.std_vendor_name_2;
+  *((uint32_t*) &vendor_id[8]) = _cpuid_info.std_vendor_name_1;
+  vendor_id[VENDOR_LENGTH-1] = '\0';
+
+  outputLen = jio_snprintf(buf, buf_len, "Brand: %s, Vendor: %s\n"
+    "Family: %s (0x%x), Model: %s (0x%x), Stepping: 0x%x\n"
+    "Ext. family: 0x%x, Ext. model: 0x%x, Type: 0x%x, Signature: 0x%8.8x\n"
+    "Features: ebx: 0x%8.8x, ecx: 0x%8.8x, edx: 0x%8.8x\n"
+    "Ext. features: eax: 0x%8.8x, ebx: 0x%8.8x, ecx: 0x%8.8x, edx: 0x%8.8x\n"
+    "Supports: ",
+    brand,
+    vendor_id,
+    family,
+    extended_cpu_family(),
+    model,
+    extended_cpu_model(),
+    cpu_stepping(),
+    _cpuid_info.std_cpuid1_eax.bits.ext_family,
+    _cpuid_info.std_cpuid1_eax.bits.ext_model,
+    _cpuid_info.std_cpuid1_eax.bits.proc_type,
+    _cpuid_info.std_cpuid1_eax.value,
+    _cpuid_info.std_cpuid1_ebx.value,
+    _cpuid_info.std_cpuid1_ecx.value,
+    _cpuid_info.std_cpuid1_edx.value,
+    _cpuid_info.ext_cpuid1_eax,
+    _cpuid_info.ext_cpuid1_ebx,
+    _cpuid_info.ext_cpuid1_ecx,
+    _cpuid_info.ext_cpuid1_edx);
+
+  if (outputLen < 0 || (size_t) outputLen >= buf_len - 1) {
+    if (buf_len > 0) { buf[buf_len-1] = '\0'; }
+    return OS_ERR;
+  }
+
+  cpu_write_support_string(&buf[outputLen], buf_len - outputLen);
+
+  return OS_OK;
+}
+
+const char* VM_Version_Ext::cpu_name(void) {
+  char cpu_type_desc[CPU_TYPE_DESC_BUF_SIZE];
+  size_t cpu_desc_len = sizeof(cpu_type_desc);
+
+  cpu_type_description(cpu_type_desc, cpu_desc_len);
+  char* tmp = NEW_C_HEAP_ARRAY_RETURN_NULL(char, cpu_desc_len, mtTracing);
+  if (NULL == tmp) {
+    return NULL;
+  }
+  strncpy(tmp, cpu_type_desc, cpu_desc_len);
+  return tmp;
+}
+
+const char* VM_Version_Ext::cpu_description(void) {
+  char cpu_detailed_desc_buffer[CPU_DETAILED_DESC_BUF_SIZE];
+  size_t cpu_detailed_desc_len = sizeof(cpu_detailed_desc_buffer);
+
+  cpu_detailed_description(cpu_detailed_desc_buffer, cpu_detailed_desc_len);
+
+  char* tmp = NEW_C_HEAP_ARRAY_RETURN_NULL(char, cpu_detailed_desc_len, mtTracing);
+
+  if (NULL == tmp) {
+    return NULL;
+  }
+
+  strncpy(tmp, cpu_detailed_desc_buffer, cpu_detailed_desc_len);
+  return tmp;
+}
+
+/**
+ *  See Intel Application note 485 (chapter 10) for details
+ *  on frequency extraction from cpu brand string.
+ *  http://www.intel.com/content/dam/www/public/us/en/documents/application-notes/processor-identification-cpuid-instruction-note.pdf
+ *
+ */
+jlong VM_Version_Ext::max_qualified_cpu_freq_from_brand_string(void) {
+  // get brand string
+  const char* const brand_string = cpu_brand_string();
+  if (brand_string == NULL) {
+    return 0;
+  }
+
+  const u8 MEGA = 1000000;
+  u8 multiplier = 0;
+  jlong frequency = 0;
+
+  // the frequency information in the cpu brand string
+  // is given in either of two formats, "x.xxyHz" or "xxxxyHz",
+  // where y is M, G or T and each x is a digit
+  const char* Hz_location = strchr(brand_string, 'H');
+
+  if (Hz_location != NULL) {
+    if (*(Hz_location + 1) == 'z') {
+      // switch on y in "yHz"
+      switch(*(Hz_location - 1)) {
+        case 'M' :
+          // set multiplier so that the computed frequency is in Hz
+          multiplier = MEGA;
+          break;
+        case 'G' :
+          multiplier = MEGA * 1000;
+          break;
+        case 'T' :
+          multiplier = MEGA * 1000 * 1000;
+          break;
+      }
+    }
+  }
+
+  if (multiplier > 0) {
+    // compute frequency (in Hz) from brand string
+    if (*(Hz_location - 4) == '.') { // if format is "x.xx"
+      frequency =  (jlong)(*(Hz_location - 5) - '0') * (multiplier);
+      frequency += (jlong)(*(Hz_location - 3) - '0') * (multiplier / 10);
+      frequency += (jlong)(*(Hz_location - 2) - '0') * (multiplier / 100);
+    } else { // format is "xxxx"
+      frequency =  (jlong)(*(Hz_location - 5) - '0') * 1000;
+      frequency += (jlong)(*(Hz_location - 4) - '0') * 100;
+      frequency += (jlong)(*(Hz_location - 3) - '0') * 10;
+      frequency += (jlong)(*(Hz_location - 2) - '0');
+      frequency *= multiplier;
+    }
+  }
+  return frequency;
+}
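A self-contained sketch of the same parsing rule outside of HotSpot (the example brand string is illustrative, and the sketch keeps the original's assumption that at least five characters precede "Hz"): locate the 'H' of the trailing "yHz", pick the multiplier from the unit letter, then read the digits in either the "x.xx" or the "xxxx" form.

#include <cstdint>
#include <cstdio>
#include <cstring>

static int64_t freq_from_brand_string(const char* brand) {
  const int64_t MEGA = 1000000;
  int64_t multiplier = 0, frequency = 0;
  const char* h = strchr(brand, 'H');            // find ...yHz, y in {M, G, T}
  if (h != NULL && h[1] == 'z') {
    switch (h[-1]) {
      case 'M': multiplier = MEGA;               break;
      case 'G': multiplier = MEGA * 1000;        break;
      case 'T': multiplier = MEGA * 1000 * 1000; break;
    }
  }
  if (multiplier > 0) {
    if (h[-4] == '.') {                          // "x.xx" form, e.g. 2.20GHz
      frequency  = (int64_t)(h[-5] - '0') * multiplier;
      frequency += (int64_t)(h[-3] - '0') * (multiplier / 10);
      frequency += (int64_t)(h[-2] - '0') * (multiplier / 100);
    } else {                                     // "xxxx" form, e.g. 2400MHz
      frequency  = (int64_t)(h[-5] - '0') * 1000;
      frequency += (int64_t)(h[-4] - '0') * 100;
      frequency += (int64_t)(h[-3] - '0') * 10;
      frequency += (int64_t)(h[-2] - '0');
      frequency *= multiplier;
    }
  }
  return frequency;
}

int main() {
  // Prints 2200000000 for this example brand string.
  printf("%lld\n", (long long)freq_from_brand_string("Intel(R) Xeon(R) CPU @ 2.20GHz"));
  return 0;
}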
+
+
+jlong VM_Version_Ext::maximum_qualified_cpu_frequency(void) {
+  if (_max_qualified_cpu_frequency == 0) {
+    _max_qualified_cpu_frequency = max_qualified_cpu_freq_from_brand_string();
+  }
+  return _max_qualified_cpu_frequency;
+}
+
+const char* const VM_Version_Ext::_family_id_intel[] = {
+  "8086/8088",
+  "",
+  "286",
+  "386",
+  "486",
+  "Pentium",
+  "Pentium Pro",   //or Pentium-M/Woodcrest depeding on model
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "Pentium 4"
+};
+
+const char* const VM_Version_Ext::_family_id_amd[] = {
+  "",
+  "",
+  "",
+  "",
+  "5x86",
+  "K5/K6",
+  "Athlon/AthlonXP",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "Opteron/Athlon64",
+  "Opteron QC/Phenom"  // Barcelona et.al.
+};
+// Partially from Intel 64 and IA-32 Architecture Software Developer's Manual,
+// September 2013, Vol 3C Table 35-1
+const char* const VM_Version_Ext::_model_id_pentium_pro[] = {
+  "",
+  "Pentium Pro",
+  "",
+  "Pentium II model 3",
+  "",
+  "Pentium II model 5/Xeon/Celeron",
+  "Celeron",
+  "Pentium III/Pentium III Xeon",
+  "Pentium III/Pentium III Xeon",
+  "Pentium M model 9",    // Yonah
+  "Pentium III, model A",
+  "Pentium III, model B",
+  "",
+  "Pentium M model D",    // Dothan
+  "",
+  "Core 2",               // 0xf Woodcrest/Conroe/Merom/Kentsfield/Clovertown
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "Celeron",              // 0x16 Celeron 65nm
+  "Core 2",               // 0x17 Penryn / Harpertown
+  "",
+  "",
+  "Core i7",              // 0x1A CPU_MODEL_NEHALEM_EP
+  "Atom",                 // 0x1B Z5xx series Silverthorn
+  "",
+  "Core 2",               // 0x1D Dunnington (6-core)
+  "Nehalem",              // 0x1E CPU_MODEL_NEHALEM
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "Westmere",             // 0x25 CPU_MODEL_WESTMERE
+  "",
+  "",
+  "",                     // 0x28
+  "",
+  "Sandy Bridge",         // 0x2a "2nd Generation Intel Core i7, i5, i3"
+  "",
+  "Westmere-EP",          // 0x2c CPU_MODEL_WESTMERE_EP
+  "Sandy Bridge-EP",      // 0x2d CPU_MODEL_SANDYBRIDGE_EP
+  "Nehalem-EX",           // 0x2e CPU_MODEL_NEHALEM_EX
+  "Westmere-EX",          // 0x2f CPU_MODEL_WESTMERE_EX
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "Ivy Bridge",           // 0x3a
+  "",
+  "Haswell",              // 0x3c "4th Generation Intel Core Processor"
+  "",                     // 0x3d "Next Generation Intel Core Processor"
+  "Ivy Bridge-EP",        // 0x3e "Next Generation Intel Xeon Processor E7 Family"
+  "",                     // 0x3f "Future Generation Intel Xeon Processor"
+  "",
+  "",
+  "",
+  "",
+  "",
+  "Haswell",              // 0x45 "4th Generation Intel Core Processor"
+  "Haswell",              // 0x46 "4th Generation Intel Core Processor"
+  NULL
+};
+
+/* Brand ID is for backward compatibility;
+ * newer CPUs use the extended brand string */
+const char* const VM_Version_Ext::_brand_id[] = {
+  "",
+  "Celeron processor",
+  "Pentium III processor",
+  "Intel Pentium III Xeon processor",
+  "",
+  "",
+  "",
+  "",
+  "Intel Pentium 4 processor",
+  NULL
+};
+
+
+const char* const VM_Version_Ext::_feature_edx_id[] = {
+  "On-Chip FPU",
+  "Virtual Mode Extensions",
+  "Debugging Extensions",
+  "Page Size Extensions",
+  "Time Stamp Counter",
+  "Model Specific Registers",
+  "Physical Address Extension",
+  "Machine Check Exceptions",
+  "CMPXCHG8B Instruction",
+  "On-Chip APIC",
+  "",
+  "Fast System Call",
+  "Memory Type Range Registers",
+  "Page Global Enable",
+  "Machine Check Architecture",
+  "Conditional Mov Instruction",
+  "Page Attribute Table",
+  "36-bit Page Size Extension",
+  "Processor Serial Number",
+  "CLFLUSH Instruction",
+  "",
+  "Debug Trace Store feature",
+  "ACPI registers in MSR space",
+  "Intel Architecture MMX Technology",
+  "Fast Float Point Save and Restore",
+  "Streaming SIMD extensions",
+  "Streaming SIMD extensions 2",
+  "Self-Snoop",
+  "Hyper Threading",
+  "Thermal Monitor",
+  "",
+  "Pending Break Enable"
+};
+
+const char* const VM_Version_Ext::_feature_extended_edx_id[] = {
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "SYSCALL/SYSRET",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "Execute Disable Bit",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "RDTSCP",
+  "",
+  "Intel 64 Architecture",
+  "",
+  ""
+};
+
+const char* const VM_Version_Ext::_feature_ecx_id[] = {
+  "Streaming SIMD Extensions 3",
+  "PCLMULQDQ",
+  "64-bit DS Area",
+  "MONITOR/MWAIT instructions",
+  "CPL Qualified Debug Store",
+  "Virtual Machine Extensions",
+  "Safer Mode Extensions",
+  "Enhanced Intel SpeedStep technology",
+  "Thermal Monitor 2",
+  "Supplemental Streaming SIMD Extensions 3",
+  "L1 Context ID",
+  "",
+  "Fused Multiply-Add",
+  "CMPXCHG16B",
+  "xTPR Update Control",
+  "Perfmon and Debug Capability",
+  "",
+  "Process-context identifiers",
+  "Direct Cache Access",
+  "Streaming SIMD extensions 4.1",
+  "Streaming SIMD extensions 4.2",
+  "x2APIC",
+  "MOVBE",
+  "Popcount instruction",
+  "TSC-Deadline",
+  "AESNI",
+  "XSAVE",
+  "OSXSAVE",
+  "AVX",
+  "F16C",
+  "RDRAND",
+  ""
+};
+
+const char* const VM_Version_Ext::_feature_extended_ecx_id[] = {
+  "LAHF/SAHF instruction support",
+  "Core multi-processor leagacy mode",
+  "",
+  "",
+  "",
+  "Advanced Bit Manipulations: LZCNT",
+  "SSE4A: MOVNTSS, MOVNTSD, EXTRQ, INSERTQ",
+  "Misaligned SSE mode",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  ""
+};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/x86/vm/vm_version_ext_x86.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_X86_VM_VM_VERSION_EXT_X86_HPP
+#define CPU_X86_VM_VM_VERSION_EXT_X86_HPP
+
+#include "utilities/macros.hpp"
+#include "vm_version_x86.hpp"
+
+class VM_Version_Ext : public VM_Version {
+ private:
+  static const size_t      VENDOR_LENGTH;
+  static const size_t      CPU_EBS_MAX_LENGTH;
+  static const size_t      CPU_TYPE_DESC_BUF_SIZE;
+  static const size_t      CPU_DETAILED_DESC_BUF_SIZE;
+
+  static const char* const _family_id_intel[];
+  static const char* const _family_id_amd[];
+  static const char* const _brand_id[];
+  static const char* const _model_id_pentium_pro[];
+
+  static const char* const _feature_edx_id[];
+  static const char* const _feature_extended_edx_id[];
+  static const char* const _feature_ecx_id[];
+  static const char* const _feature_extended_ecx_id[];
+
+  static int               _no_of_threads;
+  static int               _no_of_cores;
+  static int               _no_of_packages;
+  static char*             _cpu_brand_string;
+  static jlong             _max_qualified_cpu_frequency;
+
+  static const char* cpu_family_description(void);
+  static const char* cpu_model_description(void);
+  static const char* cpu_brand(void);
+  static const char* cpu_brand_string(void);
+
+  static int cpu_type_description(char* const buf, size_t buf_len);
+  static int cpu_detailed_description(char* const buf, size_t buf_len);
+  static int cpu_extended_brand_string(char* const buf, size_t buf_len);
+
+  static bool cpu_is_em64t(void);
+  static bool is_netburst(void);
+
+  // Returns bytes written excluding terminating null byte.
+  static size_t cpu_write_support_string(char* const buf, size_t buf_len);
+  static void resolve_cpu_information_details(void);
+  static jlong max_qualified_cpu_freq_from_brand_string(void);
+
+ public:
+  // Offsets for cpuid asm stub brand string
+  static ByteSize proc_name_0_offset() { return byte_offset_of(CpuidInfo, proc_name_0); }
+  static ByteSize proc_name_1_offset() { return byte_offset_of(CpuidInfo, proc_name_1); }
+  static ByteSize proc_name_2_offset() { return byte_offset_of(CpuidInfo, proc_name_2); }
+  static ByteSize proc_name_3_offset() { return byte_offset_of(CpuidInfo, proc_name_3); }
+  static ByteSize proc_name_4_offset() { return byte_offset_of(CpuidInfo, proc_name_4); }
+  static ByteSize proc_name_5_offset() { return byte_offset_of(CpuidInfo, proc_name_5); }
+  static ByteSize proc_name_6_offset() { return byte_offset_of(CpuidInfo, proc_name_6); }
+  static ByteSize proc_name_7_offset() { return byte_offset_of(CpuidInfo, proc_name_7); }
+  static ByteSize proc_name_8_offset() { return byte_offset_of(CpuidInfo, proc_name_8); }
+  static ByteSize proc_name_9_offset() { return byte_offset_of(CpuidInfo, proc_name_9); }
+  static ByteSize proc_name_10_offset() { return byte_offset_of(CpuidInfo, proc_name_10); }
+  static ByteSize proc_name_11_offset() { return byte_offset_of(CpuidInfo, proc_name_11); }
+
+  static int number_of_threads(void);
+  static int number_of_cores(void);
+  static int number_of_sockets(void);
+
+  static jlong maximum_qualified_cpu_frequency(void);
+
+  static bool supports_tscinv_ext(void);
+
+  static const char* cpu_name(void);
+  static const char* cpu_description(void);
+
+  static void initialize();
+};
+
+#endif // CPU_X86_VM_VM_VERSION_EXT_X86_HPP
--- a/src/os/aix/vm/os_aix.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/os/aix/vm/os_aix.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -1555,6 +1555,11 @@
   st->cr();
 }
 
+int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
+  // Not yet implemented.
+  return 0;
+}
+
 void os::print_memory_info(outputStream* st) {
 
   st->print_cr("Memory:");
@@ -2793,6 +2798,10 @@
   return ::read(fd, buf, nBytes);
 }
 
+size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
+  return ::pread(fd, buf, nBytes, offset);
+}
+
 #define NANOSECS_PER_MILLISEC 1000000
 
 int os::sleep(Thread* thread, jlong millis, bool interruptible) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/aix/vm/os_perf_aix.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,1079 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jvm.h"
+#include "memory/allocation.inline.hpp"
+#include "os_aix.inline.hpp"
+#include "runtime/os.hpp"
+#include "runtime/os_perf.hpp"
+
+#include CPU_HEADER(vm_version_ext)
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+#include <sys/resource.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <dirent.h>
+#include <stdlib.h>
+#include <dlfcn.h>
+#include <pthread.h>
+#include <limits.h>
+
+/**
+   /proc/[number]/stat
+              Status information about the process.  This is used by ps(1).  It is defined in /usr/src/linux/fs/proc/array.c.
+
+              The fields, in order, with their proper scanf(3) format specifiers, are:
+
+              1. pid %d The process id.
+
+              2. comm %s
+                     The filename of the executable, in parentheses.  This is visible whether or not the executable is swapped out.
+
+              3. state %c
+                     One  character  from  the  string "RSDZTW" where R is running, S is sleeping in an interruptible wait, D is waiting in uninterruptible disk
+                     sleep, Z is zombie, T is traced or stopped (on a signal), and W is paging.
+
+              4. ppid %d
+                     The PID of the parent.
+
+              5. pgrp %d
+                     The process group ID of the process.
+
+              6. session %d
+                     The session ID of the process.
+
+              7. tty_nr %d
+                     The tty the process uses.
+
+              8. tpgid %d
+                     The process group ID of the process which currently owns the tty that the process is connected to.
+
+              9. flags %lu
+                     The flags of the process.  The math bit is decimal 4, and the traced bit is decimal 10.
+
+              10. minflt %lu
+                     The number of minor faults the process has made which have not required loading a memory page from disk.
+
+              11. cminflt %lu
+                     The number of minor faults that the process's waited-for children have made.
+
+              12. majflt %lu
+                     The number of major faults the process has made which have required loading a memory page from disk.
+
+              13. cmajflt %lu
+                     The number of major faults that the process's waited-for children have made.
+
+              14. utime %lu
+                     The number of jiffies that this process has been scheduled in user mode.
+
+              15. stime %lu
+                     The number of jiffies that this process has been scheduled in kernel mode.
+
+              16. cutime %ld
+                     The number of jiffies that this process's waited-for children have been scheduled in user mode. (See also times(2).)
+
+              17. cstime %ld
+                     The number of jiffies that this process' waited-for children have been scheduled in kernel mode.
+
+              18. priority %ld
+                     The standard nice value, plus fifteen.  The value is never negative in the kernel.
+
+              19. nice %ld
+                     The nice value ranges from 19 (nicest) to -19 (not nice to others).
+
+              20. 0 %ld  This value is hard coded to 0 as a placeholder for a removed field.
+
+              21. itrealvalue %ld
+                     The time in jiffies before the next SIGALRM is sent to the process due to an interval timer.
+
+              22. starttime %lu
+                     The time in jiffies the process started after system boot.
+
+              23. vsize %lu
+                     Virtual memory size in bytes.
+
+              24. rss %ld
+                     Resident Set Size: number of pages the process has in real memory, minus 3 for administrative purposes. This is just the pages which  count
+                     towards text, data, or stack space.  This does not include pages which have not been demand-loaded in, or which are swapped out.
+
+              25. rlim %lu
+                     Current limit in bytes on the rss of the process (usually 4294967295 on i386).
+
+              26. startcode %lu
+                     The address above which program text can run.
+
+              27. endcode %lu
+                     The address below which program text can run.
+
+              28. startstack %lu
+                     The address of the start of the stack.
+
+              29. kstkesp %lu
+                     The current value of esp (stack pointer), as found in the kernel stack page for the process.
+
+              30. kstkeip %lu
+                     The current EIP (instruction pointer).
+
+              31. signal %lu
+                     The bitmap of pending signals (usually 0).
+
+              32. blocked %lu
+                     The bitmap of blocked signals (usually 0, 2 for shells).
+
+              33. sigignore %lu
+                     The bitmap of ignored signals.
+
+              34. sigcatch %lu
+                     The bitmap of caught signals.
+
+              35. wchan %lu
+                     This  is the "channel" in which the process is waiting.  It is the address of a system call, and can be looked up in a namelist if you need
+                     a textual name.  (If you have an up-to-date /etc/psdatabase, then try ps -l to see the WCHAN field in action.)
+
+              36. nswap %lu
+                     Number of pages swapped - not maintained.
+
+              37. cnswap %lu
+                     Cumulative nswap for child processes.
+
+              38. exit_signal %d
+                     Signal to be sent to parent when we die.
+
+              39. processor %d
+                     CPU number last executed on.
+
+
+
+ ///// SSCANF FORMAT STRING. Copy and use.
+
+field:        1  2  3  4  5  6  7  8  9   10  11  12  13  14  15  16  17  18  19  20  21  22  23  24  25  26  27  28  29  30  31  32  33  34  35  36  37  38 39
+format:       %d %s %c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld %lu %lu %ld %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %d %d
+
+
+*/
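A small standalone sketch of how the layout documented above is typically consumed (assuming a Linux-style /proc; this mirrors, but is not, the read_ticks/vread_statdata code further down): skip past the "pid (comm)" prefix via the last ')', then scan fields 3 through 15 to reach utime and stime.

#include <cstdio>
#include <cstring>

int main() {
  char buf[2048];
  FILE* f = fopen("/proc/self/stat", "r");
  if (f == NULL) return 1;
  size_t n = fread(buf, 1, sizeof(buf) - 1, f);
  fclose(f);
  buf[n] = '\0';

  const char* p = strrchr(buf, ')');   // end of the "(comm)" field
  if (p == NULL) return 1;
  unsigned long utime = 0, stime = 0;
  // Skipped fields: state ppid pgrp session tty_nr tpgid flags minflt cminflt majflt cmajflt
  if (sscanf(p + 2, "%*c %*d %*d %*d %*d %*d %*u %*u %*u %*u %*u %lu %lu",
             &utime, &stime) == 2) {
    printf("utime=%lu stime=%lu (in clock ticks)\n", utime, stime);
  }
  return 0;
}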
+
+/**
+ * For platforms that have them, when declaring
+ * a printf-style function,
+ *   formatSpec is the parameter number (starting at 1)
+ *       that is the format argument ("%d pid %s")
+ *   params is the parameter number where the actual args to
+ *       the format starts. If the args are in a va_list, this
+ *       should be 0.
+ */
+#ifndef PRINTF_ARGS
+#  define PRINTF_ARGS(formatSpec,  params) ATTRIBUTE_PRINTF(formatSpec, params)
+#endif
+
+#ifndef SCANF_ARGS
+#  define SCANF_ARGS(formatSpec,   params) ATTRIBUTE_SCANF(formatSpec, params)
+#endif
+
+#ifndef _PRINTFMT_
+#  define _PRINTFMT_
+#endif
+
+#ifndef _SCANFMT_
+#  define _SCANFMT_
+#endif
+
+
+struct CPUPerfTicks {
+  uint64_t  used;
+  uint64_t  usedKernel;
+  uint64_t  total;
+};
+
+typedef enum {
+  CPU_LOAD_VM_ONLY,
+  CPU_LOAD_GLOBAL,
+} CpuLoadTarget;
+
+enum {
+  UNDETECTED,
+  UNDETECTABLE,
+  LINUX26_NPTL,
+  BAREMETAL
+};
+
+struct CPUPerfCounters {
+  int   nProcs;
+  CPUPerfTicks jvmTicks;
+  CPUPerfTicks* cpus;
+};
+
+static double get_cpu_load(int which_logical_cpu, CPUPerfCounters* counters, double* pkernelLoad, CpuLoadTarget target);
+
+/** reads /proc/<pid>/stat data, with some checks and some skips.
+ *  Ensure that 'fmt' does _NOT_ contain the first two "%d %s"
+ */
+static int SCANF_ARGS(2, 0) vread_statdata(const char* procfile, _SCANFMT_ const char* fmt, va_list args) {
+  FILE* f;
+  int n;
+  char buf[2048];
+
+  if ((f = fopen(procfile, "r")) == NULL) {
+    return -1;
+  }
+
+  if ((n = fread(buf, 1, sizeof(buf), f)) != -1) {
+    char *tmp;
+
+    buf[n-1] = '\0';
+    /** skip through pid and exec name. */
+    if ((tmp = strrchr(buf, ')')) != NULL) {
+      // skip the ')' and the following space
+      // but check that buffer is long enough
+      tmp += 2;
+      if (tmp < buf + n) {
+        n = vsscanf(tmp, fmt, args);
+      }
+    }
+  }
+
+  fclose(f);
+
+  return n;
+}
+
+static int SCANF_ARGS(2, 3) read_statdata(const char* procfile, _SCANFMT_ const char* fmt, ...) {
+  int   n;
+  va_list args;
+
+  va_start(args, fmt);
+  n = vread_statdata(procfile, fmt, args);
+  va_end(args);
+  return n;
+}
+
+static FILE* open_statfile(void) {
+  FILE *f;
+
+  if ((f = fopen("/proc/stat", "r")) == NULL) {
+    static int haveWarned = 0;
+    if (!haveWarned) {
+      haveWarned = 1;
+    }
+  }
+  return f;
+}
+
+static void
+next_line(FILE *f) {
+  int c;
+  do {
+    c = fgetc(f);
+  } while (c != '\n' && c != EOF);
+}
+
+/**
+ * Return the total number of ticks since the system was booted.
+ * If the usedTicks parameter is not NULL, it will be filled with
+ * the number of ticks spent on actual processes (user, system or
+ * nice processes) since system boot. Note that this is the total number
+ * of "executed" ticks on _all_ CPU:s, that is on a n-way system it is
+ * n times the number of ticks that has passed in clock time.
+ *
+ * Returns a negative value if the reading of the ticks failed.
+ */
+static OSReturn get_total_ticks(int which_logical_cpu, CPUPerfTicks* pticks) {
+  FILE*         fh;
+  uint64_t      userTicks, niceTicks, systemTicks, idleTicks;
+  uint64_t      iowTicks = 0, irqTicks = 0, sirqTicks= 0;
+  int           logical_cpu = -1;
+  const int     expected_assign_count = (-1 == which_logical_cpu) ? 4 : 5;
+  int           n;
+
+  if ((fh = open_statfile()) == NULL) {
+    return OS_ERR;
+  }
+  if (-1 == which_logical_cpu) {
+    n = fscanf(fh, "cpu " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
+            UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT,
+            &userTicks, &niceTicks, &systemTicks, &idleTicks,
+            &iowTicks, &irqTicks, &sirqTicks);
+  } else {
+    // Move to next line
+    next_line(fh);
+
+    // find the line for the requested cpu (faster to just iterate linefeeds?)
+    for (int i = 0; i < which_logical_cpu; i++) {
+      next_line(fh);
+    }
+
+    n = fscanf(fh, "cpu%u " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
+               UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT,
+               &logical_cpu, &userTicks, &niceTicks,
+               &systemTicks, &idleTicks, &iowTicks, &irqTicks, &sirqTicks);
+  }
+
+  fclose(fh);
+  if (n < expected_assign_count || logical_cpu != which_logical_cpu) {
+#ifdef DEBUG_LINUX_PROC_STAT
+    vm_fprintf(stderr, "[stat] read failed");
+#endif
+    return OS_ERR;
+  }
+
+#ifdef DEBUG_LINUX_PROC_STAT
+  vm_fprintf(stderr, "[stat] read "
+          UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
+          UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " \n",
+          userTicks, niceTicks, systemTicks, idleTicks,
+          iowTicks, irqTicks, sirqTicks);
+#endif
+
+  pticks->used       = userTicks + niceTicks;
+  pticks->usedKernel = systemTicks + irqTicks + sirqTicks;
+  pticks->total      = userTicks + niceTicks + systemTicks + idleTicks +
+                       iowTicks + irqTicks + sirqTicks;
+
+  return OS_OK;
+}
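As an illustration (the numbers are made up), a /proc/stat aggregate line such as "cpu 10132153 290696 3084719 46828483 16683 0 25195" supplies user, nice, system, idle, iowait, irq and softirq ticks in that order; the code above then takes used = user + nice, usedKernel = system + irq + softirq, and total as the sum of all seven.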
+
+
+static int get_systemtype(void) {
+  static int procEntriesType = UNDETECTED;
+  DIR *taskDir;
+
+  if (procEntriesType != UNDETECTED) {
+    return procEntriesType;
+  }
+
+  // Check whether we have a task subdirectory
+  if ((taskDir = opendir("/proc/self/task")) == NULL) {
+    procEntriesType = UNDETECTABLE;
+  } else {
+    // The task subdirectory exists; we're on a Linux >= 2.6 system
+    closedir(taskDir);
+    procEntriesType = LINUX26_NPTL;
+  }
+
+  return procEntriesType;
+}
+
+/** read user and system ticks from a named procfile, assumed to be in 'stat' format then. */
+static int read_ticks(const char* procfile, uint64_t* userTicks, uint64_t* systemTicks) {
+  return read_statdata(procfile, "%*c %*d %*d %*d %*d %*d %*u %*u %*u %*u %*u " UINT64_FORMAT " " UINT64_FORMAT,
+    userTicks, systemTicks);
+}
+
+/**
+ * Return the number of ticks spent in any of the processes belonging
+ * to the JVM on any CPU.
+ */
+static OSReturn get_jvm_ticks(CPUPerfTicks* pticks) {
+  uint64_t userTicks;
+  uint64_t systemTicks;
+
+  if (get_systemtype() != LINUX26_NPTL) {
+    return OS_ERR;
+  }
+
+  if (read_ticks("/proc/self/stat", &userTicks, &systemTicks) != 2) {
+    return OS_ERR;
+  }
+
+  // get the total
+  if (get_total_ticks(-1, pticks) != OS_OK) {
+    return OS_ERR;
+  }
+
+  pticks->used       = userTicks;
+  pticks->usedKernel = systemTicks;
+
+  return OS_OK;
+}
+
+/**
+ * Return the load of the CPU as a double. 1.0 means the CPU spends all
+ * available time on user or system processes; 0.0 means the CPU is
+ * completely idle.
+ *
+ * Returns a negative value if there is a problem in determining the CPU load.
+ */
+static double get_cpu_load(int which_logical_cpu, CPUPerfCounters* counters, double* pkernelLoad, CpuLoadTarget target) {
+  uint64_t udiff, kdiff, tdiff;
+  CPUPerfTicks* pticks;
+  CPUPerfTicks  tmp;
+  double user_load;
+
+  *pkernelLoad = 0.0;
+
+  if (target == CPU_LOAD_VM_ONLY) {
+    pticks = &counters->jvmTicks;
+  } else if (-1 == which_logical_cpu) {
+    pticks = &counters->cpus[counters->nProcs];
+  } else {
+    pticks = &counters->cpus[which_logical_cpu];
+  }
+
+  tmp = *pticks;
+
+  if (target == CPU_LOAD_VM_ONLY) {
+    if (get_jvm_ticks(pticks) != OS_OK) {
+      return -1.0;
+    }
+  } else if (get_total_ticks(which_logical_cpu, pticks) != OS_OK) {
+    return -1.0;
+  }
+
+  // seems like we sometimes end up with fewer kernel ticks when
+  // reading /proc/self/stat a second time; a timing issue between cpus?
+  if (pticks->usedKernel < tmp.usedKernel) {
+    kdiff = 0;
+  } else {
+    kdiff = pticks->usedKernel - tmp.usedKernel;
+  }
+  tdiff = pticks->total - tmp.total;
+  udiff = pticks->used - tmp.used;
+
+  if (tdiff == 0) {
+    return 0.0;
+  } else if (tdiff < (udiff + kdiff)) {
+    tdiff = udiff + kdiff;
+  }
+  *pkernelLoad = (kdiff / (double)tdiff);
+  // BUG9044876, normalize return values to sane values
+  *pkernelLoad = MAX2<double>(*pkernelLoad, 0.0);
+  *pkernelLoad = MIN2<double>(*pkernelLoad, 1.0);
+
+  user_load = (udiff / (double)tdiff);
+  user_load = MAX2<double>(user_load, 0.0);
+  user_load = MIN2<double>(user_load, 1.0);
+
+  return user_load;
+}
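To make the computation concrete (with made-up tick counts): if between two samples used grows from 1000 to 1080, usedKernel from 400 to 420 and total from 10000 to 10400, then udiff = 80, kdiff = 20 and tdiff = 400, giving a kernel load of 20/400 = 0.05 and a returned user load of 80/400 = 0.20; both values are clamped to the [0.0, 1.0] range.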
+
+static int SCANF_ARGS(1, 2) parse_stat(_SCANFMT_ const char* fmt, ...) {
+  FILE *f;
+  va_list args;
+
+  va_start(args, fmt);
+
+  if ((f = open_statfile()) == NULL) {
+    va_end(args);
+    return OS_ERR;
+  }
+  for (;;) {
+    char line[80];
+    if (fgets(line, sizeof(line), f) != NULL) {
+      if (vsscanf(line, fmt, args) == 1) {
+        fclose(f);
+        va_end(args);
+        return OS_OK;
+      }
+    } else {
+        fclose(f);
+        va_end(args);
+        return OS_ERR;
+    }
+  }
+}
+
+static int get_noof_context_switches(uint64_t* switches) {
+  return parse_stat("ctxt " UINT64_FORMAT "\n", switches);
+}
+
+/** returns boot time in _seconds_ since epoch */
+static int get_boot_time(uint64_t* time) {
+  return parse_stat("btime " UINT64_FORMAT "\n", time);
+}
+
+static int perf_context_switch_rate(double* rate) {
+  static pthread_mutex_t contextSwitchLock = PTHREAD_MUTEX_INITIALIZER;
+  static uint64_t      lastTime;
+  static uint64_t      lastSwitches;
+  static double        lastRate;
+
+  uint64_t lt = 0;
+  int res = 0;
+
+  if (lastTime == 0) {
+    uint64_t tmp;
+    if (get_boot_time(&tmp) < 0) {
+      return OS_ERR;
+    }
+    lt = tmp * 1000;
+  }
+
+  res = OS_OK;
+
+  pthread_mutex_lock(&contextSwitchLock);
+  {
+
+    uint64_t sw;
+    s8 t, d;
+
+    if (lastTime == 0) {
+      lastTime = lt;
+    }
+
+    t = os::javaTimeMillis();
+    d = t - lastTime;
+
+    if (d == 0) {
+      *rate = lastRate;
+    } else if (!get_noof_context_switches(&sw)) {
+      *rate      = ( (double)(sw - lastSwitches) / d ) * 1000;
+      lastRate     = *rate;
+      lastSwitches = sw;
+      lastTime     = t;
+    } else {
+      *rate = 0;
+      res   = OS_ERR;
+    }
+    if (*rate <= 0) {
+      *rate = 0;
+      lastRate = 0;
+    }
+  }
+  pthread_mutex_unlock(&contextSwitchLock);
+
+  return res;
+}
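Since os::javaTimeMillis() is in milliseconds, the expression ((sw - lastSwitches) / d) * 1000 above yields context switches per second; for example (hypothetical numbers), 5000 additional switches observed over d = 2000 ms gives a rate of 2500 switches/s.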
+
+class CPUPerformanceInterface::CPUPerformance : public CHeapObj<mtInternal> {
+  friend class CPUPerformanceInterface;
+ private:
+  CPUPerfCounters _counters;
+
+  int cpu_load(int which_logical_cpu, double* cpu_load);
+  int context_switch_rate(double* rate);
+  int cpu_load_total_process(double* cpu_load);
+  int cpu_loads_process(double* pjvmUserLoad, double* pjvmKernelLoad, double* psystemTotalLoad);
+
+ public:
+  CPUPerformance();
+  bool initialize();
+  ~CPUPerformance();
+};
+
+CPUPerformanceInterface::CPUPerformance::CPUPerformance() {
+  _counters.nProcs = os::active_processor_count();
+  _counters.cpus = NULL;
+}
+
+bool CPUPerformanceInterface::CPUPerformance::initialize() {
+  size_t tick_array_size = (_counters.nProcs + 1) * sizeof(CPUPerfTicks);
+  _counters.cpus = (CPUPerfTicks*)NEW_C_HEAP_ARRAY(char, tick_array_size, mtInternal);
+  if (NULL == _counters.cpus) {
+    return false;
+  }
+  memset(_counters.cpus, 0, tick_array_size);
+
+  // For the CPU load total
+  get_total_ticks(-1, &_counters.cpus[_counters.nProcs]);
+
+  // For each CPU
+  for (int i = 0; i < _counters.nProcs; i++) {
+    get_total_ticks(i, &_counters.cpus[i]);
+  }
+  // For JVM load
+  get_jvm_ticks(&_counters.jvmTicks);
+
+  // initialize context switch system
+  // the double is only for init
+  double init_ctx_switch_rate;
+  perf_context_switch_rate(&init_ctx_switch_rate);
+
+  return true;
+}
+
+CPUPerformanceInterface::CPUPerformance::~CPUPerformance() {
+  if (_counters.cpus != NULL) {
+    FREE_C_HEAP_ARRAY(char, _counters.cpus);
+  }
+}
+
+int CPUPerformanceInterface::CPUPerformance::cpu_load(int which_logical_cpu, double* cpu_load) {
+  double u, s;
+  u = get_cpu_load(which_logical_cpu, &_counters, &s, CPU_LOAD_GLOBAL);
+  if (u < 0) {
+    *cpu_load = 0.0;
+    return OS_ERR;
+  }
+  // Cap the total system load at 1.0
+  *cpu_load = MIN2<double>((u + s), 1.0);
+  return OS_OK;
+}
+
+int CPUPerformanceInterface::CPUPerformance::cpu_load_total_process(double* cpu_load) {
+  double u, s;
+  u = get_cpu_load(-1, &_counters, &s, CPU_LOAD_VM_ONLY);
+  if (u < 0) {
+    *cpu_load = 0.0;
+    return OS_ERR;
+  }
+  *cpu_load = u + s;
+  return OS_OK;
+}
+
+int CPUPerformanceInterface::CPUPerformance::cpu_loads_process(double* pjvmUserLoad, double* pjvmKernelLoad, double* psystemTotalLoad) {
+  double u, s, t;
+
+  assert(pjvmUserLoad != NULL, "pjvmUserLoad not inited");
+  assert(pjvmKernelLoad != NULL, "pjvmKernelLoad not inited");
+  assert(psystemTotalLoad != NULL, "psystemTotalLoad not inited");
+
+  u = get_cpu_load(-1, &_counters, &s, CPU_LOAD_VM_ONLY);
+  if (u < 0) {
+    *pjvmUserLoad = 0.0;
+    *pjvmKernelLoad = 0.0;
+    *psystemTotalLoad = 0.0;
+    return OS_ERR;
+  }
+
+  cpu_load(-1, &t);
+  // clamp at user+system and 1.0
+  if (u + s > t) {
+    t = MIN2<double>(u + s, 1.0);
+  }
+
+  *pjvmUserLoad = u;
+  *pjvmKernelLoad = s;
+  *psystemTotalLoad = t;
+
+  return OS_OK;
+}
+
+int CPUPerformanceInterface::CPUPerformance::context_switch_rate(double* rate) {
+  return perf_context_switch_rate(rate);
+}
+
+CPUPerformanceInterface::CPUPerformanceInterface() {
+  _impl = NULL;
+}
+
+bool CPUPerformanceInterface::initialize() {
+  _impl = new CPUPerformanceInterface::CPUPerformance();
+  return NULL == _impl ? false : _impl->initialize();
+}
+
+CPUPerformanceInterface::~CPUPerformanceInterface() {
+  if (_impl != NULL) {
+    delete _impl;
+  }
+}
+
+int CPUPerformanceInterface::cpu_load(int which_logical_cpu, double* cpu_load) const {
+  return _impl->cpu_load(which_logical_cpu, cpu_load);
+}
+
+int CPUPerformanceInterface::cpu_load_total_process(double* cpu_load) const {
+  return _impl->cpu_load_total_process(cpu_load);
+}
+
+int CPUPerformanceInterface::cpu_loads_process(double* pjvmUserLoad, double* pjvmKernelLoad, double* psystemTotalLoad) const {
+  return _impl->cpu_loads_process(pjvmUserLoad, pjvmKernelLoad, psystemTotalLoad);
+}
+
+int CPUPerformanceInterface::context_switch_rate(double* rate) const {
+  return _impl->context_switch_rate(rate);
+}
+
+class SystemProcessInterface::SystemProcesses : public CHeapObj<mtInternal> {
+  friend class SystemProcessInterface;
+ private:
+  class ProcessIterator : public CHeapObj<mtInternal> {
+    friend class SystemProcessInterface::SystemProcesses;
+   private:
+    DIR*           _dir;
+    struct dirent* _entry;
+    bool           _valid;
+    char           _exeName[PATH_MAX];
+    char           _exePath[PATH_MAX];
+
+    ProcessIterator();
+    ~ProcessIterator();
+    bool initialize();
+
+    bool is_valid() const { return _valid; }
+    bool is_valid_entry(struct dirent* entry) const;
+    bool is_dir(const char* name) const;
+    int  fsize(const char* name, uint64_t& size) const;
+
+    char* allocate_string(const char* str) const;
+    void  get_exe_name();
+    char* get_exe_path();
+    char* get_cmdline();
+
+    int current(SystemProcess* process_info);
+    int next_process();
+  };
+
+  ProcessIterator* _iterator;
+  SystemProcesses();
+  bool initialize();
+  ~SystemProcesses();
+
+  // information about system processes
+  int system_processes(SystemProcess** system_processes, int* no_of_sys_processes) const;
+};
+
+bool SystemProcessInterface::SystemProcesses::ProcessIterator::is_dir(const char* name) const {
+  struct stat mystat;
+  int ret_val = 0;
+
+  ret_val = stat(name, &mystat);
+  if (ret_val < 0) {
+    return false;
+  }
+  ret_val = S_ISDIR(mystat.st_mode);
+  return ret_val > 0;
+}
+
+int SystemProcessInterface::SystemProcesses::ProcessIterator::fsize(const char* name, uint64_t& size) const {
+  assert(name != NULL, "name pointer is NULL!");
+  size = 0;
+  struct stat fbuf;
+
+  if (stat(name, &fbuf) < 0) {
+    return OS_ERR;
+  }
+  size = fbuf.st_size;
+  return OS_OK;
+}
+
+// if it has a numeric name, is a directory and has a 'stat' file in it
+bool SystemProcessInterface::SystemProcesses::ProcessIterator::is_valid_entry(struct dirent* entry) const {
+  char buffer[PATH_MAX];
+  uint64_t size = 0;
+
+  if (atoi(entry->d_name) != 0) {
+    jio_snprintf(buffer, PATH_MAX, "/proc/%s", entry->d_name);
+    buffer[PATH_MAX - 1] = '\0';
+
+    if (is_dir(buffer)) {
+      jio_snprintf(buffer, PATH_MAX, "/proc/%s/stat", entry->d_name);
+      buffer[PATH_MAX - 1] = '\0';
+      if (fsize(buffer, size) != OS_ERR) {
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+// get exe-name from /proc/<pid>/stat
+void SystemProcessInterface::SystemProcesses::ProcessIterator::get_exe_name() {
+  FILE* fp;
+  char  buffer[PATH_MAX];
+
+  jio_snprintf(buffer, PATH_MAX, "/proc/%s/stat", _entry->d_name);
+  buffer[PATH_MAX - 1] = '\0';
+  if ((fp = fopen(buffer, "r")) != NULL) {
+    if (fgets(buffer, PATH_MAX, fp) != NULL) {
+      char* start, *end;
+      // exe-name is between the first pair of ( and )
+      start = strchr(buffer, '(');
+      if (start != NULL && start[1] != '\0') {
+        start++;
+        end = strrchr(start, ')');
+        if (end != NULL) {
+          size_t len;
+          len = MIN2<size_t>(end - start, sizeof(_exeName) - 1);
+          memcpy(_exeName, start, len);
+          _exeName[len] = '\0';
+        }
+      }
+    }
+    fclose(fp);
+  }
+}
+
+// get command line from /proc/<pid>/cmdline
+char* SystemProcessInterface::SystemProcesses::ProcessIterator::get_cmdline() {
+  FILE* fp;
+  char  buffer[PATH_MAX];
+  char* cmdline = NULL;
+
+  jio_snprintf(buffer, PATH_MAX, "/proc/%s/cmdline", _entry->d_name);
+  buffer[PATH_MAX - 1] = '\0';
+  if ((fp = fopen(buffer, "r")) != NULL) {
+    size_t size = 0;
+    char   dummy;
+
+    // find out how long the file is (stat reports a size of 0 for /proc files)
+    while (fread(&dummy, 1, 1, fp) == 1) {
+      size++;
+    }
+    if (size > 0) {
+      cmdline = NEW_C_HEAP_ARRAY(char, size + 1, mtInternal);
+      if (cmdline != NULL) {
+        cmdline[0] = '\0';
+        if (fseek(fp, 0, SEEK_SET) == 0) {
+          if (fread(cmdline, 1, size, fp) == size) {
+            // the file has the arguments separated by '\0',
+            // so we translate '\0' to ' '
+            for (size_t i = 0; i < size; i++) {
+              if (cmdline[i] == '\0') {
+                cmdline[i] = ' ';
+              }
+            }
+            cmdline[size] = '\0';
+          }
+        }
+      }
+    }
+    fclose(fp);
+  }
+  return cmdline;
+}
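For reference, /proc/<pid>/cmdline stores the arguments NUL-separated, so a file containing, say, "java\0-Xmx1g\0Main\0" (example contents, not taken from a real process) comes back from this function with every NUL turned into a space, i.e. "java -Xmx1g Main " including a trailing blank from the final separator.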
+
+// get full path to exe from /proc/<pid>/exe symlink
+char* SystemProcessInterface::SystemProcesses::ProcessIterator::get_exe_path() {
+  char buffer[PATH_MAX];
+
+  jio_snprintf(buffer, PATH_MAX, "/proc/%s/exe", _entry->d_name);
+  buffer[PATH_MAX - 1] = '\0';
+  return realpath(buffer, _exePath);
+}
+
+char* SystemProcessInterface::SystemProcesses::ProcessIterator::allocate_string(const char* str) const {
+  if (str != NULL) {
+    size_t len = strlen(str);
+    char* tmp = NEW_C_HEAP_ARRAY(char, len+1, mtInternal);
+    strncpy(tmp, str, len);
+    tmp[len] = '\0';
+    return tmp;
+  }
+  return NULL;
+}
+
+int SystemProcessInterface::SystemProcesses::ProcessIterator::current(SystemProcess* process_info) {
+  if (!is_valid()) {
+    return OS_ERR;
+  }
+
+  process_info->set_pid(atoi(_entry->d_name));
+
+  get_exe_name();
+  process_info->set_name(allocate_string(_exeName));
+
+  if (get_exe_path() != NULL) {
+     process_info->set_path(allocate_string(_exePath));
+  }
+
+  char* cmdline = NULL;
+  cmdline = get_cmdline();
+  if (cmdline != NULL) {
+    process_info->set_command_line(allocate_string(cmdline));
+    FREE_C_HEAP_ARRAY(char, cmdline, mtInternal);
+  }
+
+  return OS_OK;
+}
+
+int SystemProcessInterface::SystemProcesses::ProcessIterator::next_process() {
+  if (!is_valid()) {
+    return OS_ERR;
+  }
+
+  do {
+    _entry = os::readdir(_dir);
+    if (_entry == NULL) {
+      // Error or reached end.  Could use errno to distinguish those cases.
+      _valid = false;
+      return OS_ERR;
+    }
+  } while(!is_valid_entry(_entry));
+
+  _valid = true;
+  return OS_OK;
+}
+
+SystemProcessInterface::SystemProcesses::ProcessIterator::ProcessIterator() {
+  _dir = NULL;
+  _entry = NULL;
+  _valid = false;
+}
+
+bool SystemProcessInterface::SystemProcesses::ProcessIterator::initialize() {
+  // Not yet implemented.
+  return false;
+}
+
+SystemProcessInterface::SystemProcesses::ProcessIterator::~ProcessIterator() {
+  if (_dir != NULL) {
+    os::closedir(_dir);
+  }
+}
+
+SystemProcessInterface::SystemProcesses::SystemProcesses() {
+  _iterator = NULL;
+}
+
+bool SystemProcessInterface::SystemProcesses::initialize() {
+  _iterator = new SystemProcessInterface::SystemProcesses::ProcessIterator();
+  return NULL == _iterator ? false : _iterator->initialize();
+}
+
+SystemProcessInterface::SystemProcesses::~SystemProcesses() {
+  if (_iterator != NULL) {
+    delete _iterator;
+  }
+}
+
+int SystemProcessInterface::SystemProcesses::system_processes(SystemProcess** system_processes, int* no_of_sys_processes) const {
+  assert(system_processes != NULL, "system_processes pointer is NULL!");
+  assert(no_of_sys_processes != NULL, "system_processes counter pointer is NULL!");
+  assert(_iterator != NULL, "iterator is NULL!");
+
+  // initialize pointers
+  *no_of_sys_processes = 0;
+  *system_processes = NULL;
+
+  while (_iterator->is_valid()) {
+    SystemProcess* tmp = new SystemProcess();
+    _iterator->current(tmp);
+
+    // if there is already a head
+    if (*system_processes != NULL) {
+      // link the current head in behind the new entry
+      tmp->set_next(*system_processes);
+    }
+    // new head
+    *system_processes = tmp;
+    // increment
+    (*no_of_sys_processes)++;
+    // step forward
+    _iterator->next_process();
+  }
+  return OS_OK;
+}
+
+int SystemProcessInterface::system_processes(SystemProcess** system_procs, int* no_of_sys_processes) const {
+  return _impl->system_processes(system_procs, no_of_sys_processes);
+}
+
+SystemProcessInterface::SystemProcessInterface() {
+  _impl = NULL;
+}
+
+bool SystemProcessInterface::initialize() {
+  _impl = new SystemProcessInterface::SystemProcesses();
+  return NULL == _impl ? false : _impl->initialize();
+}
+
+SystemProcessInterface::~SystemProcessInterface() {
+  if (_impl != NULL) {
+    delete _impl;
+  }
+}
+
+CPUInformationInterface::CPUInformationInterface() {
+  _cpu_info = NULL;
+}
+
+bool CPUInformationInterface::initialize() {
+  _cpu_info = new CPUInformation();
+  if (NULL == _cpu_info) {
+    return false;
+  }
+  _cpu_info->set_number_of_hardware_threads(VM_Version_Ext::number_of_threads());
+  _cpu_info->set_number_of_cores(VM_Version_Ext::number_of_cores());
+  _cpu_info->set_number_of_sockets(VM_Version_Ext::number_of_sockets());
+  _cpu_info->set_cpu_name(VM_Version_Ext::cpu_name());
+  _cpu_info->set_cpu_description(VM_Version_Ext::cpu_description());
+
+  return true;
+}
+
+CPUInformationInterface::~CPUInformationInterface() {
+  if (_cpu_info != NULL) {
+    if (_cpu_info->cpu_name() != NULL) {
+      const char* cpu_name = _cpu_info->cpu_name();
+      FREE_C_HEAP_ARRAY(char, cpu_name, mtInternal);
+      _cpu_info->set_cpu_name(NULL);
+    }
+    if (_cpu_info->cpu_description() != NULL) {
+      const char* cpu_desc = _cpu_info->cpu_description();
+      FREE_C_HEAP_ARRAY(char, cpu_desc, mtInternal);
+      _cpu_info->set_cpu_description(NULL);
+    }
+    delete _cpu_info;
+  }
+}
+
+int CPUInformationInterface::cpu_information(CPUInformation& cpu_info) {
+  if (_cpu_info == NULL) {
+    return OS_ERR;
+  }
+
+  cpu_info = *_cpu_info; // shallow copy assignment
+  return OS_OK;
+}
+
+class NetworkPerformanceInterface::NetworkPerformance : public CHeapObj<mtInternal> {
+  friend class NetworkPerformanceInterface;
+ private:
+  NetworkPerformance();
+  NetworkPerformance(const NetworkPerformance& rhs); // no impl
+  NetworkPerformance& operator=(const NetworkPerformance& rhs); // no impl
+  bool initialize();
+  ~NetworkPerformance();
+  int network_utilization(NetworkInterface** network_interfaces) const;
+};
+
+NetworkPerformanceInterface::NetworkPerformance::NetworkPerformance() {
+
+}
+
+bool NetworkPerformanceInterface::NetworkPerformance::initialize() {
+  return true;
+}
+
+NetworkPerformanceInterface::NetworkPerformance::~NetworkPerformance() {
+}
+
+int NetworkPerformanceInterface::NetworkPerformance::network_utilization(NetworkInterface** network_interfaces) const
+{
+  return FUNCTIONALITY_NOT_IMPLEMENTED;
+}
+
+NetworkPerformanceInterface::NetworkPerformanceInterface() {
+  _impl = NULL;
+}
+
+NetworkPerformanceInterface::~NetworkPerformanceInterface() {
+  if (_impl != NULL) {
+    delete _impl;
+  }
+}
+
+bool NetworkPerformanceInterface::initialize() {
+  _impl = new NetworkPerformanceInterface::NetworkPerformance();
+  return _impl != NULL && _impl->initialize();
+}
+
+int NetworkPerformanceInterface::network_utilization(NetworkInterface** network_interfaces) const {
+  return _impl->network_utilization(network_interfaces);
+}
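
For reference, a minimal standalone sketch of the /proc walk that ProcessIterator above performs: every directory with a purely numeric name that contains a stat file is treated as a process, and the executable name is the text between the first '(' and the last ')' in /proc/<pid>/stat. The sketch assumes a Linux-style /proc layout and uses plain libc calls in place of the VM's jio_snprintf/NEW_C_HEAP_ARRAY helpers; exe_name_of is an illustrative name.

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>
    #include <climits>
    #include <dirent.h>

    // Pull the executable name out of /proc/<pid>/stat: it is the text between
    // the first '(' and the last ')'.
    static bool exe_name_of(const char* pid, char* out, size_t out_len) {
      char path[PATH_MAX];
      snprintf(path, sizeof(path), "/proc/%s/stat", pid);
      FILE* fp = fopen(path, "r");
      if (fp == NULL) {
        return false;
      }
      bool ok = false;
      char line[PATH_MAX];
      if (fgets(line, sizeof(line), fp) != NULL) {
        char* start = strchr(line, '(');
        char* end = (start != NULL) ? strrchr(start + 1, ')') : NULL;
        if (start != NULL && end != NULL) {
          size_t len = (size_t)(end - (start + 1));
          if (len >= out_len) {
            len = out_len - 1;
          }
          memcpy(out, start + 1, len);
          out[len] = '\0';
          ok = true;
        }
      }
      fclose(fp);
      return ok;
    }

    int main() {
      DIR* dir = opendir("/proc");
      if (dir == NULL) {
        return 1;
      }
      struct dirent* entry;
      while ((entry = readdir(dir)) != NULL) {
        if (atoi(entry->d_name) == 0) {
          continue;  // same purely-numeric-name filter as is_valid_entry()
        }
        char name[PATH_MAX];
        if (exe_name_of(entry->d_name, name, sizeof(name))) {
          printf("%s %s\n", entry->d_name, name);
        }
      }
      closedir(dir);
      return 0;
    }
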
--- a/src/os/bsd/vm/os_bsd.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/os/bsd/vm/os_bsd.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -1690,6 +1690,53 @@
 #endif
 }
 
+int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
+#ifdef RTLD_DI_LINKMAP
+  Dl_info dli;
+  void *handle;
+  Link_map *map;
+  Link_map *p;
+
+  if (dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli) == 0 ||
+      dli.dli_fname == NULL) {
+    return 1;
+  }
+  handle = dlopen(dli.dli_fname, RTLD_LAZY);
+  if (handle == NULL) {
+    return 1;
+  }
+  dlinfo(handle, RTLD_DI_LINKMAP, &map);
+  if (map == NULL) {
+    dlclose(handle);
+    return 1;
+  }
+
+  while (map->l_prev != NULL)
+    map = map->l_prev;
+
+  while (map != NULL) {
+    // Value for top_address is returned as 0 since we don't have any information about module size
+    if (callback(map->l_name, (address)map->l_addr, (address)0, param)) {
+      dlclose(handle);
+      return 1;
+    }
+    map = map->l_next;
+  }
+
+  dlclose(handle);
+  return 0;
+#elif defined(__APPLE__)
+  for (uint32_t i = 1; i < _dyld_image_count(); i++) {
+    // Value for top_address is returned as 0 since we don't have any information about module size
+    if (callback(_dyld_get_image_name(i), (address)_dyld_get_image_header(i), (address)0, param)) {
+      return 1;
+    }
+  }
+  return 0;
+#else
+  return 1;
+#endif
+}
+
 void os::print_os_info_brief(outputStream* st) {
   st->print("Bsd");
 
@@ -2562,6 +2609,10 @@
   RESTARTABLE_RETURN_INT(::read(fd, buf, nBytes));
 }
 
+size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
+  RESTARTABLE_RETURN_INT(::pread(fd, buf, nBytes, offset));
+}
+
 // TODO-FIXME: reconcile Solaris' os::sleep with the bsd variation.
 // Solaris uses poll(), bsd uses park().
 // Poll() is likely a better choice, assuming that Thread.interrupt()
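
The __APPLE__ branch of os::get_loaded_modules_info() above walks the dyld image list and passes each image's name and load address to the callback, stopping as soon as the callback returns non-zero. A standalone, macOS-only sketch of that walk with a simplified callback signature (print_module and its void* parameter are illustrative, not the VM's LoadedModulesCallbackFunc):

    #include <mach-o/dyld.h>
    #include <cstdint>
    #include <cstdio>

    // Callback in the spirit of the patch: a non-zero return aborts the walk.
    static int print_module(const char* name, const void* base, void* param) {
      (void)param;
      printf("%p %s\n", (void*)base, name);
      return 0;
    }

    int main() {
      // Image 0 is the main executable; the patch starts at index 1 as well.
      for (uint32_t i = 1; i < _dyld_image_count(); i++) {
        if (print_module(_dyld_get_image_name(i), _dyld_get_image_header(i), NULL)) {
          return 1;
        }
      }
      return 0;
    }
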
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/bsd/vm/os_perf_bsd.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,491 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/os.hpp"
+#include "runtime/os_perf.hpp"
+#include "vm_version_ext_x86.hpp"
+
+#ifdef __APPLE__
+  #import <libproc.h>
+  #include <sys/time.h>
+  #include <sys/sysctl.h>
+  #include <mach/mach.h>
+  #include <mach/task_info.h>
+  #include <sys/socket.h>
+  #include <net/if.h>
+  #include <net/if_dl.h>
+  #include <net/route.h>
+#endif
+
+static const double NANOS_PER_SEC = 1000000000.0;
+
+class CPUPerformanceInterface::CPUPerformance : public CHeapObj<mtInternal> {
+   friend class CPUPerformanceInterface;
+ private:
+  long _total_cpu_nanos;
+  long _total_csr_nanos;
+  long _jvm_user_nanos;
+  long _jvm_system_nanos;
+  long _jvm_context_switches;
+  long _used_ticks;
+  long _total_ticks;
+  int  _active_processor_count;
+
+  bool now_in_nanos(long* resultp) {
+    timeval current_time;
+    if (gettimeofday(&current_time, NULL) != 0) {
+      // Error getting current time
+      return false;
+    }
+    *resultp = (long)(current_time.tv_sec * NANOS_PER_SEC + 1000L * current_time.tv_usec);
+    return true;
+  }
+
+  double normalize(double value) {
+    return MIN2<double>(MAX2<double>(value, 0.0), 1.0);
+  }
+  int cpu_load(int which_logical_cpu, double* cpu_load);
+  int context_switch_rate(double* rate);
+  int cpu_load_total_process(double* cpu_load);
+  int cpu_loads_process(double* pjvmUserLoad, double* pjvmKernelLoad, double* psystemTotalLoad);
+
+  CPUPerformance(const CPUPerformance& rhs); // no impl
+  CPUPerformance& operator=(const CPUPerformance& rhs); // no impl
+ public:
+  CPUPerformance();
+  bool initialize();
+  ~CPUPerformance();
+};
+
+CPUPerformanceInterface::CPUPerformance::CPUPerformance() {
+  _total_cpu_nanos = 0;
+  _total_csr_nanos = 0;
+  _jvm_context_switches = 0;
+  _jvm_user_nanos = 0;
+  _jvm_system_nanos = 0;
+  _used_ticks = 0;
+  _total_ticks = 0;
+  _active_processor_count = 0;
+}
+
+bool CPUPerformanceInterface::CPUPerformance::initialize() {
+  return true;
+}
+
+CPUPerformanceInterface::CPUPerformance::~CPUPerformance() {
+}
+
+int CPUPerformanceInterface::CPUPerformance::cpu_load(int which_logical_cpu, double* cpu_load) {
+  return FUNCTIONALITY_NOT_IMPLEMENTED;
+}
+
+int CPUPerformanceInterface::CPUPerformance::cpu_load_total_process(double* cpu_load) {
+#ifdef __APPLE__
+  host_name_port_t host = mach_host_self();
+  host_flavor_t flavor = HOST_CPU_LOAD_INFO;
+  mach_msg_type_number_t host_info_count = HOST_CPU_LOAD_INFO_COUNT;
+  host_cpu_load_info_data_t cpu_load_info;
+
+  kern_return_t kr = host_statistics(host, flavor, (host_info_t)&cpu_load_info, &host_info_count);
+  if (kr != KERN_SUCCESS) {
+    return OS_ERR;
+  }
+
+  long used_ticks  = cpu_load_info.cpu_ticks[CPU_STATE_USER] + cpu_load_info.cpu_ticks[CPU_STATE_NICE] + cpu_load_info.cpu_ticks[CPU_STATE_SYSTEM];
+  long total_ticks = used_ticks + cpu_load_info.cpu_ticks[CPU_STATE_IDLE];
+
+  if (_used_ticks == 0 || _total_ticks == 0) {
+    // First call, just set the values
+    _used_ticks  = used_ticks;
+    _total_ticks = total_ticks;
+    return OS_ERR;
+  }
+
+  long used_delta  = used_ticks - _used_ticks;
+  long total_delta = total_ticks - _total_ticks;
+
+  _used_ticks  = used_ticks;
+  _total_ticks = total_ticks;
+
+  if (total_delta == 0) {
+    // Avoid division by zero
+    return OS_ERR;
+  }
+
+  *cpu_load = (double)used_delta / total_delta;
+
+  return OS_OK;
+#else
+  return FUNCTIONALITY_NOT_IMPLEMENTED;
+#endif
+}
+
+int CPUPerformanceInterface::CPUPerformance::cpu_loads_process(double* pjvmUserLoad, double* pjvmKernelLoad, double* psystemTotalLoad) {
+#ifdef __APPLE__
+  int result = cpu_load_total_process(psystemTotalLoad);
+  mach_port_t task = mach_task_self();
+  mach_msg_type_number_t task_info_count = TASK_INFO_MAX;
+  task_info_data_t task_info_data;
+  kern_return_t kr = task_info(task, TASK_ABSOLUTETIME_INFO, (task_info_t)task_info_data, &task_info_count);
+  if (kr != KERN_SUCCESS) {
+    return OS_ERR;
+  }
+  task_absolutetime_info_t absolutetime_info = (task_absolutetime_info_t)task_info_data;
+
+  int active_processor_count = os::active_processor_count();
+  long jvm_user_nanos = absolutetime_info->total_user;
+  long jvm_system_nanos = absolutetime_info->total_system;
+
+  long total_cpu_nanos;
+  if (!now_in_nanos(&total_cpu_nanos)) {
+    return OS_ERR;
+  }
+
+  if (_total_cpu_nanos == 0 || active_processor_count != _active_processor_count) {
+    // First call or change in active processor count
+    result = OS_ERR;
+  }
+
+  long delta_nanos = active_processor_count * (total_cpu_nanos - _total_cpu_nanos);
+  if (delta_nanos == 0) {
+    // Avoid division by zero
+    return OS_ERR;
+  }
+
+  *pjvmUserLoad = normalize((double)(jvm_user_nanos - _jvm_user_nanos)/delta_nanos);
+  *pjvmKernelLoad = normalize((double)(jvm_system_nanos - _jvm_system_nanos)/delta_nanos);
+
+  _active_processor_count = active_processor_count;
+  _total_cpu_nanos = total_cpu_nanos;
+  _jvm_user_nanos = jvm_user_nanos;
+  _jvm_system_nanos = jvm_system_nanos;
+
+  return result;
+#else
+  return FUNCTIONALITY_NOT_IMPLEMENTED;
+#endif
+}
+
+int CPUPerformanceInterface::CPUPerformance::context_switch_rate(double* rate) {
+#ifdef __APPLE__
+  mach_port_t task = mach_task_self();
+  mach_msg_type_number_t task_info_count = TASK_INFO_MAX;
+  task_info_data_t task_info_data;
+  kern_return_t kr = task_info(task, TASK_EVENTS_INFO, (task_info_t)task_info_data, &task_info_count);
+  if (kr != KERN_SUCCESS) {
+    return OS_ERR;
+  }
+
+  int result = OS_OK;
+  if (_total_csr_nanos == 0 || _jvm_context_switches == 0) {
+    // First call, just set initial values.
+    result = OS_ERR;
+  }
+
+  long jvm_context_switches = ((task_events_info_t)task_info_data)->csw;
+
+  long total_csr_nanos;
+  if (!now_in_nanos(&total_csr_nanos)) {
+    return OS_ERR;
+  }
+  double delta_in_sec = (double)(total_csr_nanos - _total_csr_nanos) / NANOS_PER_SEC;
+  if (delta_in_sec == 0.0) {
+    // Avoid division by zero
+    return OS_ERR;
+  }
+
+  *rate = (jvm_context_switches - _jvm_context_switches) / delta_in_sec;
+
+  _jvm_context_switches = jvm_context_switches;
+  _total_csr_nanos = total_csr_nanos;
+
+  return result;
+#else
+  return FUNCTIONALITY_NOT_IMPLEMENTED;
+#endif
+}
+
+CPUPerformanceInterface::CPUPerformanceInterface() {
+  _impl = NULL;
+}
+
+bool CPUPerformanceInterface::initialize() {
+  _impl = new CPUPerformanceInterface::CPUPerformance();
+  return _impl != NULL && _impl->initialize();
+}
+
+CPUPerformanceInterface::~CPUPerformanceInterface() {
+  if (_impl != NULL) {
+    delete _impl;
+  }
+}
+
+int CPUPerformanceInterface::cpu_load(int which_logical_cpu, double* cpu_load) const {
+  return _impl->cpu_load(which_logical_cpu, cpu_load);
+}
+
+int CPUPerformanceInterface::cpu_load_total_process(double* cpu_load) const {
+  return _impl->cpu_load_total_process(cpu_load);
+}
+
+int CPUPerformanceInterface::cpu_loads_process(double* pjvmUserLoad, double* pjvmKernelLoad, double* psystemTotalLoad) const {
+  return _impl->cpu_loads_process(pjvmUserLoad, pjvmKernelLoad, psystemTotalLoad);
+}
+
+int CPUPerformanceInterface::context_switch_rate(double* rate) const {
+  return _impl->context_switch_rate(rate);
+}
+
+class SystemProcessInterface::SystemProcesses : public CHeapObj<mtInternal> {
+  friend class SystemProcessInterface;
+ private:
+  SystemProcesses();
+  bool initialize();
+  SystemProcesses(const SystemProcesses& rhs); // no impl
+  SystemProcesses& operator=(const SystemProcesses& rhs); // no impl
+  ~SystemProcesses();
+
+  //information about system processes
+  int system_processes(SystemProcess** system_processes, int* no_of_sys_processes) const;
+};
+
+SystemProcessInterface::SystemProcesses::SystemProcesses() {
+}
+
+bool SystemProcessInterface::SystemProcesses::initialize() {
+  return true;
+}
+
+SystemProcessInterface::SystemProcesses::~SystemProcesses() {
+}
+
+int SystemProcessInterface::SystemProcesses::system_processes(SystemProcess** system_processes, int* no_of_sys_processes) const {
+  assert(system_processes != NULL, "system_processes pointer is NULL!");
+  assert(no_of_sys_processes != NULL, "system_processes counter pointer is NULL!");
+#ifdef __APPLE__
+  pid_t* pids = NULL;
+  int pid_count = 0;
+  ResourceMark rm;
+
+  int try_count = 0;
+  while (pids == NULL) {
+    // Find out buffer size
+    size_t pids_bytes = proc_listpids(PROC_ALL_PIDS, 0, NULL, 0);
+    if (pids_bytes <= 0) {
+      return OS_ERR;
+    }
+    pid_count = pids_bytes / sizeof(pid_t);
+    pids = NEW_RESOURCE_ARRAY(pid_t, pid_count);
+    memset(pids, 0, pids_bytes);
+
+    pids_bytes = proc_listpids(PROC_ALL_PIDS, 0, pids, pids_bytes);
+    if (pids_bytes <= 0) {
+      // couldn't fit buffer, retry.
+      FREE_RESOURCE_ARRAY(pid_t, pids, pid_count);
+      pids = NULL;
+      try_count++;
+      if (try_count > 3) {
+        return OS_ERR;
+      }
+    } else {
+      pid_count = pids_bytes / sizeof(pid_t);
+    }
+  }
+
+  int process_count = 0;
+  SystemProcess* next = NULL;
+  for (int i = 0; i < pid_count; i++) {
+    pid_t pid = pids[i];
+    if (pid != 0) {
+      char buffer[PROC_PIDPATHINFO_MAXSIZE];
+      memset(buffer, 0, sizeof(buffer));
+      if (proc_pidpath(pid, buffer, sizeof(buffer)) != -1) {
+        int length = strlen(buffer);
+        if (length > 0) {
+          SystemProcess* current = new SystemProcess();
+          char * path = NEW_C_HEAP_ARRAY(char, length + 1, mtInternal);
+          strcpy(path, buffer);
+          current->set_path(path);
+          current->set_pid((int)pid);
+          current->set_next(next);
+          next = current;
+          process_count++;
+        }
+      }
+    }
+  }
+
+  *no_of_sys_processes = process_count;
+  *system_processes = next;
+
+  return OS_OK;
+#endif
+  return FUNCTIONALITY_NOT_IMPLEMENTED;
+}
+
+int SystemProcessInterface::system_processes(SystemProcess** system_procs, int* no_of_sys_processes) const {
+  return _impl->system_processes(system_procs, no_of_sys_processes);
+}
+
+SystemProcessInterface::SystemProcessInterface() {
+  _impl = NULL;
+}
+
+bool SystemProcessInterface::initialize() {
+  _impl = new SystemProcessInterface::SystemProcesses();
+  return _impl != NULL && _impl->initialize();
+}
+
+SystemProcessInterface::~SystemProcessInterface() {
+  if (_impl != NULL) {
+    delete _impl;
+  }
+}
+
+CPUInformationInterface::CPUInformationInterface() {
+  _cpu_info = NULL;
+}
+
+bool CPUInformationInterface::initialize() {
+  _cpu_info = new CPUInformation();
+
+  if (NULL == _cpu_info) {
+    return false;
+  }
+  _cpu_info->set_number_of_hardware_threads(VM_Version_Ext::number_of_threads());
+  _cpu_info->set_number_of_cores(VM_Version_Ext::number_of_cores());
+  _cpu_info->set_number_of_sockets(VM_Version_Ext::number_of_sockets());
+  _cpu_info->set_cpu_name(VM_Version_Ext::cpu_name());
+  _cpu_info->set_cpu_description(VM_Version_Ext::cpu_description());
+
+  return true;
+}
+
+CPUInformationInterface::~CPUInformationInterface() {
+  if (_cpu_info != NULL) {
+    if (_cpu_info->cpu_name() != NULL) {
+      const char* cpu_name = _cpu_info->cpu_name();
+      FREE_C_HEAP_ARRAY(char, cpu_name, mtInternal);
+      _cpu_info->set_cpu_name(NULL);
+    }
+    if (_cpu_info->cpu_description() != NULL) {
+      const char* cpu_desc = _cpu_info->cpu_description();
+      FREE_C_HEAP_ARRAY(char, cpu_desc, mtInternal);
+      _cpu_info->set_cpu_description(NULL);
+    }
+    delete _cpu_info;
+  }
+}
+
+int CPUInformationInterface::cpu_information(CPUInformation& cpu_info) {
+  if (NULL == _cpu_info) {
+    return OS_ERR;
+  }
+
+  cpu_info = *_cpu_info; // shallow copy assignment
+  return OS_OK;
+}
+
+class NetworkPerformanceInterface::NetworkPerformance : public CHeapObj<mtInternal> {
+  friend class NetworkPerformanceInterface;
+ private:
+  NetworkPerformance();
+  NetworkPerformance(const NetworkPerformance& rhs); // no impl
+  NetworkPerformance& operator=(const NetworkPerformance& rhs); // no impl
+  bool initialize();
+  ~NetworkPerformance();
+  int network_utilization(NetworkInterface** network_interfaces) const;
+};
+
+NetworkPerformanceInterface::NetworkPerformance::NetworkPerformance() {
+}
+
+bool NetworkPerformanceInterface::NetworkPerformance::initialize() {
+  return true;
+}
+
+NetworkPerformanceInterface::NetworkPerformance::~NetworkPerformance() {
+}
+
+int NetworkPerformanceInterface::NetworkPerformance::network_utilization(NetworkInterface** network_interfaces) const {
+  size_t len;
+  int mib[] = {CTL_NET, PF_ROUTE, /* protocol number */ 0, /* address family */ 0, NET_RT_IFLIST2, /* NET_RT_FLAGS mask*/ 0};
+  if (sysctl(mib, sizeof(mib) / sizeof(mib[0]), NULL, &len, NULL, 0) != 0) {
+    return OS_ERR;
+  }
+  uint8_t* buf = NEW_RESOURCE_ARRAY(uint8_t, len);
+  if (sysctl(mib, sizeof(mib) / sizeof(mib[0]), buf, &len, NULL, 0) != 0) {
+    return OS_ERR;
+  }
+
+  size_t index = 0;
+  NetworkInterface* ret = NULL;
+  while (index < len) {
+    if_msghdr* msghdr = reinterpret_cast<if_msghdr*>(buf + index);
+    index += msghdr->ifm_msglen;
+
+    if (msghdr->ifm_type != RTM_IFINFO2) {
+      continue;
+    }
+
+    if_msghdr2* msghdr2 = reinterpret_cast<if_msghdr2*>(msghdr);
+    sockaddr_dl* sockaddr = reinterpret_cast<sockaddr_dl*>(msghdr2 + 1);
+
+    // The interface name is not necessarily NUL-terminated
+    char name_buf[128];
+    size_t name_len = MIN2(sizeof(name_buf) - 1, static_cast<size_t>(sockaddr->sdl_nlen));
+    strncpy(name_buf, sockaddr->sdl_data, name_len);
+    name_buf[name_len] = '\0';
+
+    uint64_t bytes_in = msghdr2->ifm_data.ifi_ibytes;
+    uint64_t bytes_out = msghdr2->ifm_data.ifi_obytes;
+
+    NetworkInterface* cur = new NetworkInterface(name_buf, bytes_in, bytes_out, ret);
+    ret = cur;
+  }
+
+  *network_interfaces = ret;
+
+  return OS_OK;
+}
+
+NetworkPerformanceInterface::NetworkPerformanceInterface() {
+  _impl = NULL;
+}
+
+NetworkPerformanceInterface::~NetworkPerformanceInterface() {
+  if (_impl != NULL) {
+    delete _impl;
+  }
+}
+
+bool NetworkPerformanceInterface::initialize() {
+  _impl = new NetworkPerformanceInterface::NetworkPerformance();
+  return _impl != NULL && _impl->initialize();
+}
+
+int NetworkPerformanceInterface::network_utilization(NetworkInterface** network_interfaces) const {
+  return _impl->network_utilization(network_interfaces);
+}
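
cpu_load_total_process() above works on deltas of the cumulative tick counters returned by host_statistics(): it remembers the previous (used, total) pair and reports used_delta / total_delta, reporting an error on the first sample and whenever no ticks have passed. A minimal sketch of just that delta arithmetic (TickSample and cpu_load_between are illustrative names):

    #include <algorithm>

    // Two cumulative samples of (used, total) CPU ticks, e.g. from host_statistics().
    struct TickSample {
      long used;
      long total;
    };

    // Fraction of CPU time spent busy between two samples, clamped to [0.0, 1.0].
    // A negative return means the delta is unusable (first sample, or no ticks
    // have passed), mirroring the OS_ERR early-outs in cpu_load_total_process().
    static double cpu_load_between(const TickSample& prev, const TickSample& cur) {
      if (prev.total == 0 || cur.total <= prev.total) {
        return -1.0;
      }
      double load = (double)(cur.used - prev.used) / (double)(cur.total - prev.total);
      return std::min(std::max(load, 0.0), 1.0);
    }
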
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/bsd/vm/semaphore_bsd.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "semaphore_bsd.hpp"
+#include "utilities/debug.hpp"
+
+#include <semaphore.h>
+
+#ifdef __APPLE__
+// OS X doesn't support unnamed POSIX semaphores, so the implementation in os_posix.cpp can't be used.
+
+static const char* sem_init_strerror(kern_return_t value) {
+  switch (value) {
+    case KERN_INVALID_ARGUMENT:  return "Invalid argument";
+    case KERN_RESOURCE_SHORTAGE: return "Resource shortage";
+    default:                     return "Unknown";
+  }
+}
+
+OSXSemaphore::OSXSemaphore(uint value) {
+  kern_return_t ret = semaphore_create(mach_task_self(), &_semaphore, SYNC_POLICY_FIFO, value);
+
+  guarantee(ret == KERN_SUCCESS, err_msg("Failed to create semaphore: %s", sem_init_strerror(ret)));
+}
+
+OSXSemaphore::~OSXSemaphore() {
+  semaphore_destroy(mach_task_self(), _semaphore);
+}
+
+void OSXSemaphore::signal(uint count) {
+  for (uint i = 0; i < count; i++) {
+    kern_return_t ret = semaphore_signal(_semaphore);
+
+    assert(ret == KERN_SUCCESS, "Failed to signal semaphore");
+  }
+}
+
+void OSXSemaphore::wait() {
+  kern_return_t ret;
+  while ((ret = semaphore_wait(_semaphore)) == KERN_ABORTED) {
+    // Semaphore was interrupted. Retry.
+  }
+  assert(ret == KERN_SUCCESS, "Failed to wait on semaphore");
+}
+
+int64_t OSXSemaphore::currenttime() {
+  struct timeval tv;
+  gettimeofday(&tv, NULL);
+  return (tv.tv_sec * NANOSECS_PER_SEC) + (tv.tv_usec * 1000);
+}
+
+bool OSXSemaphore::trywait() {
+  return timedwait(0, 0);
+}
+
+bool OSXSemaphore::timedwait(unsigned int sec, int nsec) {
+  kern_return_t kr = KERN_ABORTED;
+  mach_timespec_t waitspec;
+  waitspec.tv_sec = sec;
+  waitspec.tv_nsec = nsec;
+
+  int64_t starttime = currenttime();
+
+  kr = semaphore_timedwait(_semaphore, waitspec);
+  while (kr == KERN_ABORTED) {
+    int64_t totalwait = (sec * NANOSECS_PER_SEC) + nsec;
+
+    int64_t current = currenttime();
+    int64_t passedtime = current - starttime;
+
+    if (passedtime >= totalwait) {
+      waitspec.tv_sec = 0;
+      waitspec.tv_nsec = 0;
+    } else {
+      int64_t waittime = totalwait - (current - starttime);
+      waitspec.tv_sec = waittime / NANOSECS_PER_SEC;
+      waitspec.tv_nsec = waittime % NANOSECS_PER_SEC;
+    }
+
+    kr = semaphore_timedwait(_semaphore, waitspec);
+  }
+
+  return kr == KERN_SUCCESS;
+}
+#endif // __APPLE__
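
timedwait() above retries semaphore_timedwait() whenever the wait is interrupted with KERN_ABORTED, recomputing how much of the originally requested wait is still left. The remaining-time arithmetic in isolation, as a minimal sketch (remaining_wait is an illustrative name):

    #include <cstdint>

    static const int64_t NANOS_PER_SECOND = 1000000000;

    // Split the wait time still outstanding into the seconds/nanoseconds pair that
    // semaphore_timedwait() expects; this is the arithmetic the KERN_ABORTED retry
    // loop above performs. All inputs are times in nanoseconds.
    static void remaining_wait(int64_t start_ns, int64_t total_wait_ns, int64_t now_ns,
                               unsigned int* sec, int* nsec) {
      int64_t passed_ns = now_ns - start_ns;
      if (passed_ns >= total_wait_ns) {
        *sec = 0;
        *nsec = 0;
      } else {
        int64_t left_ns = total_wait_ns - passed_ns;
        *sec  = (unsigned int)(left_ns / NANOS_PER_SECOND);
        *nsec = (int)(left_ns % NANOS_PER_SECOND);
      }
    }
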
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/bsd/vm/semaphore_bsd.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_BSD_VM_SEMAPHORE_BSD_HPP
+#define OS_BSD_VM_SEMAPHORE_BSD_HPP
+
+#ifndef __APPLE__
+// Use POSIX semaphores.
+# include "semaphore_posix.hpp"
+
+#else
+// OS X doesn't support unnamed POSIX semaphores, so the implementation in os_posix.cpp can't be used.
+# include "memory/allocation.hpp"
+# include <mach/semaphore.h>
+
+class OSXSemaphore : public CHeapObj<mtInternal> {
+  semaphore_t _semaphore;
+
+  // Prevent copying and assignment.
+  OSXSemaphore(const OSXSemaphore&);
+  OSXSemaphore& operator=(const OSXSemaphore&);
+
+ public:
+  OSXSemaphore(uint value = 0);
+  ~OSXSemaphore();
+
+  void signal(uint count = 1);
+
+  void wait();
+
+  bool trywait();
+  bool timedwait(unsigned int sec, int nsec);
+
+ private:
+  static int64_t currenttime();
+};
+
+typedef OSXSemaphore SemaphoreImpl;
+
+#endif // __APPLE__
+
+#endif // OS_BSD_VM_SEMAPHORE_BSD_HPP
--- a/src/os/linux/vm/os_linux.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/os/linux/vm/os_linux.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -2146,6 +2146,41 @@
    }
 }
 
+int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
+  FILE *procmapsFile = NULL;
+
+  // Open the procfs maps file for the current process
+  if ((procmapsFile = fopen("/proc/self/maps", "r")) != NULL) {
+    // Allocate PATH_MAX for file name plus a reasonable size for other fields.
+    char line[PATH_MAX + 100];
+
+    // Read line by line from 'file'
+    while (fgets(line, sizeof(line), procmapsFile) != NULL) {
+      u8 base, top, offset, inode;
+      char permissions[5];
+      char device[6];
+      char name[PATH_MAX + 1];
+
+      // Parse fields from line
+      sscanf(line, UINT64_FORMAT_X "-" UINT64_FORMAT_X " %4s " UINT64_FORMAT_X " %5s " INT64_FORMAT " %s",
+             &base, &top, permissions, &offset, device, &inode, name);
+
+      // Filter by device id '00:00' so that we only get file system mapped files.
+      if (strcmp(device, "00:00") != 0) {
+
+        // Call callback with the fields of interest
+        if (callback(name, (address)base, (address)top, param)) {
+          // The callback requested that we abort the scan
+          fclose(procmapsFile);
+          return 1;
+        }
+      }
+    }
+    fclose(procmapsFile);
+  }
+  return 0;
+}
+
 void os::print_os_info_brief(outputStream* st) {
   os::Linux::print_distro_info(st);
 
@@ -4030,6 +4065,10 @@
   return ::read(fd, buf, nBytes);
 }
 
+size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
+  return ::pread(fd, buf, nBytes, offset);
+}
+
 // TODO-FIXME: reconcile Solaris' os::sleep with the linux variation.
 // Solaris uses poll(), linux uses park().
 // Poll() is likely a better choice, assuming that Thread.interrupt()
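
The Linux os::get_loaded_modules_info() above parses each /proc/self/maps line into base, top, permissions, offset, device, inode and pathname, and skips anonymous mappings, which carry the device id 00:00. A standalone sketch of the same parse, using the <cinttypes> format macros in place of the VM's UINT64_FORMAT_X/INT64_FORMAT:

    #include <cstdio>
    #include <cstring>
    #include <cinttypes>
    #include <climits>

    // Print the file-backed mappings of the current process, using the same field
    // layout and the same "device 00:00 means anonymous" filter as the patch.
    int main() {
      FILE* maps = fopen("/proc/self/maps", "r");
      if (maps == NULL) {
        return 1;
      }
      char line[PATH_MAX + 100];
      while (fgets(line, sizeof(line), maps) != NULL) {
        uint64_t base, top, offset, inode;
        char permissions[5];
        char device[6];
        char name[PATH_MAX + 1];
        name[0] = '\0';  // the pathname field is absent for anonymous mappings
        int n = sscanf(line, "%" SCNx64 "-%" SCNx64 " %4s %" SCNx64 " %5s %" SCNu64 " %s",
                       &base, &top, permissions, &offset, device, &inode, name);
        if (n >= 6 && strcmp(device, "00:00") != 0) {
          printf("0x%" PRIx64 "-0x%" PRIx64 " %s\n", base, top, name);
        }
      }
      fclose(maps);
      return 0;
    }
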
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/linux/vm/os_perf_linux.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,1148 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jvm.h"
+#include "memory/allocation.inline.hpp"
+#include "os_linux.inline.hpp"
+#include "runtime/os.hpp"
+#include "runtime/os_perf.hpp"
+
+#ifdef TARGET_ARCH_aarch32
+# include "vm_version_ext_aarch32.hpp"
+#endif
+#ifdef TARGET_ARCH_x86
+# include "vm_version_ext_x86.hpp"
+#endif
+#ifdef TARGET_ARCH_sparc
+# include "vm_version_ext_sparc.hpp"
+#endif
+#ifdef TARGET_ARCH_zero
+# include "vm_version_ext_zero.hpp"
+#endif
+#ifdef TARGET_ARCH_arm
+# include "vm_version_ext_arm.hpp"
+#endif
+#ifdef TARGET_ARCH_ppc
+# include "vm_version_ext_ppc.hpp"
+#endif
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+#include <sys/resource.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <dirent.h>
+#include <stdlib.h>
+#include <dlfcn.h>
+#include <pthread.h>
+#include <limits.h>
+#include <ifaddrs.h>
+#include <fcntl.h>
+
+/**
+   /proc/[number]/stat
+              Status information about the process.  This is used by ps(1).  It is defined in /usr/src/linux/fs/proc/array.c.
+
+              The fields, in order, with their proper scanf(3) format specifiers, are:
+
+              1. pid %d The process id.
+
+              2. comm %s
+                     The filename of the executable, in parentheses.  This is visible whether or not the executable is swapped out.
+
+              3. state %c
+                     One  character  from  the  string "RSDZTW" where R is running, S is sleeping in an interruptible wait, D is waiting in uninterruptible disk
+                     sleep, Z is zombie, T is traced or stopped (on a signal), and W is paging.
+
+              4. ppid %d
+                     The PID of the parent.
+
+              5. pgrp %d
+                     The process group ID of the process.
+
+              6. session %d
+                     The session ID of the process.
+
+              7. tty_nr %d
+                     The tty the process uses.
+
+              8. tpgid %d
+                     The process group ID of the process which currently owns the tty that the process is connected to.
+
+              9. flags %lu
+                     The flags of the process.  The math bit is decimal 4, and the traced bit is decimal 10.
+
+              10. minflt %lu
+                     The number of minor faults the process has made which have not required loading a memory page from disk.
+
+              11. cminflt %lu
+                     The number of minor faults that the process's waited-for children have made.
+
+              12. majflt %lu
+                     The number of major faults the process has made which have required loading a memory page from disk.
+
+              13. cmajflt %lu
+                     The number of major faults that the process's waited-for children have made.
+
+              14. utime %lu
+                     The number of jiffies that this process has been scheduled in user mode.
+
+              15. stime %lu
+                     The number of jiffies that this process has been scheduled in kernel mode.
+
+              16. cutime %ld
+                     The number of jiffies that this process's waited-for children have been scheduled in user mode. (See also times(2).)
+
+              17. cstime %ld
+                     The number of jiffies that this process' waited-for children have been scheduled in kernel mode.
+
+              18. priority %ld
+                     The standard nice value, plus fifteen.  The value is never negative in the kernel.
+
+              19. nice %ld
+                     The nice value ranges from 19 (nicest) to -19 (not nice to others).
+
+              20. 0 %ld  This value is hard coded to 0 as a placeholder for a removed field.
+
+              21. itrealvalue %ld
+                     The time in jiffies before the next SIGALRM is sent to the process due to an interval timer.
+
+              22. starttime %lu
+                     The time in jiffies the process started after system boot.
+
+              23. vsize %lu
+                     Virtual memory size in bytes.
+
+              24. rss %ld
+                     Resident Set Size: number of pages the process has in real memory, minus 3 for administrative purposes. This is just the pages which  count
+                     towards text, data, or stack space.  This does not include pages which have not been demand-loaded in, or which are swapped out.
+
+              25. rlim %lu
+                     Current limit in bytes on the rss of the process (usually 4294967295 on i386).
+
+              26. startcode %lu
+                     The address above which program text can run.
+
+              27. endcode %lu
+                     The address below which program text can run.
+
+              28. startstack %lu
+                     The address of the start of the stack.
+
+              29. kstkesp %lu
+                     The current value of esp (stack pointer), as found in the kernel stack page for the process.
+
+              30. kstkeip %lu
+                     The current EIP (instruction pointer).
+
+              31. signal %lu
+                     The bitmap of pending signals (usually 0).
+
+              32. blocked %lu
+                     The bitmap of blocked signals (usually 0, 2 for shells).
+
+              33. sigignore %lu
+                     The bitmap of ignored signals.
+
+              34. sigcatch %lu
+                     The bitmap of caught signals.
+
+              35. wchan %lu
+                     This  is the "channel" in which the process is waiting.  It is the address of a system call, and can be looked up in a namelist if you need
+                     a textual name.  (If you have an up-to-date /etc/psdatabase, then try ps -l to see the WCHAN field in action.)
+
+              36. nswap %lu
+                     Number of pages swapped - not maintained.
+
+              37. cnswap %lu
+                     Cumulative nswap for child processes.
+
+              38. exit_signal %d
+                     Signal to be sent to parent when we die.
+
+              39. processor %d
+                     CPU number last executed on.
+
+
+
+ ///// SSCANF FORMAT STRING. Copy and use.
+
+field:        1  2  3  4  5  6  7  8  9   10  11  12  13  14  15  16  17  18  19  20  21  22  23  24  25  26  27  28  29  30  31  32  33  34  35  36  37  38 39
+format:       %d %s %c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld %lu %lu %ld %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %d %d
+
+
+*/
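
Given that field layout, the utime/stime pair the sampler needs (fields 14 and 15) can be read from /proc/self/stat by first skipping past the parenthesized comm field, which may contain spaces, exactly as the helpers below do. A minimal standalone sketch (self_cpu_ticks is an illustrative name):

    #include <cstdio>
    #include <cstring>
    #include <cinttypes>

    // Read utime (field 14) and stime (field 15) for the current process from
    // /proc/self/stat. Everything up to the last ')' is skipped first so that an
    // executable name containing spaces or parentheses cannot break the parse.
    static bool self_cpu_ticks(uint64_t* utime, uint64_t* stime) {
      FILE* f = fopen("/proc/self/stat", "r");
      if (f == NULL) {
        return false;
      }
      char buf[2048];
      size_t n = fread(buf, 1, sizeof(buf) - 1, f);
      fclose(f);
      if (n == 0) {
        return false;
      }
      buf[n] = '\0';
      const char* p = strrchr(buf, ')');
      if (p == NULL) {
        return false;
      }
      // Fields 3..13 (state, ppid, pgrp, session, tty_nr, tpgid, flags, minflt,
      // cminflt, majflt, cmajflt) are skipped; fields 14 and 15 are captured.
      return sscanf(p + 2, "%*c %*d %*d %*d %*d %*d %*u %*u %*u %*u %*u %" SCNu64 " %" SCNu64,
                    utime, stime) == 2;
    }
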
+
+/**
+ * For platforms that have them, when declaring
+ * a printf-style function,
+ *   formatSpec is the parameter number (starting at 1)
+ *       that is the format argument ("%d pid %s")
+ *   params is the parameter number where the actual args to
+ *       the format starts. If the args are in a va_list, this
+ *       should be 0.
+ */
+#ifndef PRINTF_ARGS
+#  define PRINTF_ARGS(formatSpec,  params) ATTRIBUTE_PRINTF(formatSpec, params)
+#endif
+
+#ifndef SCANF_ARGS
+#  define SCANF_ARGS(formatSpec,   params) ATTRIBUTE_SCANF(formatSpec, params)
+#endif
+
+#ifndef _PRINTFMT_
+#  define _PRINTFMT_
+#endif
+
+#ifndef _SCANFMT_
+#  define _SCANFMT_
+#endif
+
+
+struct CPUPerfTicks {
+  uint64_t  used;
+  uint64_t  usedKernel;
+  uint64_t  total;
+};
+
+typedef enum {
+  CPU_LOAD_VM_ONLY,
+  CPU_LOAD_GLOBAL,
+} CpuLoadTarget;
+
+enum {
+  UNDETECTED,
+  UNDETECTABLE,
+  LINUX26_NPTL,
+  BAREMETAL
+};
+
+struct CPUPerfCounters {
+  int   nProcs;
+  CPUPerfTicks jvmTicks;
+  CPUPerfTicks* cpus;
+};
+
+static double get_cpu_load(int which_logical_cpu, CPUPerfCounters* counters, double* pkernelLoad, CpuLoadTarget target);
+
+/** reads /proc/<pid>/stat data, with some checks and some skips.
+ *  Ensure that 'fmt' does _NOT_ contain the first two "%d %s"
+ */
+static int SCANF_ARGS(2, 0) vread_statdata(const char* procfile, _SCANFMT_ const char* fmt, va_list args) {
+  FILE* f;
+  int n;
+  char buf[2048];
+
+  if ((f = fopen(procfile, "r")) == NULL) {
+    return -1;
+  }
+
+  if ((n = fread(buf, 1, sizeof(buf), f)) != -1) {
+    char *tmp;
+
+    buf[n-1] = '\0';
+    /** skip through pid and exec name. */
+    if ((tmp = strrchr(buf, ')')) != NULL) {
+      // skip the ')' and the following space
+      // but check that buffer is long enough
+      tmp += 2;
+      if (tmp < buf + n) {
+        n = vsscanf(tmp, fmt, args);
+      }
+    }
+  }
+
+  fclose(f);
+
+  return n;
+}
+
+static int SCANF_ARGS(2, 3) read_statdata(const char* procfile, _SCANFMT_ const char* fmt, ...) {
+  int   n;
+  va_list args;
+
+  va_start(args, fmt);
+  n = vread_statdata(procfile, fmt, args);
+  va_end(args);
+  return n;
+}
+
+static FILE* open_statfile(void) {
+  FILE *f;
+
+  if ((f = fopen("/proc/stat", "r")) == NULL) {
+    static int haveWarned = 0;
+    if (!haveWarned) {
+      haveWarned = 1;
+    }
+  }
+  return f;
+}
+
+static void
+next_line(FILE *f) {
+  int c;
+  do {
+    c = fgetc(f);
+  } while (c != '\n' && c != EOF);
+}
+
+/**
+ * Return the total number of ticks since the system was booted.
+ * If the usedTicks parameter is not NULL, it will be filled with
+ * the number of ticks spent on actual processes (user, system or
+ * nice processes) since system boot. Note that this is the total number
+ * of "executed" ticks on _all_ CPUs, that is, on an n-way system it is
+ * n times the number of ticks that have passed in clock time.
+ *
+ * Returns a negative value if the reading of the ticks failed.
+ */
+static OSReturn get_total_ticks(int which_logical_cpu, CPUPerfTicks* pticks) {
+  FILE*         fh;
+  uint64_t      userTicks, niceTicks, systemTicks, idleTicks;
+  uint64_t      iowTicks = 0, irqTicks = 0, sirqTicks= 0;
+  int           logical_cpu = -1;
+  const int     expected_assign_count = (-1 == which_logical_cpu) ? 4 : 5;
+  int           n;
+
+  if ((fh = open_statfile()) == NULL) {
+    return OS_ERR;
+  }
+  if (-1 == which_logical_cpu) {
+    n = fscanf(fh, "cpu " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
+            UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT,
+            &userTicks, &niceTicks, &systemTicks, &idleTicks,
+            &iowTicks, &irqTicks, &sirqTicks);
+  } else {
+    // Move to next line
+    next_line(fh);
+
+    // find the line for the requested cpu (faster to just iterate linefeeds?)
+    for (int i = 0; i < which_logical_cpu; i++) {
+      next_line(fh);
+    }
+
+    n = fscanf(fh, "cpu%u " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
+               UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT,
+               &logical_cpu, &userTicks, &niceTicks,
+               &systemTicks, &idleTicks, &iowTicks, &irqTicks, &sirqTicks);
+  }
+
+  fclose(fh);
+  if (n < expected_assign_count || logical_cpu != which_logical_cpu) {
+#ifdef DEBUG_LINUX_PROC_STAT
+    vm_fprintf(stderr, "[stat] read failed");
+#endif
+    return OS_ERR;
+  }
+
+#ifdef DEBUG_LINUX_PROC_STAT
+  vm_fprintf(stderr, "[stat] read "
+          UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
+          UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " \n",
+          userTicks, niceTicks, systemTicks, idleTicks,
+          iowTicks, irqTicks, sirqTicks);
+#endif
+
+  pticks->used       = userTicks + niceTicks;
+  pticks->usedKernel = systemTicks + irqTicks + sirqTicks;
+  pticks->total      = userTicks + niceTicks + systemTicks + idleTicks +
+                       iowTicks + irqTicks + sirqTicks;
+
+  return OS_OK;
+}
+
+
+static int get_systemtype(void) {
+  static int procEntriesType = UNDETECTED;
+  DIR *taskDir;
+
+  if (procEntriesType != UNDETECTED) {
+    return procEntriesType;
+  }
+
+  // Check whether we have a task subdirectory
+  if ((taskDir = opendir("/proc/self/task")) == NULL) {
+    procEntriesType = UNDETECTABLE;
+  } else {
+    // The task subdirectory exists; we're on a Linux >= 2.6 system
+    closedir(taskDir);
+    procEntriesType = LINUX26_NPTL;
+  }
+
+  return procEntriesType;
+}
+
+/** read user and system ticks from a named procfile, assumed to be in 'stat' format. */
+static int read_ticks(const char* procfile, uint64_t* userTicks, uint64_t* systemTicks) {
+  return read_statdata(procfile, "%*c %*d %*d %*d %*d %*d %*u %*u %*u %*u %*u " UINT64_FORMAT " " UINT64_FORMAT,
+    userTicks, systemTicks);
+}
+
+/**
+ * Return the number of ticks spent in any of the processes belonging
+ * to the JVM on any CPU.
+ */
+static OSReturn get_jvm_ticks(CPUPerfTicks* pticks) {
+  uint64_t userTicks;
+  uint64_t systemTicks;
+
+  if (get_systemtype() != LINUX26_NPTL) {
+    return OS_ERR;
+  }
+
+  if (read_ticks("/proc/self/stat", &userTicks, &systemTicks) != 2) {
+    return OS_ERR;
+  }
+
+  // get the total
+  if (get_total_ticks(-1, pticks) != OS_OK) {
+    return OS_ERR;
+  }
+
+  pticks->used       = userTicks;
+  pticks->usedKernel = systemTicks;
+
+  return OS_OK;
+}
+
+/**
+ * Return the load of the CPU as a double. 1.0 means the CPU process uses all
+ * available time for user or system processes, 0.0 means the CPU uses all time
+ * being idle.
+ *
+ * Returns a negative value if there is a problem in determining the CPU load.
+ */
+static double get_cpu_load(int which_logical_cpu, CPUPerfCounters* counters, double* pkernelLoad, CpuLoadTarget target) {
+  uint64_t udiff, kdiff, tdiff;
+  CPUPerfTicks* pticks;
+  CPUPerfTicks  tmp;
+  double user_load;
+
+  *pkernelLoad = 0.0;
+
+  if (target == CPU_LOAD_VM_ONLY) {
+    pticks = &counters->jvmTicks;
+  } else if (-1 == which_logical_cpu) {
+    pticks = &counters->cpus[counters->nProcs];
+  } else {
+    pticks = &counters->cpus[which_logical_cpu];
+  }
+
+  tmp = *pticks;
+
+  if (target == CPU_LOAD_VM_ONLY) {
+    if (get_jvm_ticks(pticks) != OS_OK) {
+      return -1.0;
+    }
+  } else if (get_total_ticks(which_logical_cpu, pticks) != OS_OK) {
+    return -1.0;
+  }
+
+  // seems like we sometimes end up with fewer kernel ticks when
+  // reading /proc/self/stat a second time; a timing issue between cpus?
+  if (pticks->usedKernel < tmp.usedKernel) {
+    kdiff = 0;
+  } else {
+    kdiff = pticks->usedKernel - tmp.usedKernel;
+  }
+  tdiff = pticks->total - tmp.total;
+  udiff = pticks->used - tmp.used;
+
+  if (tdiff == 0) {
+    return 0.0;
+  } else if (tdiff < (udiff + kdiff)) {
+    tdiff = udiff + kdiff;
+  }
+  *pkernelLoad = (kdiff / (double)tdiff);
+  // BUG9044876, normalize return values to sane values
+  *pkernelLoad = MAX2<double>(*pkernelLoad, 0.0);
+  *pkernelLoad = MIN2<double>(*pkernelLoad, 1.0);
+
+  user_load = (udiff / (double)tdiff);
+  user_load = MAX2<double>(user_load, 0.0);
+  user_load = MIN2<double>(user_load, 1.0);
+
+  return user_load;
+}
+
+static int SCANF_ARGS(1, 2) parse_stat(_SCANFMT_ const char* fmt, ...) {
+  FILE *f;
+  va_list args;
+
+  va_start(args, fmt);
+
+  if ((f = open_statfile()) == NULL) {
+    va_end(args);
+    return OS_ERR;
+  }
+  for (;;) {
+    char line[80];
+    if (fgets(line, sizeof(line), f) != NULL) {
+      if (vsscanf(line, fmt, args) == 1) {
+        fclose(f);
+        va_end(args);
+        return OS_OK;
+      }
+    } else {
+      fclose(f);
+      va_end(args);
+      return OS_ERR;
+    }
+  }
+}
+
+static int get_noof_context_switches(uint64_t* switches) {
+  return parse_stat("ctxt " UINT64_FORMAT "\n", switches);
+}
+
+/** returns boot time in _seconds_ since epoch */
+static int get_boot_time(uint64_t* time) {
+  return parse_stat("btime " UINT64_FORMAT "\n", time);
+}
+
+static int perf_context_switch_rate(double* rate) {
+  static pthread_mutex_t contextSwitchLock = PTHREAD_MUTEX_INITIALIZER;
+  static uint64_t      lastTime;
+  static uint64_t      lastSwitches;
+  static double        lastRate;
+
+  uint64_t lt = 0;
+  int res = 0;
+
+  if (lastTime == 0) {
+    uint64_t tmp;
+    if (get_boot_time(&tmp) < 0) {
+      return OS_ERR;
+    }
+    lt = tmp * 1000;
+  }
+
+  res = OS_OK;
+
+  pthread_mutex_lock(&contextSwitchLock);
+  {
+
+    uint64_t sw;
+    s8 t, d;
+
+    if (lastTime == 0) {
+      lastTime = lt;
+    }
+
+    t = os::javaTimeMillis();
+    d = t - lastTime;
+
+    if (d == 0) {
+      *rate = lastRate;
+    } else if (!get_noof_context_switches(&sw)) {
+      *rate      = ( (double)(sw - lastSwitches) / d ) * 1000;
+      lastRate     = *rate;
+      lastSwitches = sw;
+      lastTime     = t;
+    } else {
+      *rate = 0;
+      res   = OS_ERR;
+    }
+    if (*rate <= 0) {
+      *rate = 0;
+      lastRate = 0;
+    }
+  }
+  pthread_mutex_unlock(&contextSwitchLock);
+
+  return res;
+}
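
perf_context_switch_rate() above turns the cumulative ctxt counter from /proc/stat into a per-second rate by dividing the change in switches by the elapsed milliseconds and scaling by 1000. The core calculation as a minimal sketch (switch_rate is an illustrative name):

    #include <cstdint>

    // Context switches per second between two samples of the cumulative counter.
    // Returns 0.0 when no time has passed, which the caller above answers by
    // reusing the previously computed rate.
    static double switch_rate(uint64_t prev_switches, uint64_t cur_switches,
                              int64_t prev_millis, int64_t cur_millis) {
      int64_t elapsed_ms = cur_millis - prev_millis;
      if (elapsed_ms <= 0) {
        return 0.0;
      }
      return (double)(cur_switches - prev_switches) / (double)elapsed_ms * 1000.0;
    }
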
+
+class CPUPerformanceInterface::CPUPerformance : public CHeapObj<mtInternal> {
+  friend class CPUPerformanceInterface;
+ private:
+  CPUPerfCounters _counters;
+
+  int cpu_load(int which_logical_cpu, double* cpu_load);
+  int context_switch_rate(double* rate);
+  int cpu_load_total_process(double* cpu_load);
+  int cpu_loads_process(double* pjvmUserLoad, double* pjvmKernelLoad, double* psystemTotalLoad);
+
+ public:
+  CPUPerformance();
+  bool initialize();
+  ~CPUPerformance();
+};
+
+CPUPerformanceInterface::CPUPerformance::CPUPerformance() {
+  _counters.nProcs = os::active_processor_count();
+  _counters.cpus = NULL;
+}
+
+bool CPUPerformanceInterface::CPUPerformance::initialize() {
+  size_t tick_array_size = (_counters.nProcs + 1) * sizeof(CPUPerfTicks);
+  _counters.cpus = (CPUPerfTicks*)NEW_C_HEAP_ARRAY(char, tick_array_size, mtInternal);
+  if (NULL == _counters.cpus) {
+    return false;
+  }
+  memset(_counters.cpus, 0, tick_array_size);
+
+  // For the CPU load total
+  get_total_ticks(-1, &_counters.cpus[_counters.nProcs]);
+
+  // For each CPU
+  for (int i = 0; i < _counters.nProcs; i++) {
+    get_total_ticks(i, &_counters.cpus[i]);
+  }
+  // For JVM load
+  get_jvm_ticks(&_counters.jvmTicks);
+
+  // initialize context switch system
+  // the double is only for init
+  double init_ctx_switch_rate;
+  perf_context_switch_rate(&init_ctx_switch_rate);
+
+  return true;
+}
+
+CPUPerformanceInterface::CPUPerformance::~CPUPerformance() {
+  if (_counters.cpus != NULL) {
+    FREE_C_HEAP_ARRAY(char, _counters.cpus, mtInternal);
+  }
+}
+
+int CPUPerformanceInterface::CPUPerformance::cpu_load(int which_logical_cpu, double* cpu_load) {
+  double u, s;
+  u = get_cpu_load(which_logical_cpu, &_counters, &s, CPU_LOAD_GLOBAL);
+  if (u < 0) {
+    *cpu_load = 0.0;
+    return OS_ERR;
+  }
+  // Cap total system load to 1.0
+  *cpu_load = MIN2<double>((u + s), 1.0);
+  return OS_OK;
+}
+
+int CPUPerformanceInterface::CPUPerformance::cpu_load_total_process(double* cpu_load) {
+  double u, s;
+  u = get_cpu_load(-1, &_counters, &s, CPU_LOAD_VM_ONLY);
+  if (u < 0) {
+    *cpu_load = 0.0;
+    return OS_ERR;
+  }
+  *cpu_load = u + s;
+  return OS_OK;
+}
+
+int CPUPerformanceInterface::CPUPerformance::cpu_loads_process(double* pjvmUserLoad, double* pjvmKernelLoad, double* psystemTotalLoad) {
+  double u, s, t;
+
+  assert(pjvmUserLoad != NULL, "pjvmUserLoad not inited");
+  assert(pjvmKernelLoad != NULL, "pjvmKernelLoad not inited");
+  assert(psystemTotalLoad != NULL, "psystemTotalLoad not inited");
+
+  u = get_cpu_load(-1, &_counters, &s, CPU_LOAD_VM_ONLY);
+  if (u < 0) {
+    *pjvmUserLoad = 0.0;
+    *pjvmKernelLoad = 0.0;
+    *psystemTotalLoad = 0.0;
+    return OS_ERR;
+  }
+
+  cpu_load(-1, &t);
+  // clamp at user+system and 1.0
+  if (u + s > t) {
+    t = MIN2<double>(u + s, 1.0);
+  }
+
+  *pjvmUserLoad = u;
+  *pjvmKernelLoad = s;
+  *psystemTotalLoad = t;
+
+  return OS_OK;
+}
+
+int CPUPerformanceInterface::CPUPerformance::context_switch_rate(double* rate) {
+  return perf_context_switch_rate(rate);
+}
+
+CPUPerformanceInterface::CPUPerformanceInterface() {
+  _impl = NULL;
+}
+
+bool CPUPerformanceInterface::initialize() {
+  _impl = new CPUPerformanceInterface::CPUPerformance();
+  return NULL == _impl ? false : _impl->initialize();
+}
+
+CPUPerformanceInterface::~CPUPerformanceInterface() {
+  if (_impl != NULL) {
+    delete _impl;
+  }
+}
+
+int CPUPerformanceInterface::cpu_load(int which_logical_cpu, double* cpu_load) const {
+  return _impl->cpu_load(which_logical_cpu, cpu_load);
+}
+
+int CPUPerformanceInterface::cpu_load_total_process(double* cpu_load) const {
+  return _impl->cpu_load_total_process(cpu_load);
+}
+
+int CPUPerformanceInterface::cpu_loads_process(double* pjvmUserLoad, double* pjvmKernelLoad, double* psystemTotalLoad) const {
+  return _impl->cpu_loads_process(pjvmUserLoad, pjvmKernelLoad, psystemTotalLoad);
+}
+
+int CPUPerformanceInterface::context_switch_rate(double* rate) const {
+  return _impl->context_switch_rate(rate);
+}
+
+class SystemProcessInterface::SystemProcesses : public CHeapObj<mtInternal> {
+  friend class SystemProcessInterface;
+ private:
+  class ProcessIterator : public CHeapObj<mtInternal> {
+    friend class SystemProcessInterface::SystemProcesses;
+   private:
+    DIR*           _dir;
+    struct dirent* _entry;
+    bool           _valid;
+    char           _exeName[PATH_MAX];
+    char           _exePath[PATH_MAX];
+
+    ProcessIterator();
+    ~ProcessIterator();
+    bool initialize();
+
+    bool is_valid() const { return _valid; }
+    bool is_valid_entry(struct dirent* entry) const;
+    bool is_dir(const char* name) const;
+    int  fsize(const char* name, uint64_t& size) const;
+
+    char* allocate_string(const char* str) const;
+    void  get_exe_name();
+    char* get_exe_path();
+    char* get_cmdline();
+
+    int current(SystemProcess* process_info);
+    int next_process();
+  };
+
+  ProcessIterator* _iterator;
+  SystemProcesses();
+  bool initialize();
+  ~SystemProcesses();
+
+  //information about system processes
+  int system_processes(SystemProcess** system_processes, int* no_of_sys_processes) const;
+};
+
+bool SystemProcessInterface::SystemProcesses::ProcessIterator::is_dir(const char* name) const {
+  struct stat mystat;
+  int ret_val = 0;
+
+  ret_val = stat(name, &mystat);
+  if (ret_val < 0) {
+    return false;
+  }
+  ret_val = S_ISDIR(mystat.st_mode);
+  return ret_val > 0;
+}
+
+int SystemProcessInterface::SystemProcesses::ProcessIterator::fsize(const char* name, uint64_t& size) const {
+  assert(name != NULL, "name pointer is NULL!");
+  size = 0;
+  struct stat fbuf;
+
+  if (stat(name, &fbuf) < 0) {
+    return OS_ERR;
+  }
+  size = fbuf.st_size;
+  return OS_OK;
+}
+
+// an entry is valid if it has a numeric name, is a directory and contains a 'stat' file
+bool SystemProcessInterface::SystemProcesses::ProcessIterator::is_valid_entry(struct dirent* entry) const {
+  char buffer[PATH_MAX];
+  uint64_t size = 0;
+
+  if (atoi(entry->d_name) != 0) {
+    jio_snprintf(buffer, PATH_MAX, "/proc/%s", entry->d_name);
+    buffer[PATH_MAX - 1] = '\0';
+
+    if (is_dir(buffer)) {
+      jio_snprintf(buffer, PATH_MAX, "/proc/%s/stat", entry->d_name);
+      buffer[PATH_MAX - 1] = '\0';
+      if (fsize(buffer, size) != OS_ERR) {
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+// get exe-name from /proc/<pid>/stat
+void SystemProcessInterface::SystemProcesses::ProcessIterator::get_exe_name() {
+  FILE* fp;
+  char  buffer[PATH_MAX];
+
+  jio_snprintf(buffer, PATH_MAX, "/proc/%s/stat", _entry->d_name);
+  buffer[PATH_MAX - 1] = '\0';
+  if ((fp = fopen(buffer, "r")) != NULL) {
+    if (fgets(buffer, PATH_MAX, fp) != NULL) {
+      char* start, *end;
+      // exe-name is between the first pair of ( and )
+      start = strchr(buffer, '(');
+      if (start != NULL && start[1] != '\0') {
+        start++;
+        end = strrchr(start, ')');
+        if (end != NULL) {
+          size_t len;
+          len = MIN2<size_t>(end - start, sizeof(_exeName) - 1);
+          memcpy(_exeName, start, len);
+          _exeName[len] = '\0';
+        }
+      }
+    }
+    fclose(fp);
+  }
+}
+
+// get command line from /proc/<pid>/cmdline
+char* SystemProcessInterface::SystemProcesses::ProcessIterator::get_cmdline() {
+  FILE* fp;
+  char  buffer[PATH_MAX];
+  char* cmdline = NULL;
+
+  jio_snprintf(buffer, PATH_MAX, "/proc/%s/cmdline", _entry->d_name);
+  buffer[PATH_MAX - 1] = '\0';
+  if ((fp = fopen(buffer, "r")) != NULL) {
+    size_t size = 0;
+    char   dummy;
+
+    // find out how long the file is (stat always returns 0)
+    while (fread(&dummy, 1, 1, fp) == 1) {
+      size++;
+    }
+    if (size > 0) {
+      cmdline = NEW_C_HEAP_ARRAY(char, size + 1, mtInternal);
+      if (cmdline != NULL) {
+        cmdline[0] = '\0';
+        if (fseek(fp, 0, SEEK_SET) == 0) {
+          if (fread(cmdline, 1, size, fp) == size) {
+            // the file has the arguments separated by '\0',
+            // so we translate '\0' to ' '
+            for (size_t i = 0; i < size; i++) {
+              if (cmdline[i] == '\0') {
+                cmdline[i] = ' ';
+              }
+            }
+            cmdline[size] = '\0';
+          }
+        }
+      }
+    }
+    fclose(fp);
+  }
+  return cmdline;
+}
+
+// get full path to exe from /proc/<pid>/exe symlink
+char* SystemProcessInterface::SystemProcesses::ProcessIterator::get_exe_path() {
+  char buffer[PATH_MAX];
+
+  jio_snprintf(buffer, PATH_MAX, "/proc/%s/exe", _entry->d_name);
+  buffer[PATH_MAX - 1] = '\0';
+  return realpath(buffer, _exePath);
+}
+
+char* SystemProcessInterface::SystemProcesses::ProcessIterator::allocate_string(const char* str) const {
+  if (str != NULL) {
+    size_t len = strlen(str);
+    char* tmp = NEW_C_HEAP_ARRAY(char, len+1, mtInternal);
+    strncpy(tmp, str, len);
+    tmp[len] = '\0';
+    return tmp;
+  }
+  return NULL;
+}
+
+int SystemProcessInterface::SystemProcesses::ProcessIterator::current(SystemProcess* process_info) {
+  if (!is_valid()) {
+    return OS_ERR;
+  }
+
+  process_info->set_pid(atoi(_entry->d_name));
+
+  get_exe_name();
+  process_info->set_name(allocate_string(_exeName));
+
+  if (get_exe_path() != NULL) {
+     process_info->set_path(allocate_string(_exePath));
+  }
+
+  char* cmdline = NULL;
+  cmdline = get_cmdline();
+  if (cmdline != NULL) {
+    process_info->set_command_line(allocate_string(cmdline));
+    FREE_C_HEAP_ARRAY(char, cmdline, mtInternal);
+  }
+
+  return OS_OK;
+}
+
+int SystemProcessInterface::SystemProcesses::ProcessIterator::next_process() {
+  if (!is_valid()) {
+    return OS_ERR;
+  }
+
+  do {
+    _entry = os::readdir(_dir);
+    if (_entry == NULL) {
+      // Error or reached end.  Could use errno to distinguish those cases.
+      _valid = false;
+      return OS_ERR;
+    }
+  } while(!is_valid_entry(_entry));
+
+  _valid = true;
+  return OS_OK;
+}
+
+SystemProcessInterface::SystemProcesses::ProcessIterator::ProcessIterator() {
+  _dir = NULL;
+  _entry = NULL;
+  _valid = false;
+}
+
+bool SystemProcessInterface::SystemProcesses::ProcessIterator::initialize() {
+  _dir = os::opendir("/proc");
+  _entry = NULL;
+  _valid = true;
+  next_process();
+
+  return true;
+}
+
+SystemProcessInterface::SystemProcesses::ProcessIterator::~ProcessIterator() {
+  if (_dir != NULL) {
+    os::closedir(_dir);
+  }
+}
+
+SystemProcessInterface::SystemProcesses::SystemProcesses() {
+  _iterator = NULL;
+}
+
+bool SystemProcessInterface::SystemProcesses::initialize() {
+  _iterator = new SystemProcessInterface::SystemProcesses::ProcessIterator();
+  return NULL == _iterator ? false : _iterator->initialize();
+}
+
+SystemProcessInterface::SystemProcesses::~SystemProcesses() {
+  if (_iterator != NULL) {
+    delete _iterator;
+  }
+}
+
+int SystemProcessInterface::SystemProcesses::system_processes(SystemProcess** system_processes, int* no_of_sys_processes) const {
+  assert(system_processes != NULL, "system_processes pointer is NULL!");
+  assert(no_of_sys_processes != NULL, "system_processes counter pointer is NULL!");
+  assert(_iterator != NULL, "iterator is NULL!");
+
+  // initialize pointers
+  *no_of_sys_processes = 0;
+  *system_processes = NULL;
+
+  while (_iterator->is_valid()) {
+    SystemProcess* tmp = new SystemProcess();
+    _iterator->current(tmp);
+
+    // if there is already a head...
+    if (*system_processes != NULL) {
+      // ...chain it in behind the new entry
+      tmp->set_next(*system_processes);
+    }
+    // new head
+    *system_processes = tmp;
+    // increment
+    (*no_of_sys_processes)++;
+    // step forward
+    _iterator->next_process();
+  }
+  return OS_OK;
+}
+
+int SystemProcessInterface::system_processes(SystemProcess** system_procs, int* no_of_sys_processes) const {
+  return _impl->system_processes(system_procs, no_of_sys_processes);
+}
+
+SystemProcessInterface::SystemProcessInterface() {
+  _impl = NULL;
+}
+
+bool SystemProcessInterface::initialize() {
+  _impl = new SystemProcessInterface::SystemProcesses();
+  return NULL == _impl ? false : _impl->initialize();
+}
+
+SystemProcessInterface::~SystemProcessInterface() {
+  if (_impl != NULL) {
+    delete _impl;
+  }
+}
+
+CPUInformationInterface::CPUInformationInterface() {
+  _cpu_info = NULL;
+}
+
+bool CPUInformationInterface::initialize() {
+  _cpu_info = new CPUInformation();
+  if (NULL == _cpu_info) {
+    return false;
+  }
+  _cpu_info->set_number_of_hardware_threads(VM_Version_Ext::number_of_threads());
+  _cpu_info->set_number_of_cores(VM_Version_Ext::number_of_cores());
+  _cpu_info->set_number_of_sockets(VM_Version_Ext::number_of_sockets());
+  _cpu_info->set_cpu_name(VM_Version_Ext::cpu_name());
+  _cpu_info->set_cpu_description(VM_Version_Ext::cpu_description());
+
+  return true;
+}
+
+CPUInformationInterface::~CPUInformationInterface() {
+  if (_cpu_info != NULL) {
+    if (_cpu_info->cpu_name() != NULL) {
+      const char* cpu_name = _cpu_info->cpu_name();
+      FREE_C_HEAP_ARRAY(char, cpu_name, mtInternal);
+      _cpu_info->set_cpu_name(NULL);
+    }
+    if (_cpu_info->cpu_description() != NULL) {
+      const char* cpu_desc = _cpu_info->cpu_description();
+      FREE_C_HEAP_ARRAY(char, cpu_desc, mtInternal);
+      _cpu_info->set_cpu_description(NULL);
+    }
+    delete _cpu_info;
+  }
+}
+
+int CPUInformationInterface::cpu_information(CPUInformation& cpu_info) {
+  if (_cpu_info == NULL) {
+    return OS_ERR;
+  }
+
+  cpu_info = *_cpu_info; // shallow copy assignment
+  return OS_OK;
+}
+
+class NetworkPerformanceInterface::NetworkPerformance : public CHeapObj<mtInternal> {
+  friend class NetworkPerformanceInterface;
+ private:
+  NetworkPerformance();
+  NetworkPerformance(const NetworkPerformance& rhs); // no impl
+  NetworkPerformance& operator=(const NetworkPerformance& rhs); // no impl
+  bool initialize();
+  ~NetworkPerformance();
+  int64_t read_counter(const char* iface, const char* counter) const;
+  int network_utilization(NetworkInterface** network_interfaces) const;
+};
+
+NetworkPerformanceInterface::NetworkPerformance::NetworkPerformance() {
+
+}
+
+bool NetworkPerformanceInterface::NetworkPerformance::initialize() {
+  return true;
+}
+
+NetworkPerformanceInterface::NetworkPerformance::~NetworkPerformance() {
+}
+
+int64_t NetworkPerformanceInterface::NetworkPerformance::read_counter(const char* iface, const char* counter) const {
+  char buf[128];
+
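+  // e.g. "/sys/class/net/eth0/statistics/rx_bytes" ("eth0" standing in for whatever interface was passed)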
+  snprintf(buf, sizeof(buf), "/sys/class/net/%s/statistics/%s", iface, counter);
+
+  int fd = open(buf, O_RDONLY);
+  if (fd == -1) {
+    return -1;
+  }
+
+  ssize_t num_bytes = read(fd, buf, sizeof(buf));
+  close(fd);
+  if ((num_bytes == -1) || (num_bytes >= static_cast<ssize_t>(sizeof(buf))) || (num_bytes < 1)) {
+    return -1;
+  }
+
+  buf[num_bytes] = '\0';
+  int64_t value = strtoll(buf, NULL, 10);
+
+  return value;
+}
+
+int NetworkPerformanceInterface::NetworkPerformance::network_utilization(NetworkInterface** network_interfaces) const
+{
+  ifaddrs* addresses;
+  ifaddrs* cur_address;
+
+  if (getifaddrs(&addresses) != 0) {
+    return OS_ERR;
+  }
+
+  NetworkInterface* ret = NULL;
+  for (cur_address = addresses; cur_address != NULL; cur_address = cur_address->ifa_next) {
+    if ((cur_address->ifa_addr == NULL) || (cur_address->ifa_addr->sa_family != AF_PACKET)) {
+      continue;
+    }
+
+    int64_t bytes_in = read_counter(cur_address->ifa_name, "rx_bytes");
+    int64_t bytes_out = read_counter(cur_address->ifa_name, "tx_bytes");
+
+    NetworkInterface* cur = new NetworkInterface(cur_address->ifa_name, bytes_in, bytes_out, ret);
+    ret = cur;
+  }
+
+  freeifaddrs(addresses);
+  *network_interfaces = ret;
+
+  return OS_OK;
+}
+
+NetworkPerformanceInterface::NetworkPerformanceInterface() {
+  _impl = NULL;
+}
+
+NetworkPerformanceInterface::~NetworkPerformanceInterface() {
+  if (_impl != NULL) {
+    delete _impl;
+  }
+}
+
+bool NetworkPerformanceInterface::initialize() {
+  _impl = new NetworkPerformanceInterface::NetworkPerformance();
+  return _impl != NULL && _impl->initialize();
+}
+
+int NetworkPerformanceInterface::network_utilization(NetworkInterface** network_interfaces) const {
+  return _impl->network_utilization(network_interfaces);
+}
--- a/src/os/linux/vm/perfMemory_linux.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/os/linux/vm/perfMemory_linux.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -34,6 +34,7 @@
 #include "utilities/exceptions.hpp"
 
 // put OS-includes here
+#include <dirent.h>
 # include <sys/types.h>
 # include <sys/mman.h>
 # include <errno.h>
--- a/src/os/posix/vm/os_posix.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/os/posix/vm/os_posix.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -858,6 +858,68 @@
   }
 }
 
+Thread* os::ThreadCrashProtection::_protected_thread = NULL;
+os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL;
+volatile intptr_t os::ThreadCrashProtection::_crash_mux = 0;
+
+os::ThreadCrashProtection::ThreadCrashProtection() {
+}
+
+/*
+ * See the caveats for this class in os_posix.hpp
+ * Protects the callback call so that SIGSEGV / SIGBUS jumps back into this
+ * method and returns false. If none of the signals are raised, returns true.
+ * The callback is supposed to provide the method that should be protected.
+ */
+bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
+  sigset_t saved_sig_mask;
+
+  Thread::muxAcquire(&_crash_mux, "CrashProtection");
+
+  _protected_thread = ThreadLocalStorage::thread();
+  assert(_protected_thread != NULL, "Cannot crash protect a NULL thread");
+
+  // we cannot rely on sigsetjmp/siglongjmp to save/restore the signal mask
+  // since on at least some systems (OS X) siglongjmp will restore the mask
+  // for the process, not the thread
+  pthread_sigmask(0, NULL, &saved_sig_mask);
+  if (sigsetjmp(_jmpbuf, 0) == 0) {
+    // make sure we can see in the signal handler that we have crash protection
+    // installed
+    _crash_protection = this;
+    cb.call();
+    // and clear the crash protection
+    _crash_protection = NULL;
+    _protected_thread = NULL;
+    Thread::muxRelease(&_crash_mux);
+    return true;
+  }
+  // this happens when we siglongjmp() back
+  pthread_sigmask(SIG_SETMASK, &saved_sig_mask, NULL);
+  _crash_protection = NULL;
+  _protected_thread = NULL;
+  Thread::muxRelease(&_crash_mux);
+  return false;
+}
+
+void os::ThreadCrashProtection::restore() {
+  assert(_crash_protection != NULL, "must have crash protection");
+  siglongjmp(_jmpbuf, 1);
+}
+
+void os::ThreadCrashProtection::check_crash_protection(int sig,
+    Thread* thread) {
+
+  if (thread != NULL &&
+      thread == _protected_thread &&
+      _crash_protection != NULL) {
+
+    if (sig == SIGSEGV || sig == SIGBUS) {
+      _crash_protection->restore();
+    }
+  }
+}
+
 os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() {
   assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread");
 }
--- a/src/os/posix/vm/os_posix.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/os/posix/vm/os_posix.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -62,6 +62,33 @@
 };
 
 /*
+ * Crash protection utility used by JFR. Wrap the callback
+ * with a sigsetjmp and in case of a SIGSEGV/SIGBUS we siglongjmp
+ * back.
+ * To be able to use this - don't take locks, don't rely on destructors,
+ * don't make OS library calls, don't allocate memory, don't print,
+ * don't call code that could leave the heap / memory in an inconsistent state,
+ * or anything else where we are not in control if we suddenly jump out.
+ */
+class ThreadCrashProtection : public StackObj {
+public:
+  static bool is_crash_protected(Thread* thr) {
+    return _crash_protection != NULL && _protected_thread == thr;
+  }
+
+  ThreadCrashProtection();
+  bool call(os::CrashProtectionCallback& cb);
+
+  static void check_crash_protection(int signal, Thread* thread);
+private:
+  static Thread* _protected_thread;
+  static ThreadCrashProtection* _crash_protection;
+  static volatile intptr_t _crash_mux;
+  void restore();
+  sigjmp_buf _jmpbuf;
+};
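
A minimal usage sketch (editorial illustration, not part of this changeset): the callback derives from os::CrashProtectionCallback declared in os.hpp, and CopyMemoryCallback / guarded_copy below are hypothetical names. Only work that respects the caveats above belongs inside call():

  class CopyMemoryCallback : public os::CrashProtectionCallback {
   private:
    const void* _src;
    void*       _dst;
    size_t      _len;
   public:
    CopyMemoryCallback(const void* src, void* dst, size_t len) :
      _src(src), _dst(dst), _len(len) {}
    virtual void call() {
      // may fault if _src is concurrently unmapped; crash protection turns
      // the resulting SIGSEGV/SIGBUS into a 'false' return from call() below
      memcpy(_dst, _src, _len);
    }
  };

  static bool guarded_copy(const void* src, void* dst, size_t len) {
    CopyMemoryCallback cb(src, dst, len);
    os::ThreadCrashProtection crash_protection;
    return crash_protection.call(cb);  // false: the copy crashed and was aborted via siglongjmp
  }
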
+
+/*
  * Crash protection for the watcher thread. Wrap the callback
  * with a sigsetjmp and in case of a SIGSEGV/SIGBUS we siglongjmp
  * back.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/posix/vm/semaphore_posix.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled/precompiled.hpp"
+#ifndef __APPLE__
+#include "runtime/os.hpp"
+// POSIX unnamed semaphores are not supported on OS X.
+#include "semaphore_posix.hpp"
+#include <semaphore.h>
+
+#define check_with_errno(check_type, cond, msg)                             \
+  do {                                                                      \
+    int err = errno;                                                        \
+    check_type(cond, /*"%s; error='%s' (errno=%s)", */msg/*, os::strerror(err),*/   \
+               /*os::errno_name(err)*/);                                        \
+} while (false)
+
+#define assert_with_errno(cond, msg)    check_with_errno(assert, cond, msg)
+#define guarantee_with_errno(cond, msg) check_with_errno(guarantee, cond, msg)
+
+PosixSemaphore::PosixSemaphore(uint value) {
+  int ret = sem_init(&_semaphore, 0, value);
+
+  guarantee_with_errno(ret == 0, "Failed to initialize semaphore");
+}
+
+PosixSemaphore::~PosixSemaphore() {
+  sem_destroy(&_semaphore);
+}
+
+void PosixSemaphore::signal(uint count) {
+  for (uint i = 0; i < count; i++) {
+    int ret = sem_post(&_semaphore);
+
+    assert_with_errno(ret == 0, "sem_post failed");
+  }
+}
+
+void PosixSemaphore::wait() {
+  int ret;
+
+  do {
+    ret = sem_wait(&_semaphore);
+  } while (ret != 0 && errno == EINTR);
+
+  assert_with_errno(ret == 0, "sem_wait failed");
+}
+
+bool PosixSemaphore::trywait() {
+  int ret;
+
+  do {
+    ret = sem_trywait(&_semaphore);
+  } while (ret != 0 && errno == EINTR);
+
+  assert_with_errno(ret == 0 || errno == EAGAIN, "trywait failed");
+
+  return ret == 0;
+}
+
+bool PosixSemaphore::timedwait(struct timespec ts) {
+  while (true) {
+    int result = sem_timedwait(&_semaphore, &ts);
+    if (result == 0) {
+      return true;
+    } else if (errno == EINTR) {
+      continue;
+    } else if (errno == ETIMEDOUT) {
+      return false;
+    } else {
+      assert_with_errno(false, "timedwait failed");
+      return false;
+    }
+  }
+}
+#endif // __APPLE__
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/posix/vm/semaphore_posix.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_POSIX_VM_SEMAPHORE_POSIX_HPP
+#define OS_POSIX_VM_SEMAPHORE_POSIX_HPP
+
+#include "memory/allocation.hpp"
+
+#include <semaphore.h>
+
+class PosixSemaphore : public CHeapObj<mtInternal> {
+  sem_t _semaphore;
+
+  // Prevent copying and assignment.
+  PosixSemaphore(const PosixSemaphore&);
+  PosixSemaphore& operator=(const PosixSemaphore&);
+
+ public:
+  PosixSemaphore(uint value = 0);
+  ~PosixSemaphore();
+
+  void signal(uint count = 1);
+
+  void wait();
+
+  bool trywait();
+  bool timedwait(struct timespec ts);
+};
+
+typedef PosixSemaphore SemaphoreImpl;
+
+#endif // OS_POSIX_VM_SEMAPHORE_POSIX_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/solaris/vm/os_perf_solaris.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,851 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jvm.h"
+#include "memory/allocation.inline.hpp"
+#include "runtime/os.hpp"
+#include "runtime/os_perf.hpp"
+#include "os_solaris.inline.hpp"
+#include "utilities/macros.hpp"
+
+#ifdef TARGET_ARCH_aarch32
+# include "vm_version_ext_aarch32.hpp"
+#endif
+#ifdef TARGET_ARCH_x86
+# include "vm_version_ext_x86.hpp"
+#endif
+#ifdef TARGET_ARCH_sparc
+# include "vm_version_ext_sparc.hpp"
+#endif
+#ifdef TARGET_ARCH_zero
+# include "vm_version_ext_zero.hpp"
+#endif
+#ifdef TARGET_ARCH_arm
+# include "vm_version_ext_arm.hpp"
+#endif
+#ifdef TARGET_ARCH_ppc
+# include "vm_version_ext_ppc.hpp"
+#endif
+
+#include <sys/types.h>
+#include <procfs.h>
+#include <dirent.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <kstat.h>
+#include <string.h>
+#include <sys/sysinfo.h>
+#include <sys/lwp.h>
+#include <pthread.h>
+#include <time.h>
+#include <utmpx.h>
+#include <dlfcn.h>
+#include <sys/loadavg.h>
+#include <limits.h>
+
+static const double NANOS_PER_SEC = 1000000000.0;
+
+struct CPUPerfTicks {
+  kstat_t* kstat;
+  uint64_t last_idle;
+  uint64_t last_total;
+  double   last_ratio;
+};
+
+struct CPUPerfCounters {
+  int           nProcs;
+  CPUPerfTicks* jvmTicks;
+  kstat_ctl_t*  kstat_ctrl;
+};
+
+static int get_info(const char* path, void* info, size_t s, off_t o) {
+  assert(path != NULL, "path is NULL!");
+  assert(info != NULL, "info is NULL!");
+
+  int fd = -1;
+
+  if ((fd = open(path, O_RDONLY)) < 0) {
+    return OS_ERR;
+  }
+  if (pread(fd, info, s, o) != s) {
+    close(fd);
+    return OS_ERR;
+  }
+  close(fd);
+  return OS_OK;
+}
+
+static int get_psinfo2(void* info, size_t s, off_t o) {
+  return get_info("/proc/self/psinfo", info, s, o);
+}
+
+static int get_psinfo(psinfo_t* info) {
+  return get_psinfo2(info, sizeof(*info), 0);
+}
+
+static int get_psinfo(char* file, psinfo_t* info) {
+  assert(file != NULL, "file is NULL!");
+  assert(info != NULL, "info is NULL!");
+  return get_info(file, info, sizeof(*info), 0);
+}
+
+
+static int get_usage(prusage_t* usage) {
+  assert(usage != NULL, "usage is NULL!");
+  return get_info("/proc/self/usage", usage, sizeof(*usage), 0);
+}
+
+static int read_cpustat(kstat_ctl_t* kstat_ctrl, CPUPerfTicks* load, cpu_stat_t* cpu_stat) {
+  assert(kstat_ctrl != NULL, "kstat_ctrl pointer is NULL!");
+  assert(load != NULL, "load pointer is NULL!");
+  assert(cpu_stat != NULL, "cpu_stat pointer is NULL!");
+
+  if (load->kstat == NULL) {
+    // no handle.
+    return OS_ERR;
+  }
+  if (kstat_read(kstat_ctrl, load->kstat, cpu_stat) == OS_ERR) {
+    // disable handle for this CPU
+    load->kstat = NULL;
+    return OS_ERR;
+  }
+  return OS_OK;
+}
+
+static double get_cpu_load(int which_logical_cpu, CPUPerfCounters* counters) {
+  assert(counters != NULL, "counters pointer is NULL!");
+
+  cpu_stat_t  cpu_stat = {0};
+
+  if (which_logical_cpu >= counters->nProcs) {
+    return .0;
+  }
+
+  CPUPerfTicks load = counters->jvmTicks[which_logical_cpu];
+  if (read_cpustat(counters->kstat_ctrl, &load, &cpu_stat) != OS_OK) {
+    return .0;
+  }
+
+  uint_t* usage = cpu_stat.cpu_sysinfo.cpu;
+  if (usage == NULL) {
+    return .0;
+  }
+
+  uint64_t c_idle  = usage[CPU_IDLE];
+  uint64_t c_total = 0;
+
+  for (int i = 0; i < CPU_STATES; i++) {
+    c_total += usage[i];
+  }
+
+  // Calculate diff against previous snapshot
+  uint64_t d_idle  = c_idle - load.last_idle;
+  uint64_t d_total = c_total - load.last_total;
+
+  // update only if we've moved on since the previous snapshot
+  if (d_total > 0) {
+    // Save current values for next time around
+    load.last_idle  = c_idle;
+    load.last_total = c_total;
+    load.last_ratio = (double) (d_total - d_idle) / d_total;
+  }
+
+  return load.last_ratio;
+}
+
+static int get_boot_time(uint64_t* time) {
+  assert(time != NULL, "time pointer is NULL!");
+  setutxent();
+  for(;;) {
+    struct utmpx* u;
+    if ((u = getutxent()) == NULL) {
+      break;
+    }
+    if (u->ut_type == BOOT_TIME) {
+      *time = u->ut_xtime;
+      endutxent();
+      return OS_OK;
+    }
+  }
+  endutxent();
+  return OS_ERR;
+}
+
+static int get_noof_context_switches(CPUPerfCounters* counters, uint64_t* switches) {
+  assert(switches != NULL, "switches pointer is NULL!");
+  assert(counters != NULL, "counter pointer is NULL!");
+  *switches = 0;
+  uint64_t s = 0;
+
+  // Collect data from all CPUs
+  for (int i = 0; i < counters->nProcs; i++) {
+    cpu_stat_t cpu_stat = {0};
+    CPUPerfTicks load = counters->jvmTicks[i];
+
+    if (read_cpustat(counters->kstat_ctrl, &load, &cpu_stat) == OS_OK) {
+      s += cpu_stat.cpu_sysinfo.pswitch;
+    } else {
+      //fail fast...
+      return OS_ERR;
+    }
+  }
+  *switches = s;
+  return OS_OK;
+}
+
+static int perf_context_switch_rate(CPUPerfCounters* counters, double* rate) {
+  assert(counters != NULL, "counters is NULL!");
+  assert(rate != NULL, "rate pointer is NULL!");
+  static pthread_mutex_t contextSwitchLock = PTHREAD_MUTEX_INITIALIZER;
+  static uint64_t lastTime = 0;
+  static uint64_t lastSwitches = 0;
+  static double   lastRate = 0.0;
+
+  uint64_t lt = 0;
+  int res = 0;
+
+  if (lastTime == 0) {
+    uint64_t tmp;
+    if (get_boot_time(&tmp) < 0) {
+      return OS_ERR;
+    }
+    lt = tmp * 1000;
+  }
+
+  res = OS_OK;
+
+  pthread_mutex_lock(&contextSwitchLock);
+  {
+
+    uint64_t sw = 0;
+    clock_t t, d;
+
+    if (lastTime == 0) {
+      lastTime = lt;
+    }
+
+    t = clock();
+    d = t - lastTime;
+
+    if (d == 0) {
+      *rate = lastRate;
+    } else if (get_noof_context_switches(counters, &sw)== OS_OK) {
+      *rate      = ((double)(sw - lastSwitches) / d) * 1000;
+      lastRate     = *rate;
+      lastSwitches = sw;
+      lastTime     = t;
+    } else {
+      *rate = 0.0;
+      res   = OS_ERR;
+    }
+    if (*rate < 0.0) {
+      *rate = 0.0;
+      lastRate = 0.0;
+    }
+  }
+  pthread_mutex_unlock(&contextSwitchLock);
+  return res;
+}
+
+class CPUPerformanceInterface::CPUPerformance : public CHeapObj<mtInternal> {
+  friend class CPUPerformanceInterface;
+ private:
+  CPUPerfCounters _counters;
+  int cpu_load(int which_logical_cpu, double* cpu_load);
+  int context_switch_rate(double* rate);
+  int cpu_load_total_process(double* cpu_load);
+  int cpu_loads_process(double* pjvmUserLoad, double* pjvmKernelLoad, double* psystemTotalLoad);
+
+  CPUPerformance();
+  ~CPUPerformance();
+  bool initialize();
+};
+
+CPUPerformanceInterface::CPUPerformance::CPUPerformance() {
+  _counters.nProcs = 0;
+  _counters.jvmTicks = NULL;
+  _counters.kstat_ctrl = NULL;
+}
+
+bool CPUPerformanceInterface::CPUPerformance::initialize() {
+  // initialize kstat control structure,
+  _counters.kstat_ctrl = kstat_open();
+  assert(_counters.kstat_ctrl != NULL, "error initializing kstat control structure!");
+
+  if (NULL == _counters.kstat_ctrl) {
+    return false;
+  }
+
+  // Get number of CPU(s)
+  if ((_counters.nProcs = sysconf(_SC_NPROCESSORS_ONLN)) == OS_ERR) {
+    // ignore error?
+    _counters.nProcs = 1;
+  }
+
+  assert(_counters.nProcs > 0, "no CPUs detected in sysconf call!");
+  if (_counters.nProcs == 0) {
+    return false;
+  }
+
+  // Data structure(s) for saving CPU load (one per CPU)
+  size_t tick_array_size = _counters.nProcs * sizeof(CPUPerfTicks);
+  _counters.jvmTicks = (CPUPerfTicks*)NEW_C_HEAP_ARRAY(char, tick_array_size, mtInternal);
+  if (NULL == _counters.jvmTicks) {
+    return false;
+  }
+  memset(_counters.jvmTicks, 0, tick_array_size);
+
+  // Get kstat cpu_stat counters for every CPU
+  // loop over kstat to find our cpu_stat(s)
+  int i = 0;
+  for (kstat_t* kstat = _counters.kstat_ctrl->kc_chain; kstat != NULL; kstat = kstat->ks_next) {
+    if (strncmp(kstat->ks_module, "cpu_stat", 8) == 0) {
+      if (kstat_read(_counters.kstat_ctrl, kstat, NULL) == OS_ERR) {
+        continue;
+      }
+      if (i == _counters.nProcs) {
+        // more cpu_stats than reported CPUs
+        break;
+      }
+      _counters.jvmTicks[i++].kstat = kstat;
+    }
+  }
+  return true;
+}
+
+CPUPerformanceInterface::CPUPerformance::~CPUPerformance() {
+  if (_counters.jvmTicks != NULL) {
+    FREE_C_HEAP_ARRAY(char, _counters.jvmTicks, mtInternal);
+  }
+  if (_counters.kstat_ctrl != NULL) {
+    kstat_close(_counters.kstat_ctrl);
+  }
+}
+
+int CPUPerformanceInterface::CPUPerformance::cpu_load(int which_logical_cpu, double* cpu_load) {
+  assert(cpu_load != NULL, "cpu_load pointer is NULL!");
+  double t = .0;
+  if (-1 == which_logical_cpu) {
+    for (int i = 0; i < _counters.nProcs; i++) {
+      t += get_cpu_load(i, &_counters);
+    }
+    // Cap the total system load at 1.0
+    t = MIN2<double>((t / _counters.nProcs), 1.0);
+  } else {
+    t = MIN2<double>(get_cpu_load(which_logical_cpu, &_counters), 1.0);
+  }
+
+  *cpu_load = t;
+  return OS_OK;
+}
+
+int CPUPerformanceInterface::CPUPerformance::cpu_load_total_process(double* cpu_load) {
+  assert(cpu_load != NULL, "cpu_load pointer is NULL!");
+
+  psinfo_t info;
+
+  // Get the percentage of "recent cpu usage" from all the LWPs in the JVM's
+  // process. This is returned as a value between 0.0 and 1.0, multiplied by 0x8000.
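+  // (e.g. a pr_pctcpu value of 0x4000 corresponds to 0x4000 / 0x8000 = 0.5, i.e. 50%)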
+  if (get_psinfo2(&info.pr_pctcpu, sizeof(info.pr_pctcpu), offsetof(psinfo_t, pr_pctcpu)) != 0) {
+    *cpu_load = 0.0;
+    return OS_ERR;
+  }
+  *cpu_load = (double) info.pr_pctcpu / 0x8000;
+  return OS_OK;
+}
+
+int CPUPerformanceInterface::CPUPerformance::cpu_loads_process(double* pjvmUserLoad, double* pjvmKernelLoad, double* psystemTotalLoad) {
+  assert(pjvmUserLoad != NULL, "pjvmUserLoad not inited");
+  assert(pjvmKernelLoad != NULL, "pjvmKernelLoad not inited");
+  assert(psystemTotalLoad != NULL, "psystemTotalLoad not inited");
+
+  static uint64_t lastTime;
+  static uint64_t lastUser, lastKernel;
+  static double lastUserRes, lastKernelRes;
+
+  pstatus_t pss;
+  psinfo_t  info;
+
+  *pjvmKernelLoad = *pjvmUserLoad = *psystemTotalLoad = 0;
+  if (get_info("/proc/self/status", &pss.pr_utime, sizeof(timestruc_t)*2, offsetof(pstatus_t, pr_utime)) != 0) {
+    return OS_ERR;
+  }
+
+  if (get_psinfo(&info) != 0) {
+    return OS_ERR;
+  }
+
+  // get the total time in user, kernel and total time
+  // check ratios for 'lately' and multiply the 'recent load'.
+  uint64_t time   = (info.pr_time.tv_sec * NANOS_PER_SEC) + info.pr_time.tv_nsec;
+  uint64_t user   = (pss.pr_utime.tv_sec * NANOS_PER_SEC) + pss.pr_utime.tv_nsec;
+  uint64_t kernel = (pss.pr_stime.tv_sec * NANOS_PER_SEC) + pss.pr_stime.tv_nsec;
+  uint64_t diff   = time - lastTime;
+  double load     = (double) info.pr_pctcpu / 0x8000;
+
+  if (diff > 0) {
+    lastUserRes = (load * (user - lastUser)) / diff;
+    lastKernelRes = (load * (kernel - lastKernel)) / diff;
+
+    // BUG9182835 - patch for clamping these values to sane ones.
+    lastUserRes   = MIN2<double>(1, lastUserRes);
+    lastUserRes   = MAX2<double>(0, lastUserRes);
+    lastKernelRes = MIN2<double>(1, lastKernelRes);
+    lastKernelRes = MAX2<double>(0, lastKernelRes);
+  }
+
+  double t = .0;
+  cpu_load(-1, &t);
+  // clamp at user+system and 1.0
+  if (lastUserRes + lastKernelRes > t) {
+    t = MIN2<double>(lastUserRes + lastKernelRes, 1.0);
+  }
+
+  *pjvmUserLoad   = lastUserRes;
+  *pjvmKernelLoad = lastKernelRes;
+  *psystemTotalLoad = t;
+
+  lastTime   = time;
+  lastUser   = user;
+  lastKernel = kernel;
+
+  return OS_OK;
+}
+
+int CPUPerformanceInterface::CPUPerformance::context_switch_rate(double* rate) {
+  return perf_context_switch_rate(&_counters, rate);
+}
+
+CPUPerformanceInterface::CPUPerformanceInterface() {
+  _impl = NULL;
+}
+
+bool CPUPerformanceInterface::initialize() {
+  _impl = new CPUPerformanceInterface::CPUPerformance();
+  return _impl != NULL && _impl->initialize();
+}
+
+CPUPerformanceInterface::~CPUPerformanceInterface(void) {
+  if (_impl != NULL) {
+    delete _impl;
+  }
+}
+
+int CPUPerformanceInterface::cpu_load(int which_logical_cpu, double* const cpu_load) const {
+  return _impl->cpu_load(which_logical_cpu, cpu_load);
+}
+
+int CPUPerformanceInterface::cpu_load_total_process(double* const cpu_load) const {
+  return _impl->cpu_load_total_process(cpu_load);
+}
+
+int CPUPerformanceInterface::cpu_loads_process(double* const pjvmUserLoad, double* const pjvmKernelLoad, double* const psystemTotalLoad) const {
+  return _impl->cpu_loads_process(pjvmUserLoad, pjvmKernelLoad, psystemTotalLoad);
+}
+
+int CPUPerformanceInterface::context_switch_rate(double* const rate) const {
+  return _impl->context_switch_rate(rate);
+}
+
+class SystemProcessInterface::SystemProcesses : public CHeapObj<mtInternal> {
+  friend class SystemProcessInterface;
+ private:
+  class ProcessIterator : public CHeapObj<mtInternal> {
+    friend class SystemProcessInterface::SystemProcesses;
+   private:
+    DIR*           _dir;
+    struct dirent* _entry;
+    bool           _valid;
+
+    ProcessIterator();
+    ~ProcessIterator();
+    bool initialize();
+
+    bool is_valid() const { return _valid; }
+    bool is_valid_entry(struct dirent* const entry) const;
+    bool is_dir(const char* const name) const;
+    char* allocate_string(const char* const str) const;
+    int current(SystemProcess* const process_info);
+    int next_process();
+  };
+
+  ProcessIterator* _iterator;
+  SystemProcesses();
+  bool initialize();
+  ~SystemProcesses();
+
+  //information about system processes
+  int system_processes(SystemProcess** system_processes, int* no_of_sys_processes) const;
+};
+
+bool SystemProcessInterface::SystemProcesses::ProcessIterator::is_dir(const char* name) const {
+  struct stat64 mystat;
+  int ret_val = 0;
+
+  ret_val = ::stat64(name, &mystat);
+
+  if (ret_val < 0) {
+    return false;
+  }
+  ret_val = S_ISDIR(mystat.st_mode);
+  return ret_val > 0;
+}
+
+// if it has a numeric name, is a directory and has a 'psinfo' file in it
+bool SystemProcessInterface::SystemProcesses::ProcessIterator::is_valid_entry(struct dirent* entry) const {
+  // ignore the "." and ".." directories
+  if ((strcmp(entry->d_name, ".") == 0) ||
+      (strcmp(entry->d_name, "..") == 0)) {
+    return false;
+  }
+
+  char buffer[PATH_MAX] = {0};
+  uint64_t size = 0;
+  bool result = false;
+  FILE *fp = NULL;
+
+  if (atoi(entry->d_name) != 0) {
+    jio_snprintf(buffer, PATH_MAX, "/proc/%s", entry->d_name);
+
+    if (is_dir(buffer)) {
+      memset(buffer, 0, PATH_MAX);
+      jio_snprintf(buffer, PATH_MAX, "/proc/%s/psinfo", entry->d_name);
+      if ((fp = fopen(buffer, "r")) != NULL) {
+        int nread = 0;
+        psinfo_t psinfo_data;
+        if ((nread = fread(&psinfo_data, 1, sizeof(psinfo_t), fp)) != -1) {
+          // only considering system process owned by root
+          if (psinfo_data.pr_uid == 0) {
+            result = true;
+          }
+        }
+      }
+    }
+  }
+
+  if (fp != NULL) {
+    fclose(fp);
+  }
+
+  return result;
+}
+
+char* SystemProcessInterface::SystemProcesses::ProcessIterator::allocate_string(const char* str) const {
+  if (str != NULL) {
+    size_t len = strlen(str);
+    char* tmp = NEW_C_HEAP_ARRAY(char, len+1, mtInternal);
+    strncpy(tmp, str, len);
+    tmp[len] = '\0';
+    return tmp;
+  }
+  return NULL;
+}
+
+int SystemProcessInterface::SystemProcesses::ProcessIterator::current(SystemProcess* process_info) {
+  if (!is_valid()) {
+    return OS_ERR;
+  }
+
+  char psinfo_path[PATH_MAX] = {0};
+  jio_snprintf(psinfo_path, PATH_MAX, "/proc/%s/psinfo", _entry->d_name);
+
+  FILE *fp = NULL;
+  if ((fp = fopen(psinfo_path, "r")) == NULL) {
+    return OS_ERR;
+  }
+
+  int nread = 0;
+  psinfo_t psinfo_data;
+  if ((nread = fread(&psinfo_data, 1, sizeof(psinfo_t), fp)) == -1) {
+    fclose(fp);
+    return OS_ERR;
+  }
+
+  char *exe_path = NULL;
+  if ((psinfo_data.pr_fname != NULL) &&
+      (psinfo_data.pr_psargs != NULL)) {
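+    // e.g. pr_psargs "/usr/bin/java -Xmx1g" with pr_fname "java" yields the leading "/usr/bin" part as the path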
+    char *path_substring = strstr(psinfo_data.pr_psargs, psinfo_data.pr_fname);
+    if (path_substring != NULL) {
+      int len = path_substring - psinfo_data.pr_psargs;
+      exe_path = NEW_C_HEAP_ARRAY(char, len+1, mtInternal);
+      if (exe_path != NULL) {
+        jio_snprintf(exe_path, len, "%s", psinfo_data.pr_psargs);
+        exe_path[len] = '\0';
+      }
+    }
+  }
+
+  process_info->set_pid(atoi(_entry->d_name));
+  process_info->set_name(allocate_string(psinfo_data.pr_fname));
+  process_info->set_path(allocate_string(exe_path));
+  process_info->set_command_line(allocate_string(psinfo_data.pr_psargs));
+
+  if (exe_path != NULL) {
+    FREE_C_HEAP_ARRAY(char, exe_path, mtInternal);
+  }
+
+  if (fp != NULL) {
+    fclose(fp);
+  }
+
+  return OS_OK;
+}
+
+int SystemProcessInterface::SystemProcesses::ProcessIterator::next_process() {
+  if (!is_valid()) {
+    return OS_ERR;
+  }
+
+  do {
+    _entry = os::readdir(_dir);
+    if (_entry == NULL) {
+      // Error or reached end.  Could use errno to distinguish those cases.
+      _valid = false;
+      return OS_ERR;
+    }
+  } while(!is_valid_entry(_entry));
+
+  _valid = true;
+  return OS_OK;
+}
+
+SystemProcessInterface::SystemProcesses::ProcessIterator::ProcessIterator() {
+  _dir = NULL;
+  _entry = NULL;
+  _valid = false;
+}
+
+bool SystemProcessInterface::SystemProcesses::ProcessIterator::initialize() {
+  _dir = os::opendir("/proc");
+  _entry = NULL;
+  _valid = true;
+  next_process();
+
+  return true;
+}
+
+SystemProcessInterface::SystemProcesses::ProcessIterator::~ProcessIterator() {
+  if (_dir != NULL) {
+    os::closedir(_dir);
+  }
+}
+
+SystemProcessInterface::SystemProcesses::SystemProcesses() {
+  _iterator = NULL;
+}
+
+bool SystemProcessInterface::SystemProcesses::initialize() {
+  _iterator = new SystemProcessInterface::SystemProcesses::ProcessIterator();
+  return _iterator != NULL && _iterator->initialize();
+}
+
+SystemProcessInterface::SystemProcesses::~SystemProcesses() {
+  if (_iterator != NULL) {
+    delete _iterator;
+  }
+}
+
+int SystemProcessInterface::SystemProcesses::system_processes(SystemProcess** system_processes, int* no_of_sys_processes) const {
+  assert(system_processes != NULL, "system_processes pointer is NULL!");
+  assert(no_of_sys_processes != NULL, "system_processes counter pointer is NULL!");
+  assert(_iterator != NULL, "iterator is NULL!");
+
+  // initialize pointers
+  *no_of_sys_processes = 0;
+  *system_processes = NULL;
+
+  while (_iterator->is_valid()) {
+    SystemProcess* tmp = new SystemProcess();
+    _iterator->current(tmp);
+
+    // if there is already a head...
+    if (*system_processes != NULL) {
+      // ...chain it in behind the new entry
+      tmp->set_next(*system_processes);
+    }
+    // new head
+    *system_processes = tmp;
+    // increment
+    (*no_of_sys_processes)++;
+    // step forward
+    _iterator->next_process();
+  }
+  return OS_OK;
+}
+
+int SystemProcessInterface::system_processes(SystemProcess** system_procs, int* const no_of_sys_processes) const {
+  return _impl->system_processes(system_procs, no_of_sys_processes);
+}
+
+SystemProcessInterface::SystemProcessInterface() {
+  _impl = NULL;
+}
+
+bool SystemProcessInterface::initialize() {
+  _impl = new SystemProcessInterface::SystemProcesses();
+  return _impl != NULL && _impl->initialize();
+}
+
+SystemProcessInterface::~SystemProcessInterface() {
+  if (_impl != NULL) {
+    delete _impl;
+  }
+}
+
+CPUInformationInterface::CPUInformationInterface() {
+  _cpu_info = NULL;
+}
+
+bool CPUInformationInterface::initialize() {
+  _cpu_info = new CPUInformation();
+  if (_cpu_info == NULL) {
+    return false;
+  }
+  _cpu_info->set_number_of_hardware_threads(VM_Version_Ext::number_of_threads());
+  _cpu_info->set_number_of_cores(VM_Version_Ext::number_of_cores());
+  _cpu_info->set_number_of_sockets(VM_Version_Ext::number_of_sockets());
+  _cpu_info->set_cpu_name(VM_Version_Ext::cpu_name());
+  _cpu_info->set_cpu_description(VM_Version_Ext::cpu_description());
+  return true;
+}
+
+CPUInformationInterface::~CPUInformationInterface() {
+  if (_cpu_info != NULL) {
+    if (_cpu_info->cpu_name() != NULL) {
+      const char* cpu_name = _cpu_info->cpu_name();
+      FREE_C_HEAP_ARRAY(char, cpu_name, mtInternal);
+      _cpu_info->set_cpu_name(NULL);
+    }
+    if (_cpu_info->cpu_description() != NULL) {
+      const char* cpu_desc = _cpu_info->cpu_description();
+      FREE_C_HEAP_ARRAY(char, cpu_desc, mtInternal);
+      _cpu_info->set_cpu_description(NULL);
+    }
+    delete _cpu_info;
+  }
+}
+
+int CPUInformationInterface::cpu_information(CPUInformation& cpu_info) {
+  if (_cpu_info == NULL) {
+    return OS_ERR;
+  }
+
+  cpu_info = *_cpu_info; // shallow copy assignment
+  return OS_OK;
+}
+
+class NetworkPerformanceInterface::NetworkPerformance : public CHeapObj<mtInternal> {
+  friend class NetworkPerformanceInterface;
+ private:
+  NetworkPerformance();
+  NetworkPerformance(const NetworkPerformance& rhs); // no impl
+  NetworkPerformance& operator=(const NetworkPerformance& rhs); // no impl
+  bool initialize();
+  ~NetworkPerformance();
+  int network_utilization(NetworkInterface** network_interfaces) const;
+};
+
+NetworkPerformanceInterface::NetworkPerformance::NetworkPerformance() {
+
+}
+
+bool NetworkPerformanceInterface::NetworkPerformance::initialize() {
+  return true;
+}
+
+NetworkPerformanceInterface::NetworkPerformance::~NetworkPerformance() {
+
+}
+
+int NetworkPerformanceInterface::NetworkPerformance::network_utilization(NetworkInterface** network_interfaces) const
+{
+  kstat_ctl_t* ctl = kstat_open();
+  if (ctl == NULL) {
+    return OS_ERR;
+  }
+
+  NetworkInterface* ret = NULL;
+  for (kstat_t* k = ctl->kc_chain; k != NULL; k = k->ks_next) {
+    if (strcmp(k->ks_class, "net") != 0) {
+      continue;
+    }
+    if (strcmp(k->ks_module, "link") != 0) {
+      continue;
+    }
+
+    if (kstat_read(ctl, k, NULL) == -1) {
+      return OS_ERR;
+    }
+
+    uint64_t bytes_in = UINT64_MAX;
+    uint64_t bytes_out = UINT64_MAX;
+    for (int i = 0; i < k->ks_ndata; ++i) {
+      kstat_named_t* data = &reinterpret_cast<kstat_named_t*>(k->ks_data)[i];
+      if (strcmp(data->name, "rbytes64") == 0) {
+        bytes_in = data->value.ui64;
+      }
+      else if (strcmp(data->name, "obytes64") == 0) {
+        bytes_out = data->value.ui64;
+      }
+    }
+
+    if ((bytes_in != UINT64_MAX) && (bytes_out != UINT64_MAX)) {
+      NetworkInterface* cur = new NetworkInterface(k->ks_name, bytes_in, bytes_out, ret);
+      ret = cur;
+    }
+  }
+
+  kstat_close(ctl);
+  *network_interfaces = ret;
+
+  return OS_OK;
+}
+
+NetworkPerformanceInterface::NetworkPerformanceInterface() {
+  _impl = NULL;
+}
+
+NetworkPerformanceInterface::~NetworkPerformanceInterface() {
+  if (_impl != NULL) {
+    delete _impl;
+  }
+}
+
+bool NetworkPerformanceInterface::initialize() {
+  _impl = new NetworkPerformanceInterface::NetworkPerformance();
+  return _impl != NULL && _impl->initialize();
+}
+
+int NetworkPerformanceInterface::network_utilization(NetworkInterface** network_interfaces) const {
+  return _impl->network_utilization(network_interfaces);
+}
--- a/src/os/solaris/vm/os_solaris.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/os/solaris/vm/os_solaris.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -1817,6 +1817,43 @@
   dlclose(handle);
 }
 
+int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
+  Dl_info dli;
+  // Locate the shared object (libjvm) that contains this function; bail out if that fails.
+  if (dladdr(CAST_FROM_FN_PTR(void *, os::get_loaded_modules_info), &dli) == 0 ||
+      dli.dli_fname == NULL) {
+    return 1;
+  }
+
+  void * handle = dlopen(dli.dli_fname, RTLD_LAZY);
+  if (handle == NULL) {
+    return 1;
+  }
+
+  Link_map *map;
+  dlinfo(handle, RTLD_DI_LINKMAP, &map);
+  if (map == NULL) {
+    dlclose(handle);
+    return 1;
+  }
+
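+  // rewind to the first entry of the link map, then walk forward reporting each module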
+  while (map->l_prev != NULL) {
+    map = map->l_prev;
+  }
+
+  while (map != NULL) {
+    // Iterate through all map entries and call callback with fields of interest
+    if (callback(map->l_name, (address)map->l_addr, (address)0, param)) {
+      dlclose(handle);
+      return 1;
+    }
+    map = map->l_next;
+  }
+
+  dlclose(handle);
+  return 0;
+}
+
   // Loads .dll/.so and
   // in case of error it checks if .dll/.so was built for the
   // same architecture as Hotspot is running on
@@ -3259,6 +3296,15 @@
   INTERRUPTIBLE_RETURN_INT_VM(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
 }
 
+size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
+  size_t res;
+  JavaThread* thread = (JavaThread*)Thread::current();
+  assert(thread->thread_state() == _thread_in_vm, "Assumed _thread_in_vm");
+  ThreadBlockInVM tbiv(thread);
+  RESTARTABLE(::pread(fd, buf, (size_t) nBytes, offset), res);
+  return res;
+}
+
 size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
   INTERRUPTIBLE_RETURN_INT(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/windows/vm/iphlp_interface.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "iphlp_interface.hpp"
+#include "runtime/os.hpp"
+
+// IPHLP API
+typedef DWORD(WINAPI *GetIfTable2_Fn)(PMIB_IF_TABLE2*);
+typedef DWORD(WINAPI *FreeMibTable_Fn)(PVOID);
+
+// IPHLP statics
+GetIfTable2_Fn IphlpDll::_GetIfTable2 = NULL;
+FreeMibTable_Fn IphlpDll::_FreeMibTable = NULL;
+
+LONG IphlpDll::_critical_section = 0;
+LONG IphlpDll::_initialized = 0;
+LONG IphlpDll::_iphlp_reference_count = 0;
+HMODULE IphlpDll::_hModule = NULL;
+
+void IphlpDll::initialize(void) {
+  _hModule = os::win32::load_Windows_dll("iphlpapi.dll", NULL, 0);
+
+  if (NULL == _hModule) {
+    return;
+  }
+
+  // Resolve the entry points (GetIfTable2 and FreeMibTable have no separate ANSI ('A') / UNICODE variants)
+  _GetIfTable2 = (GetIfTable2_Fn)::GetProcAddress(_hModule, "GetIfTable2");
+  _FreeMibTable = (FreeMibTable_Fn)::GetProcAddress(_hModule, "FreeMibTable");
+
+  // interlock is used for fencing
+  InterlockedExchange(&_initialized, 1);
+}
+
+bool IphlpDll::IphlpDetach(void) {
+  LONG prev_ref_count = InterlockedExchangeAdd(&_iphlp_reference_count, -1);
+  BOOL ret = false;
+
+  if (1 == prev_ref_count) {
+    if (_initialized && _hModule != NULL) {
+      ret = FreeLibrary(_hModule);
+      if (ret) {
+        _hModule = NULL;
+        _GetIfTable2 = NULL;
+        _FreeMibTable = NULL;
+        InterlockedExchange(&_initialized, 0);
+      }
+    }
+  }
+  return ret != 0;
+}
+
+bool IphlpDll::IphlpAttach(void) {
+  InterlockedExchangeAdd(&_iphlp_reference_count, 1);
+
+  if (1 == _initialized) {
+    return true;
+  }
+
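+  // crude spin lock: acquire by swapping _critical_section from 0 to 1, release below by swapping it back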
+  while (InterlockedCompareExchange(&_critical_section, 1, 0) == 1);
+
+  if (0 == _initialized) {
+    initialize();
+  }
+
+  while (InterlockedCompareExchange(&_critical_section, 0, 1) == 0);
+
+  return (_GetIfTable2 != NULL && _FreeMibTable != NULL);
+}
+
+DWORD IphlpDll::GetIfTable2(PMIB_IF_TABLE2* Table) {
+  assert(_initialized && _GetIfTable2 != NULL,
+         "IphlpAttach() not yet called");
+
+  return _GetIfTable2(Table);
+}
+
+DWORD IphlpDll::FreeMibTable(PVOID Memory) {
+  assert(_initialized && _FreeMibTable != NULL,
+         "IphlpAttach() not yet called");
+
+  return _FreeMibTable(Memory);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/windows/vm/iphlp_interface.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_WINDOWS_VM_IPHLP_INTERFACE_HPP
+#define OS_WINDOWS_VM_IPHLP_INTERFACE_HPP
+
+#include <WinSock2.h>
+#include "memory/allocation.hpp"
+#include "utilities/macros.hpp"
+#include <ws2ipdef.h>
+#include <iphlpapi.h>
+
+class IphlpDll : public AllStatic {
+ private:
+  static LONG       _iphlp_reference_count;
+  static LONG       _critical_section;
+  static LONG       _initialized;
+  static HMODULE    _hModule;
+  static void       initialize(void);
+  static DWORD(WINAPI *_GetIfTable2)(PMIB_IF_TABLE2*);
+  static DWORD(WINAPI *_FreeMibTable)(PVOID);
+
+ public:
+  static DWORD GetIfTable2(PMIB_IF_TABLE2*);
+  static DWORD FreeMibTable(PVOID);
+  static bool       IphlpAttach(void);
+  static bool       IphlpDetach(void);
+};
+
+#endif // OS_WINDOWS_VM_IPHLP_INTERFACE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/windows/vm/os_perf_windows.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,1456 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "iphlp_interface.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/resourceArea.hpp"
+#include "pdh_interface.hpp"
+#include "runtime/os_perf.hpp"
+#include "runtime/os.hpp"
+#include "utilities/macros.hpp"
+#include "vm_version_ext_x86.hpp"
+#include <math.h>
+#include <psapi.h>
+#include <TlHelp32.h>
+
+/*
+ * Windows provides a vast number of performance objects and counters, which are
+ * consumed with the help of the Performance Data Helper (PDH) interface.
+ * We import a select few API entry points from PDH; see pdh_interface.hpp.
+ *
+ * The code in this file is largely an abstraction over the plumbing needed to
+ * start consuming an object and/or counter of choice.
+ *
+ */
+
+/*
+ * How to use:
+ * 1. Create a query
+ * 2. Add counters to the query
+ * 3. Collect the performance data using the query
+ * 4. Display the performance data using the counters associated with the query
+ * 5. Destroy the query (counter destruction implied)
+ * (the corresponding raw PDH call sequence is sketched right below)
+ */
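
As a point of reference, the five steps map onto the raw Win32 PDH API roughly as follows. This is an editorial sketch only, not part of the changeset: it hard-codes an English counter path and calls pdh.lib directly, whereas the code in this file goes through the PdhDll:: wrappers and builds localization-safe paths from the PDH indexes declared below.

  #include <windows.h>
  #include <pdh.h>   // link against pdh.lib

  static double sample_total_cpu_load() {
    PDH_HQUERY query = NULL;
    PDH_HCOUNTER counter = NULL;
    PDH_FMT_COUNTERVALUE value;
    // 1. create the query
    if (PdhOpenQuery(NULL, 0, &query) != ERROR_SUCCESS) {
      return -1.0;
    }
    // 2. add a counter to the query (hard-coded English path for illustration)
    if (PdhAddCounter(query, TEXT("\\Processor(_Total)\\% Processor Time"), 0, &counter) != ERROR_SUCCESS) {
      PdhCloseQuery(query);
      return -1.0;
    }
    // 3. collect the data; rate counters need two samples some time apart
    PdhCollectQueryData(query);
    Sleep(1000);
    PdhCollectQueryData(query);
    // 4. display (format) the data via the counter handle
    double result = -1.0;
    if (PdhGetFormattedCounterValue(counter, PDH_FMT_DOUBLE, NULL, &value) == ERROR_SUCCESS) {
      result = value.doubleValue;
    }
    // 5. destroy the query (its counters are destroyed with it)
    PdhCloseQuery(query);
    return result;
  }
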
+
+/*
+ * Every PDH artifact, such as a processor, process, thread or memory, is
+ * identified by an index that is always the same irrespective
+ * of the localized version of the operating system or service pack installed.
+ * INFO: Using PDH APIs Correctly in a Localized Language (Q287159)
+ *   http://support.microsoft.com/default.aspx?scid=kb;EN-US;q287159
+ *
+ * To find the correct index for an object or counter, inspect the registry key / value:
+ * [HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Perflib\009\Counter]
+ *
+ * some common PDH indexes
+ */
+static const DWORD PDH_PROCESSOR_IDX = 238;
+static const DWORD PDH_PROCESSOR_TIME_IDX = 6;
+static const DWORD PDH_PRIV_PROCESSOR_TIME_IDX = 144;
+static const DWORD PDH_PROCESS_IDX = 230;
+static const DWORD PDH_ID_PROCESS_IDX = 784;
+static const DWORD PDH_CONTEXT_SWITCH_RATE_IDX = 146;
+static const DWORD PDH_SYSTEM_IDX = 2;
+
+/* useful pdh fmt's */
+static const char* const OBJECT_COUNTER_FMT = "\\%s\\%s";
+static const size_t OBJECT_COUNTER_FMT_LEN = 2;
+static const char* const OBJECT_WITH_INSTANCES_COUNTER_FMT = "\\%s(%s)\\%s";
+static const size_t OBJECT_WITH_INSTANCES_COUNTER_FMT_LEN = 4;
+static const char* const PROCESS_OBJECT_INSTANCE_COUNTER_FMT = "\\%s(%s#%s)\\%s";
+static const size_t PROCESS_OBJECT_INSTANCE_COUNTER_FMT_LEN = 5;
+
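
To make the index scheme and format strings above concrete: a localization-safe counter path can be built roughly as in the sketch below. Editorial illustration only, not part of the changeset; it assumes the raw PdhLookupPerfNameByIndexA entry point (the file itself resolves PDH functions via the PdhDll:: wrappers), and build_total_processor_time_path is a hypothetical helper name.

  static int build_total_processor_time_path(char* path, size_t path_len) {
    char object_name[256];   // localized name for the "Processor" object
    char counter_name[256];  // localized name for the "% Processor Time" counter
    DWORD size = sizeof(object_name);
    if (PdhLookupPerfNameByIndexA(NULL, PDH_PROCESSOR_IDX, object_name, &size) != ERROR_SUCCESS) {
      return OS_ERR;
    }
    size = sizeof(counter_name);
    if (PdhLookupPerfNameByIndexA(NULL, PDH_PROCESSOR_TIME_IDX, counter_name, &size) != ERROR_SUCCESS) {
      return OS_ERR;
    }
    // e.g. "\Processor(_Total)\% Processor Time" on an English system
    jio_snprintf(path, path_len, OBJECT_WITH_INSTANCES_COUNTER_FMT, object_name, "_Total", counter_name);
    return OS_OK;
  }
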
+static const char* process_image_name = NULL; // for example, "java" but could have another image name
+static char* pdh_IDProcess_counter_fmt = NULL;   // "\Process(java#%d)\ID Process"
+
+// Need to limit how often we update a query to minimize the heisenberg effect.
+// (PDH behaves erratically if the counters are queried too often, especially counters that
+// store and use values from two consecutive updates, like cpu load.)
+static const int min_update_interval_millis = 500;
+
+/*
+* Structs for PDH queries.
+*/
+typedef struct {
+  HQUERY query;
+  s8     lastUpdate; // Last time query was updated (current millis).
+} UpdateQueryS, *UpdateQueryP;
+
+
+typedef struct {
+  UpdateQueryS query;
+  HCOUNTER     counter;
+  bool         initialized;
+} CounterQueryS, *CounterQueryP;
+
+typedef struct {
+  UpdateQueryS query;
+  HCOUNTER*    counters;
+  int          noOfCounters;
+  bool         initialized;
+} MultiCounterQueryS, *MultiCounterQueryP;
+
+typedef struct {
+  MultiCounterQueryP queries;
+  int                size;
+  bool               initialized;
+} MultiCounterQuerySetS, *MultiCounterQuerySetP;
+
+typedef struct {
+  MultiCounterQuerySetS set;
+  int                   process_index;
+} ProcessQueryS, *ProcessQueryP;
+
+static void pdh_cleanup(HQUERY* const query, HCOUNTER* const counter) {
+  if (counter != NULL && *counter != NULL) {
+    PdhDll::PdhRemoveCounter(*counter);
+    *counter = NULL;
+  }
+  if (query != NULL && *query != NULL) {
+    PdhDll::PdhCloseQuery(*query);
+    *query = NULL;
+  }
+}
+
+static CounterQueryP create_counter_query() {
+  CounterQueryP const query = NEW_C_HEAP_ARRAY(CounterQueryS, 1, mtInternal);
+  memset(query, 0, sizeof(CounterQueryS));
+  return query;
+}
+
+static void destroy_counter_query(CounterQueryP query) {
+  assert(query != NULL, "invariant");
+  pdh_cleanup(&query->query.query, &query->counter);
+  FREE_C_HEAP_ARRAY(CounterQueryS, query, mtInternal);
+}
+
+static MultiCounterQueryP create_multi_counter_query() {
+  MultiCounterQueryP const query = NEW_C_HEAP_ARRAY(MultiCounterQueryS, 1, mtInternal);
+  memset(query, 0, sizeof(MultiCounterQueryS));
+  return query;
+}
+
+static void destroy_counter_query(MultiCounterQueryP counter_query) {
+  if (counter_query != NULL) {
+    for (int i = 0; i < counter_query->noOfCounters; ++i) {
+      pdh_cleanup(NULL, &counter_query->counters[i]);
+    }
+    FREE_C_HEAP_ARRAY(char, counter_query->counters, mtInternal);
+    pdh_cleanup(&counter_query->query.query, NULL);
+    FREE_C_HEAP_ARRAY(MultiCounterQueryS, counter_query, mtInternal);
+  }
+}
+
+static void destroy_multi_counter_query(MultiCounterQuerySetP counter_query_set) {
+  for (int i = 0; i < counter_query_set->size; i++) {
+    for (int j = 0; j < counter_query_set->queries[i].noOfCounters; ++j) {
+      pdh_cleanup(NULL, &counter_query_set->queries[i].counters[j]);
+    }
+    FREE_C_HEAP_ARRAY(char, counter_query_set->queries[i].counters, mtInternal);
+    pdh_cleanup(&counter_query_set->queries[i].query.query, NULL);
+  }
+  FREE_C_HEAP_ARRAY(MultiCounterQueryS, counter_query_set->queries, mtInternal);
+}
+
+static void destroy_counter_query(MultiCounterQuerySetP counter_query_set) {
+  destroy_multi_counter_query(counter_query_set);
+  FREE_C_HEAP_ARRAY(MultiCounterQuerySetS, counter_query_set, mtInternal);
+}
+
+static void destroy_counter_query(ProcessQueryP process_query) {
+  destroy_multi_counter_query(&process_query->set);
+  FREE_C_HEAP_ARRAY(ProcessQueryS, process_query, mtInternal);
+}
+
+static int open_query(HQUERY* query) {
+  return PdhDll::PdhOpenQuery(NULL, 0, query);
+}
+
+template <typename QueryP>
+static int open_query(QueryP query) {
+  return open_query(&query->query);
+}
+
+static int allocate_counters(MultiCounterQueryP query, size_t nofCounters) {
+  assert(query != NULL, "invariant");
+  assert(!query->initialized, "invariant");
+  assert(0 == query->noOfCounters, "invariant");
+  assert(query->counters == NULL, "invariant");
+  query->counters = (HCOUNTER*)NEW_C_HEAP_ARRAY(char, nofCounters * sizeof(HCOUNTER), mtInternal);
+  if (query->counters == NULL) {
+    return OS_ERR;
+  }
+  memset(query->counters, 0, nofCounters * sizeof(HCOUNTER));
+  query->noOfCounters = (int)nofCounters;
+  return OS_OK;
+}
+
+static int allocate_counters(MultiCounterQuerySetP query_set, size_t nofCounters) {
+  assert(query_set != NULL, "invariant");
+  assert(!query_set->initialized, "invariant");
+  for (int i = 0; i < query_set->size; ++i) {
+    if (allocate_counters(&query_set->queries[i], nofCounters) != OS_OK) {
+      return OS_ERR;
+    }
+  }
+  return OS_OK;
+}
+
+static int allocate_counters(ProcessQueryP process_query, size_t nofCounters) {
+  assert(process_query != NULL, "invariant");
+  return allocate_counters(&process_query->set, nofCounters);
+}
+
+static void deallocate_counters(MultiCounterQueryP query) {
+  if (query->counters != NULL) {
+    FREE_C_HEAP_ARRAY(char, query->counters, mtInternal);
+    query->counters = NULL;
+    query->noOfCounters = 0;
+  }
+}
+
+static OSReturn add_counter(UpdateQueryP query, HCOUNTER* counter, const char* path, bool first_sample_on_init) {
+  assert(query != NULL, "invariant");
+  assert(counter != NULL, "invariant");
+  assert(path != NULL, "invariant");
+  if (query->query == NULL) {
+    if (open_query(query) != ERROR_SUCCESS) {
+      return OS_ERR;
+    }
+  }
+  assert(query->query != NULL, "invariant");
+  PDH_STATUS status = PdhDll::PdhAddCounter(query->query, path, 0, counter);
+  if (PDH_CSTATUS_NO_OBJECT == status || PDH_CSTATUS_NO_COUNTER == status) {
+    return OS_ERR;
+  }
+  /*
+  * According to the MSDN documentation, rate counters must be read twice:
+  *
+  * "Obtaining the value of rate counters such as Page faults/sec requires that
+  *  PdhCollectQueryData be called twice, with a specific time interval between
+  *  the two calls, before calling PdhGetFormattedCounterValue. Call Sleep to
+  *  implement the waiting period between the two calls to PdhCollectQueryData."
+  *
+  *  Take the first sample here, so that the next "real" sample can succeed.
+  */
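+  /*
+  * Illustrative sketch of the canonical two-sample PDH pattern described above
+  * (standard PDH API usage, not code from this file; the counter path and
+  * interval_ms are placeholders):
+  *
+  *   HQUERY q = NULL;
+  *   HCOUNTER c = NULL;
+  *   PDH_FMT_COUNTERVALUE v;
+  *   PdhOpenQuery(NULL, 0, &q);
+  *   PdhAddCounter(q, "\\System\\Context Switches/sec", 0, &c);
+  *   PdhCollectQueryData(q);   // first sample
+  *   Sleep(interval_ms);       // wait between samples
+  *   PdhCollectQueryData(q);   // second sample
+  *   PdhGetFormattedCounterValue(c, PDH_FMT_DOUBLE, NULL, &v);
+  *   PdhCloseQuery(q);
+  */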
+  if (first_sample_on_init) {
+    PdhDll::PdhCollectQueryData(query->query);
+  }
+  return OS_OK;
+}
+
+template <typename QueryP>
+static OSReturn add_counter(QueryP counter_query, HCOUNTER* counter, const char* path, bool first_sample_on_init) {
+  assert(counter_query != NULL, "invariant");
+  assert(counter != NULL, "invariant");
+  assert(path != NULL, "invariant");
+  return add_counter(&counter_query->query, counter, path, first_sample_on_init);
+}
+
+static OSReturn add_counter(CounterQueryP counter_query, const char* path, bool first_sample_on_init) {
+  if (add_counter(counter_query, &counter_query->counter, path, first_sample_on_init) != OS_OK) {
+    // performance counter might be disabled in the registry
+    return OS_ERR;
+  }
+  counter_query->initialized = true;
+  return OS_OK;
+}
+
+static OSReturn add_process_counter(MultiCounterQueryP query, int slot_index, const char* path, bool first_sample_on_init) {
+  assert(query != NULL, "invariant");
+  assert(slot_index < query->noOfCounters, "invariant");
+  assert(query->counters[slot_index] == NULL, "invariant");
+  const OSReturn ret = add_counter(query, &query->counters[slot_index], path, first_sample_on_init);
+  if (OS_OK == ret) {
+    if (slot_index + 1 == query->noOfCounters) {
+      query->initialized = true;
+    }
+  }
+  return ret;
+}
+
+static int collect_query_data(UpdateQueryP update_query) {
+  assert(update_query != NULL, "invariant");
+  const s8 now = os::javaTimeMillis();
+  if (now - update_query->lastUpdate > min_update_interval_millis) {
+    if (PdhDll::PdhCollectQueryData(update_query->query) != ERROR_SUCCESS) {
+      return OS_ERR;
+    }
+    update_query->lastUpdate = now;
+  }
+  return OS_OK;
+}
+
+template <typename Query>
+static int collect_query_data(Query* counter_query) {
+  assert(counter_query != NULL, "invariant");
+  return collect_query_data(&counter_query->query);
+}
+
+static int formatted_counter_value(HCOUNTER counter, DWORD format, PDH_FMT_COUNTERVALUE* const value) {
+  assert(value != NULL, "invariant");
+  if (PdhDll::PdhGetFormattedCounterValue(counter, format, NULL, value) != ERROR_SUCCESS) {
+    return OS_ERR;
+  }
+  return OS_OK;
+}
+
+/*
+* Working against the Process object and its related counters is inherently problematic
+* when using the PDH API:
+*
+* Using PDH, a process is not primarily identified by the process id,
+* but with a sequential number, for example \Process(java#0), \Process(java#1), ...
+* The really bad part is that this list is reset as soon as a process exits:
+* If \Process(java#1) exits, \Process(java#3) now becomes \Process(java#2) etc.
+*
+* The PDH api requires a process identifier to be submitted when registering
+* a query, but as soon as the list resets, the query is invalidated (since the name changed).
+*
+* Solution:
+* The #number identifier for a Process query can only decrease after process creation.
+*
+* We therefore create an array of counter queries for all process object instances
+* up to and including ourselves:
+*
+* Ex. we come in as third process instance (java#2), we then create and register
+* queries for the following Process object instances:
+* java#0, java#1, java#2
+*
+* current_query_index_for_process() keeps track of the current "correct" query
+* (in order to keep this index valid when the list resets from underneath,
+* be sure to call current_query_index_for_process() before every query involving
+* Process object instance data).
+*
+* If unable to query, returns OS_ERR (-1).
+*/
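+/*
+* Illustration (hypothetical scenario): starting as the third instance (java#2),
+* we register queries for java#0, java#1 and java#2. Should java#0 later exit,
+* this process is renumbered to java#1; ensure_valid_process_query_index() (below)
+* detects the shift via the "ID Process" counter and shrinks the query set to match.
+*/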
+static int current_query_index_for_process() {
+  assert(process_image_name != NULL, "invariant");
+  assert(pdh_IDProcess_counter_fmt != NULL, "invariant");
+  HQUERY tmpQuery = NULL;
+  if (open_query(&tmpQuery) != ERROR_SUCCESS) {
+    return OS_ERR;
+  }
+  char counter[512];
+  HCOUNTER handle_counter = NULL;
+  // iterate over all instance indexes and try to find our own pid
+  for (int index = 0; index < max_intx; index++) {
+    jio_snprintf(counter, sizeof(counter) - 1, pdh_IDProcess_counter_fmt, index);
+    assert(strlen(counter) < sizeof(counter), "invariant");
+    if (PdhDll::PdhAddCounter(tmpQuery, counter, 0, &handle_counter) != ERROR_SUCCESS) {
+      pdh_cleanup(&tmpQuery, &handle_counter);
+      return OS_ERR;
+    }
+    const PDH_STATUS res = PdhDll::PdhCollectQueryData(tmpQuery);
+    if (res == PDH_INVALID_HANDLE || res == PDH_NO_DATA) {
+      pdh_cleanup(&tmpQuery, &handle_counter);
+      return OS_ERR;
+    } else {
+      PDH_FMT_COUNTERVALUE counter_value;
+      formatted_counter_value(handle_counter, PDH_FMT_LONG, &counter_value);
+      pdh_cleanup(NULL, &handle_counter);
+      if ((LONG)os::current_process_id() == counter_value.longValue) {
+        pdh_cleanup(&tmpQuery, NULL);
+        return index;
+      }
+    }
+  }
+  pdh_cleanup(&tmpQuery, NULL);
+  return OS_ERR;
+}
+
+static ProcessQueryP create_process_query() {
+  const int current_process_idx = current_query_index_for_process();
+  if (OS_ERR == current_process_idx) {
+    return NULL;
+  }
+  ProcessQueryP const process_query = NEW_C_HEAP_ARRAY(ProcessQueryS, 1, mtInternal);
+  memset(process_query, 0, sizeof(ProcessQueryS));
+  process_query->set.queries = NEW_C_HEAP_ARRAY(MultiCounterQueryS, current_process_idx + 1, mtInternal);
+  memset(process_query->set.queries, 0, sizeof(MultiCounterQueryS) * (current_process_idx + 1));
+  process_query->process_index = current_process_idx;
+  process_query->set.size = current_process_idx + 1;
+  assert(process_query->set.size > process_query->process_index, "invariant");
+  return process_query;
+}
+
+static MultiCounterQueryP current_process_counter_query(ProcessQueryP process_query) {
+  assert(process_query != NULL, "invariant");
+  assert(process_query->process_index < process_query->set.size, "invariant");
+  return &process_query->set.queries[process_query->process_index];
+}
+
+static void clear_multi_counter(MultiCounterQueryP query) {
+  for (int i = 0; i < query->noOfCounters; ++i) {
+    pdh_cleanup(NULL, &query->counters[i]);
+  }
+  pdh_cleanup(&query->query.query, NULL);
+  query->initialized = false;
+}
+
+static int ensure_valid_process_query_index(ProcessQueryP process_query) {
+  assert(process_query != NULL, "invariant");
+  const int previous_process_idx = process_query->process_index;
+  if (previous_process_idx == 0) {
+    return previous_process_idx;
+  }
+  const int current_process_idx = current_query_index_for_process();
+  if (current_process_idx == previous_process_idx || OS_ERR == current_process_idx ||
+    current_process_idx >= process_query->set.size) {
+    return previous_process_idx;
+  }
+
+  assert(current_process_idx >= 0 && current_process_idx < process_query->set.size, "out of bounds!");
+  while (current_process_idx < process_query->set.size - 1) {
+    const int new_size = --process_query->set.size;
+    clear_multi_counter(&process_query->set.queries[new_size]);
+  }
+  assert(current_process_idx < process_query->set.size, "invariant");
+  process_query->process_index = current_process_idx;
+  return current_process_idx;
+}
+
+static MultiCounterQueryP current_process_query(ProcessQueryP process_query) {
+  assert(process_query != NULL, "invariant");
+  const int current_process_idx = ensure_valid_process_query_index(process_query);
+  assert(current_process_idx == process_query->process_index, "invariant");
+  assert(current_process_idx < process_query->set.size, "invariant");
+  return &process_query->set.queries[current_process_idx];
+}
+
+static int collect_process_query_data(ProcessQueryP process_query) {
+  assert(process_query != NULL, "invariant");
+  return collect_query_data(current_process_query(process_query));
+}
+
+static int query_process_counter(ProcessQueryP process_query, int slot_index, DWORD format, PDH_FMT_COUNTERVALUE* const value) {
+  MultiCounterQueryP const current_query = current_process_counter_query(process_query);
+  assert(current_query != NULL, "invariant");
+  assert(slot_index < current_query->noOfCounters, "invariant");
+  assert(current_query->counters[slot_index] != NULL, "invariant");
+  return formatted_counter_value(current_query->counters[slot_index], format, value);
+}
+
+/*
+ * Construct a fully qualified PDH path
+ *
+ * @param object_name   a PDH Object string representation (required)
+ * @param counter_name  a PDH Counter string representation (required)
+ * @param image_name    a process image name string, e.g. "java" (optional)
+ * @param instance      an instance string, e.g. "0", "1", ... (optional)
+ * @return             the fully qualified PDH path.
+ *
+ * Caller will need a ResourceMark.
+ *
+ * (PdhMakeCounterPath() seems buggy when concatenating instances, hence this function instead.)
+ */
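+/*
+ * Illustrative call sketch (the arguments are hypothetical; real callers pass
+ * localized PDH names resolved via lookup_name_by_index()):
+ *
+ *   ResourceMark rm;
+ *   // "\Memory\Available Mbytes"
+ *   make_fully_qualified_counter_path("Memory", "Available Mbytes");
+ *   // "\Processor(0)\% Privileged Time"
+ *   make_fully_qualified_counter_path("Processor", "% Privileged Time", NULL, "0");
+ *   // "\Process(java#2)\ID Process"
+ *   make_fully_qualified_counter_path("Process", "ID Process", "java", "2");
+ */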
+static const char* make_fully_qualified_counter_path(const char* object_name,
+                                                     const char* counter_name,
+                                                     const char* image_name = NULL,
+                                                     const char* instance = NULL) {
+  assert(object_name != NULL, "invariant");
+  assert(counter_name != NULL, "invariant");
+  size_t full_counter_path_len = strlen(object_name) + strlen(counter_name);
+
+  char* full_counter_path;
+  size_t jio_snprintf_result = 0;
+  if (image_name) {
+    /*
+    * For paths using the "Process" Object.
+    *
+    * Examples:
+    * form:   "\object_name(image_name#instance)\counter_name"
+    * actual: "\Process(java#2)\ID Process"
+    */
+    full_counter_path_len += PROCESS_OBJECT_INSTANCE_COUNTER_FMT_LEN;
+    full_counter_path_len += strlen(image_name);
+    /*
+    * image_name must be passed together with an associated
+    * instance "number" ("0", "1", "2", ...).
+    * This is required in order to create valid "Process" Object paths.
+    *
+    * Examples: "\Process(java#0)", \Process(java#1"), ...
+    */
+    assert(instance != NULL, "invariant");
+    full_counter_path_len += strlen(instance);
+    full_counter_path = NEW_RESOURCE_ARRAY_RETURN_NULL(char, full_counter_path_len + 1);
+    if (full_counter_path == NULL) {
+      return NULL;
+    }
+    jio_snprintf_result = jio_snprintf(full_counter_path,
+                                       full_counter_path_len + 1,
+                                       PROCESS_OBJECT_INSTANCE_COUNTER_FMT,
+                                       object_name,
+                                       image_name,
+                                       instance,
+                                       counter_name);
+  } else {
+    if (instance) {
+      /*
+      * For paths where the Object has multiple instances.
+      *
+      * Examples:
+      * form:   "\object_name(instance)\counter_name"
+      * actual: "\Processor(0)\% Privileged Time"
+      */
+      full_counter_path_len += strlen(instance);
+      full_counter_path_len += OBJECT_WITH_INSTANCES_COUNTER_FMT_LEN;
+    } else {
+      /*
+      * For "normal" paths.
+      *
+      * Examples:
+      * form:   "\object_name\counter_name"
+      * actual: "\Memory\Available Mbytes"
+      */
+      full_counter_path_len += OBJECT_COUNTER_FMT_LEN;
+    }
+    full_counter_path = NEW_RESOURCE_ARRAY_RETURN_NULL(char, full_counter_path_len + 1);
+    if (full_counter_path == NULL) {
+      return NULL;
+    }
+    if (instance) {
+      jio_snprintf_result = jio_snprintf(full_counter_path,
+                                         full_counter_path_len + 1,
+                                         OBJECT_WITH_INSTANCES_COUNTER_FMT,
+                                         object_name,
+                                         instance,
+                                         counter_name);
+    } else {
+      jio_snprintf_result = jio_snprintf(full_counter_path,
+                                         full_counter_path_len + 1,
+                                         OBJECT_COUNTER_FMT,
+                                         object_name,
+                                         counter_name);
+    }
+  }
+  assert(full_counter_path_len == jio_snprintf_result, "invariant");
+  return full_counter_path;
+}
+
+static void log_invalid_pdh_index(DWORD index) {
+  if (LogJFR) tty->print_cr("Unable to resolve PDH index: (%ld)", index);
+  if (LogJFR) tty->print_cr("Please check the registry if this performance object/counter is disabled");
+}
+
+static bool is_valid_pdh_index(DWORD index) {
+  DWORD dummy = 0;
+  if (PdhDll::PdhLookupPerfNameByIndex(NULL, index, NULL, &dummy) != PDH_MORE_DATA) {
+    log_invalid_pdh_index(index);
+    return false;
+  }
+  return true;
+}
+
+/*
+ * Maps an index to a resource area allocated string for the localized PDH artifact.
+ *
+ * Caller will need a ResourceMark.
+ *
+ * @param index    the counter index as specified in the registry
+ * @param p_string pointer to a char*
+ * @return         OS_OK if successful, OS_ERR on failure.
+ */
+static OSReturn lookup_name_by_index(DWORD index, char** p_string) {
+  assert(p_string != NULL, "invariant");
+  if (!is_valid_pdh_index(index)) {
+    return OS_ERR;
+  }
+  // determine size needed
+  DWORD size = 0;
+  PDH_STATUS status = PdhDll::PdhLookupPerfNameByIndex(NULL, index, NULL, &size);
+  assert(status == PDH_MORE_DATA, "invariant");
+  *p_string = NEW_RESOURCE_ARRAY_RETURN_NULL(char, size);
+  if (*p_string == NULL) {
+    return OS_ERR;
+  }
+  if (PdhDll::PdhLookupPerfNameByIndex(NULL, index, *p_string, &size) != ERROR_SUCCESS) {
+    return OS_ERR;
+  }
+  if (0 == size || *p_string == NULL) {
+    return OS_ERR;
+  }
+  // Windows Vista does not null-terminate the string (although the docs say it will)
+  (*p_string)[size - 1] = '\0';
+  return OS_OK;
+}
+
+static const char* copy_string_to_c_heap(const char* string) {
+  assert(string != NULL, "invariant");
+  const size_t len = strlen(string);
+  char* const cheap_allocated_string = NEW_C_HEAP_ARRAY(char, len + 1, mtInternal);
+  if (NULL == cheap_allocated_string) {
+    return NULL;
+  }
+  strncpy(cheap_allocated_string, string, len + 1);
+  return cheap_allocated_string;
+}
+
+/*
+* Maps an index to a resource area allocated string for the localized PDH artifact.
+*
+* Caller will need a ResourceMark.
+*
+* @param index    the counter index as specified in the registry
+* @return         localized pdh artifact string if successful, NULL on failure.
+*/
+static const char* pdh_localized_artifact(DWORD pdh_artifact_index) {
+  char* pdh_localized_artifact_string = NULL;
+  // get localized name from pdh artifact index
+  if (lookup_name_by_index(pdh_artifact_index, &pdh_localized_artifact_string) != OS_OK) {
+    return NULL;
+  }
+  return pdh_localized_artifact_string;
+}
+
+/*
+ * Returns the PDH string identifying the current process image name.
+ * Use this prefix when getting counters from the PDH process object
+ * representing your process.
+ * Ex. "Process(java#0)\Virtual Bytes" - where "java" is the PDH process
+ * image description.
+ *
+ * Caller needs ResourceMark.
+ *
+ * @return the process image description. NULL if the call failed.
+*/
+static const char* pdh_process_image_name() {
+  char* module_name = NEW_RESOURCE_ARRAY_RETURN_NULL(char, MAX_PATH);
+  if (NULL == module_name) {
+    return NULL;
+  }
+  // Find our module name and use it to extract the image name used by PDH
+  DWORD getmfn_return = GetModuleFileName(NULL, module_name, MAX_PATH);
+  if (getmfn_return >= MAX_PATH || 0 == getmfn_return) {
+    return NULL;
+  }
+  if (os::get_last_error() == ERROR_INSUFFICIENT_BUFFER) {
+    return NULL;
+  }
+  char* process_image_name = strrchr(module_name, '\\'); //drop path
+  process_image_name++;                                  //skip slash
+  char* dot_pos = strrchr(process_image_name, '.');      //drop .exe
+  dot_pos[0] = '\0';
+  return process_image_name;
+}
+
+static void deallocate_pdh_constants() {
+  if (process_image_name != NULL) {
+    FREE_C_HEAP_ARRAY(char, process_image_name, mtInternal);
+    process_image_name = NULL;
+  }
+  if (pdh_IDProcess_counter_fmt != NULL) {
+    FREE_C_HEAP_ARRAY(char, pdh_IDProcess_counter_fmt, mtInternal);
+    pdh_IDProcess_counter_fmt = NULL;
+  }
+}
+
+static int allocate_pdh_constants() {
+  assert(process_image_name == NULL, "invariant");
+  const char* pdh_image_name = pdh_process_image_name();
+  if (pdh_image_name == NULL) {
+    return OS_ERR;
+  }
+  process_image_name = copy_string_to_c_heap(pdh_image_name);
+
+  const char* pdh_localized_process_object = pdh_localized_artifact(PDH_PROCESS_IDX);
+  if (pdh_localized_process_object == NULL) {
+    return OS_ERR;
+  }
+
+  const char* pdh_localized_IDProcess_counter = pdh_localized_artifact(PDH_ID_PROCESS_IDX);
+  if (pdh_localized_IDProcess_counter == NULL) {
+    return OS_ERR;
+  }
+
+  size_t pdh_IDProcess_counter_fmt_len = strlen(process_image_name);
+  pdh_IDProcess_counter_fmt_len += strlen(pdh_localized_process_object);
+  pdh_IDProcess_counter_fmt_len += strlen(pdh_localized_IDProcess_counter);
+  pdh_IDProcess_counter_fmt_len += PROCESS_OBJECT_INSTANCE_COUNTER_FMT_LEN;
+  pdh_IDProcess_counter_fmt_len += 2; // "%d"
+
+  assert(pdh_IDProcess_counter_fmt == NULL, "invariant");
+  pdh_IDProcess_counter_fmt = NEW_C_HEAP_ARRAY_RETURN_NULL(char, pdh_IDProcess_counter_fmt_len + 1, mtInternal);
+  if (pdh_IDProcess_counter_fmt == NULL) {
+    return OS_ERR;
+  }
+
+  /* "\Process(java#%d)\ID Process" */
+  const size_t len = jio_snprintf(pdh_IDProcess_counter_fmt,
+                                  pdh_IDProcess_counter_fmt_len + 1,
+                                  PROCESS_OBJECT_INSTANCE_COUNTER_FMT,
+                                  pdh_localized_process_object,
+                                  process_image_name,
+                                  "%d",
+                                  pdh_localized_IDProcess_counter);
+
+  assert(pdh_IDProcess_counter_fmt != NULL, "invariant");
+  assert(len == pdh_IDProcess_counter_fmt_len, "invariant");
+  return OS_OK;
+}
+
+/*
+ * Enumerates the Processor PDH object and returns a buffer containing the enumerated instances.
+ * Caller needs a ResourceMark.
+ *
+ * @return  buffer if successful, NULL on failure.
+*/
+static const char* enumerate_cpu_instances() {
+  char* processor; //'Processor' == PDH_PROCESSOR_IDX
+  if (lookup_name_by_index(PDH_PROCESSOR_IDX, &processor) != OS_OK) {
+    return NULL;
+  }
+  DWORD c_size = 0;
+  DWORD i_size = 0;
+  // enumerate all processors.
+  PDH_STATUS pdhStat = PdhDll::PdhEnumObjectItems(NULL, // reserved
+                                                  NULL, // local machine
+                                                  processor, // object to enumerate
+                                                  NULL,
+                                                  &c_size,
+                                                  NULL, // instance buffer is NULL and
+                                                  &i_size,  // pass 0 length in order to get the required size
+                                                  PERF_DETAIL_WIZARD, // counter detail level
+                                                  0);
+  if (PdhDll::PdhStatusFail((pdhStat))) {
+    return NULL;
+  }
+  char* const instances = NEW_RESOURCE_ARRAY_RETURN_NULL(char, i_size);
+  if (instances == NULL) {
+    return NULL;
+  }
+  c_size = 0;
+  pdhStat = PdhDll::PdhEnumObjectItems(NULL, // reserved
+                                       NULL, // local machine
+                                       processor, // object to enumerate
+                                       NULL,
+                                       &c_size,
+                                       instances, // now instance buffer is allocated to be filled in
+                                       &i_size, // and the required size is known
+                                       PERF_DETAIL_WIZARD, // counter detail level
+                                       0);
+  if (PdhDll::PdhStatusFail((pdhStat))) {
+    return NULL;
+  }
+  return instances;
+}
+
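+/*
+ * Illustrative layout of the instance buffer returned by enumerate_cpu_instances()
+ * on a hypothetical machine with 4 logical CPUs (a double NUL-terminated string list):
+ *
+ *   "0\0" "1\0" "2\0" "3\0" "_Total\0" "\0"
+ *
+ * count_logical_cpus() below walks this list to 5 entries and reports 4,
+ * excluding the "_Total" instance.
+ */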
+static int count_logical_cpus(const char* instances) {
+  assert(instances != NULL, "invariant");
+  // count logical instances.
+  DWORD count;
+  char* tmp;
+  for (count = 0, tmp = const_cast<char*>(instances); *tmp != '\0'; tmp = &tmp[strlen(tmp) + 1], count++);
+  // PDH reports an instance for each logical processor plus an instance for the total (_Total)
+  assert(count == os::processor_count() + 1, "invalid enumeration!");
+  return count - 1;
+}
+
+static int number_of_logical_cpus() {
+  static int numberOfCPUS = 0;
+  if (numberOfCPUS == 0) {
+    const char* instances = enumerate_cpu_instances();
+    if (instances == NULL) {
+      return OS_ERR;
+    }
+    numberOfCPUS = count_logical_cpus(instances);
+  }
+  return numberOfCPUS;
+}
+
+static double cpu_factor() {
+  static DWORD  numCpus = 0;
+  static double cpuFactor = .0;
+  if (numCpus == 0) {
+    numCpus = number_of_logical_cpus();
+    assert(os::processor_count() <= (int)numCpus, "invariant");
+    cpuFactor = numCpus * 100;
+  }
+  return cpuFactor;
+}
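+
+/*
+ * Worked example (illustrative): with 4 logical CPUs, cpu_factor() == 400.
+ * A process burning two full CPUs reports "% Processor Time" == 200
+ * (PDH_FMT_NOCAP100), so the normalized process load is 200 / 400 == 0.5.
+ */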
+
+static void log_error_message_on_no_PDH_artifact(const char* full_counter_name) {
+  if (LogJFR) tty->print_cr("Unable to register PDH query for \"%s\"", full_counter_name);
+  if (LogJFR) tty->print_cr("Please check the registry if this performance object/counter is disabled");
+}
+
+static int initialize_cpu_query_counters(MultiCounterQueryP cpu_query, DWORD pdh_counter_idx) {
+  assert(cpu_query != NULL, "invariant");
+  assert(cpu_query->counters != NULL, "invariant");
+  char* processor; //'Processor' == PDH_PROCESSOR_IDX
+  if (lookup_name_by_index(PDH_PROCESSOR_IDX, &processor) != OS_OK) {
+    return OS_ERR;
+  }
+  char* counter_name = NULL;
+  if (lookup_name_by_index(pdh_counter_idx, &counter_name) != OS_OK) {
+    return OS_ERR;
+  }
+  if (cpu_query->query.query == NULL) {
+    if (open_query(cpu_query)) {
+      return OS_ERR;
+    }
+  }
+  assert(cpu_query->query.query != NULL, "invariant");
+  size_t counter_len = strlen(processor);
+  counter_len += strlen(counter_name);
+  counter_len += OBJECT_WITH_INSTANCES_COUNTER_FMT_LEN; // "\\%s(%s)\\%s"
+
+  DWORD index;
+  char* tmp;
+  const char* instances = enumerate_cpu_instances();
+  for (index = 0, tmp = const_cast<char*>(instances); *tmp != '\0'; tmp = &tmp[strlen(tmp) + 1], index++) {
+    const size_t tmp_len = strlen(tmp);
+    char* counter_path = NEW_RESOURCE_ARRAY_RETURN_NULL(char, counter_len + tmp_len + 1);
+    if (counter_path == NULL) {
+      return OS_ERR;
+    }
+    const size_t jio_snprintf_result = jio_snprintf(counter_path,
+                                                    counter_len + tmp_len + 1,
+                                                    OBJECT_WITH_INSTANCES_COUNTER_FMT,
+                                                    processor,
+                                                    tmp, // instance "0", "1", .."_Total"
+                                                    counter_name);
+    assert(counter_len + tmp_len == jio_snprintf_result, "invariant");
+    if (add_counter(cpu_query, &cpu_query->counters[index], counter_path, false) != OS_OK) {
+      // performance counter is disabled in registry and not accessible via PerfLib
+      log_error_message_on_no_PDH_artifact(counter_path);
+      // return OS_OK to have the system continue to run without the missing counter
+      return OS_OK;
+    }
+  }
+  cpu_query->initialized = true;
+  // Query once to initialize the counters which require at least two samples
+  // (like the % CPU usage) to calculate correctly.
+  collect_query_data(cpu_query);
+  return OS_OK;
+}
+
+static int initialize_cpu_query(MultiCounterQueryP cpu_query, DWORD pdh_counter_idx) {
+  assert(cpu_query != NULL, "invariant");
+  assert(!cpu_query->initialized, "invariant");
+  const int logical_cpu_count = number_of_logical_cpus();
+  assert(logical_cpu_count >= os::processor_count(), "invariant");
+  // we also add another counter for instance "_Total"
+  if (allocate_counters(cpu_query, logical_cpu_count + 1) != OS_OK) {
+    return OS_ERR;
+  }
+  assert(cpu_query->noOfCounters == logical_cpu_count + 1, "invariant");
+  return initialize_cpu_query_counters(cpu_query, pdh_counter_idx);
+}
+
+static int initialize_process_counter(ProcessQueryP process_query, int slot_index, DWORD pdh_counter_index) {
+  char* localized_process_object;
+  if (lookup_name_by_index(PDH_PROCESS_IDX, &localized_process_object) != OS_OK) {
+    return OS_ERR;
+  }
+  assert(localized_process_object != NULL, "invariant");
+  char* localized_counter_name;
+  if (lookup_name_by_index(pdh_counter_index, &localized_counter_name) != OS_OK) {
+    return OS_ERR;
+  }
+  assert(localized_counter_name != NULL, "invariant");
+  for (int i = 0; i < process_query->set.size; ++i) {
+    char instanceIndexBuffer[32];
+    const char* counter_path = make_fully_qualified_counter_path(localized_process_object,
+                                                                 localized_counter_name,
+                                                                 process_image_name,
+                                                                 itoa(i, instanceIndexBuffer, 10));
+    if (counter_path == NULL) {
+      return OS_ERR;
+    }
+    MultiCounterQueryP const query = &process_query->set.queries[i];
+    if (add_process_counter(query, slot_index, counter_path, true)) {
+      return OS_ERR;
+    }
+  }
+  return OS_OK;
+}
+
+static CounterQueryP create_counter_query(DWORD pdh_object_idx, DWORD pdh_counter_idx) {
+  if (!(is_valid_pdh_index(pdh_object_idx) && is_valid_pdh_index(pdh_counter_idx))) {
+    return NULL;
+  }
+  CounterQueryP const query = create_counter_query();
+  const char* object = pdh_localized_artifact(pdh_object_idx);
+  assert(object != NULL, "invariant");
+  const char* counter = pdh_localized_artifact(pdh_counter_idx);
+  assert(counter != NULL, "invariant");
+  const char* full_counter_path = make_fully_qualified_counter_path(object, counter);
+  assert(full_counter_path != NULL, "invariant");
+  add_counter(query, full_counter_path, true);
+  return query;
+}
+
+static void deallocate() {
+  deallocate_pdh_constants();
+  PdhDll::PdhDetach();
+}
+
+static LONG critical_section = 0;
+static LONG reference_count = 0;
+static bool pdh_initialized = false;
+
+static void on_initialization_failure() {
+  // still the holder of the critical section
+  deallocate();
+  InterlockedExchangeAdd(&reference_count, -1);
+}
+
+static OSReturn initialize() {
+  ResourceMark rm;
+  if (!PdhDll::PdhAttach()) {
+    return OS_ERR;
+  }
+  if (allocate_pdh_constants() != OS_OK) {
+    on_initialization_failure();
+    return OS_ERR;
+  }
+  return OS_OK;
+}
+
+/*
+* Helper to initialize the PDH library, function pointers, constants and counters.
+*
+* Reference counting allows for unloading of pdh.dll, provided all sessions use the pair:
+*
+*   pdh_acquire();
+*   pdh_release();
+*
+* @return  true if successful, false on failure.
+*/
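+/*
+* Illustrative usage sketch (assumed typical pairing; in this file the acquire
+* happens in CPUPerformance::initialize() and the release in its destructor):
+*
+*   if (pdh_acquire()) {
+*     // ... create queries, add counters, collect data ...
+*     pdh_release();   // the last release tears down PDH state and detaches pdh.dll
+*   }
+*/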
+static bool pdh_acquire() {
+  while (InterlockedCompareExchange(&critical_section, 1, 0) == 1);
+  InterlockedExchangeAdd(&reference_count, 1);
+  if (pdh_initialized) {
+    return true;
+  }
+  const OSReturn ret = initialize();
+  if (OS_OK == ret) {
+    pdh_initialized = true;
+  }
+  while (InterlockedCompareExchange(&critical_section, 0, 1) == 0);
+  return ret == OS_OK;
+}
+
+static void pdh_release() {
+  while (InterlockedCompareExchange(&critical_section, 1, 0) == 1);
+  const LONG prev_ref_count = InterlockedExchangeAdd(&reference_count, -1);
+  if (1 == prev_ref_count) {
+    deallocate();
+    pdh_initialized = false;
+  }
+  while (InterlockedCompareExchange(&critical_section, 0, 1) == 0);
+}
+
+class CPUPerformanceInterface::CPUPerformance : public CHeapObj<mtInternal> {
+  friend class CPUPerformanceInterface;
+ private:
+  CounterQueryP _context_switches;
+  ProcessQueryP _process_cpu_load;
+  MultiCounterQueryP _machine_cpu_load;
+
+  int cpu_load(int which_logical_cpu, double* cpu_load);
+  int context_switch_rate(double* rate);
+  int cpu_load_total_process(double* cpu_load);
+  int cpu_loads_process(double* jvm_user_load, double* jvm_kernel_load, double* psystemTotalLoad);
+  CPUPerformance();
+  ~CPUPerformance();
+  bool initialize();
+};
+
+class SystemProcessInterface::SystemProcesses : public CHeapObj<mtInternal> {
+  friend class SystemProcessInterface;
+ private:
+  class ProcessIterator : public CHeapObj<mtInternal> {
+    friend class SystemProcessInterface::SystemProcesses;
+   private:
+    HANDLE         _hProcessSnap;
+    PROCESSENTRY32 _pe32;
+    BOOL           _valid;
+    char           _exePath[MAX_PATH];
+    ProcessIterator();
+    ~ProcessIterator();
+    bool initialize();
+
+    int current(SystemProcess* const process_info);
+    int next_process();
+    bool is_valid() const { return _valid != FALSE; }
+    char* allocate_string(const char* str) const;
+    int snapshot();
+  };
+
+  ProcessIterator* _iterator;
+  SystemProcesses();
+  ~SystemProcesses();
+  bool initialize();
+
+  // information about system processes
+  int system_processes(SystemProcess** system_processes, int* no_of_sys_processes) const;
+};
+
+CPUPerformanceInterface::CPUPerformance::CPUPerformance() : _context_switches(NULL), _process_cpu_load(NULL), _machine_cpu_load(NULL) {}
+
+bool CPUPerformanceInterface::CPUPerformance::initialize() {
+  if (!pdh_acquire()) {
+    return true;
+  }
+  _context_switches = create_counter_query(PDH_SYSTEM_IDX, PDH_CONTEXT_SWITCH_RATE_IDX);
+  _process_cpu_load = create_process_query();
+  if (_process_cpu_load == NULL) {
+    return true;
+  }
+  if (allocate_counters(_process_cpu_load, 2) != OS_OK) {
+    return true;
+  }
+  if (initialize_process_counter(_process_cpu_load, 0, PDH_PROCESSOR_TIME_IDX) != OS_OK) {
+    return true;
+  }
+  if (initialize_process_counter(_process_cpu_load, 1, PDH_PRIV_PROCESSOR_TIME_IDX) != OS_OK) {
+    return true;
+  }
+  _process_cpu_load->set.initialized = true;
+  _machine_cpu_load = create_multi_counter_query();
+  if (_machine_cpu_load == NULL) {
+    return true;
+  }
+  initialize_cpu_query(_machine_cpu_load, PDH_PROCESSOR_TIME_IDX);
+  return true;
+}
+
+CPUPerformanceInterface::CPUPerformance::~CPUPerformance() {
+  if (_context_switches != NULL) {
+    destroy_counter_query(_context_switches);
+    _context_switches = NULL;
+  }
+  if (_process_cpu_load != NULL) {
+    destroy_counter_query(_process_cpu_load);
+    _process_cpu_load = NULL;
+  }
+  if (_machine_cpu_load != NULL) {
+    destroy_counter_query(_machine_cpu_load);
+    _machine_cpu_load = NULL;
+  }
+  pdh_release();
+}
+
+CPUPerformanceInterface::CPUPerformanceInterface() {
+  _impl = NULL;
+}
+
+bool CPUPerformanceInterface::initialize() {
+  _impl = new CPUPerformanceInterface::CPUPerformance();
+  return _impl != NULL && _impl->initialize();
+}
+
+CPUPerformanceInterface::~CPUPerformanceInterface() {
+  if (_impl != NULL) {
+    delete _impl;
+  }
+}
+
+int CPUPerformanceInterface::cpu_load(int which_logical_cpu, double* cpu_load) const {
+  return _impl->cpu_load(which_logical_cpu, cpu_load);
+}
+
+int CPUPerformanceInterface::context_switch_rate(double* rate) const {
+  return _impl->context_switch_rate(rate);
+}
+
+int CPUPerformanceInterface::cpu_load_total_process(double* cpu_load) const {
+  return _impl->cpu_load_total_process(cpu_load);
+}
+
+int CPUPerformanceInterface::cpu_loads_process(double* pjvmUserLoad,
+                                               double* pjvmKernelLoad,
+                                               double* psystemTotalLoad) const {
+  return _impl->cpu_loads_process(pjvmUserLoad, pjvmKernelLoad, psystemTotalLoad);
+}
+
+int CPUPerformanceInterface::CPUPerformance::cpu_load(int which_logical_cpu, double* cpu_load) {
+  *cpu_load = .0;
+  if (_machine_cpu_load == NULL || !_machine_cpu_load->initialized) {
+    return OS_ERR;
+  }
+  assert(_machine_cpu_load != NULL, "invariant");
+  assert(which_logical_cpu < _machine_cpu_load->noOfCounters, "invariant");
+
+  if (collect_query_data(_machine_cpu_load)) {
+    return OS_ERR;
+  }
+  // -1 is total (all cpus)
+  const int counter_idx = -1 == which_logical_cpu ? _machine_cpu_load->noOfCounters - 1 : which_logical_cpu;
+  PDH_FMT_COUNTERVALUE counter_value;
+  formatted_counter_value(_machine_cpu_load->counters[counter_idx], PDH_FMT_DOUBLE, &counter_value);
+  *cpu_load = counter_value.doubleValue / 100;
+  return OS_OK;
+}
+
+int CPUPerformanceInterface::CPUPerformance::cpu_load_total_process(double* cpu_load) {
+  *cpu_load = .0;
+  if (_process_cpu_load == NULL || !_process_cpu_load->set.initialized) {
+    return OS_ERR;
+  }
+  assert(_process_cpu_load != NULL, "invariant");
+  if (collect_process_query_data(_process_cpu_load)) {
+    return OS_ERR;
+  }
+  PDH_FMT_COUNTERVALUE counter_value;
+  if (query_process_counter(_process_cpu_load, 0, PDH_FMT_DOUBLE | PDH_FMT_NOCAP100, &counter_value) != OS_OK) {
+    return OS_ERR;
+  }
+  double process_load = counter_value.doubleValue / cpu_factor();
+  process_load = MIN2<double>(1, process_load);
+  process_load = MAX2<double>(0, process_load);
+  *cpu_load = process_load;
+  return OS_OK;
+}
+
+int CPUPerformanceInterface::CPUPerformance::cpu_loads_process(double* pjvmUserLoad,
+                                                               double* pjvmKernelLoad,
+                                                               double* psystemTotalLoad) {
+  assert(pjvmUserLoad != NULL, "pjvmUserLoad is NULL!");
+  assert(pjvmKernelLoad != NULL, "pjvmKernelLoad is NULL!");
+  assert(psystemTotalLoad != NULL, "psystemTotalLoad is NULL!");
+  *pjvmUserLoad = .0;
+  *pjvmKernelLoad = .0;
+  *psystemTotalLoad = .0;
+
+  if (_process_cpu_load == NULL || !_process_cpu_load->set.initialized) {
+    return OS_ERR;
+  }
+  assert(_process_cpu_load != NULL, "invariant");
+  if (collect_process_query_data(_process_cpu_load)) {
+    return OS_ERR;
+  }
+  double process_load = .0;
+  PDH_FMT_COUNTERVALUE counter_value;
+  // Read PDH_PROCESSOR_TIME_IDX
+  if (query_process_counter(_process_cpu_load, 0, PDH_FMT_DOUBLE | PDH_FMT_NOCAP100, &counter_value) != OS_OK) {
+    return OS_ERR;
+  }
+  process_load = counter_value.doubleValue / cpu_factor();
+  process_load = MIN2<double>(1, process_load);
+  process_load = MAX2<double>(0, process_load);
+  // Read PDH_PRIV_PROCESSOR_TIME_IDX
+  if (query_process_counter(_process_cpu_load, 1, PDH_FMT_DOUBLE | PDH_FMT_NOCAP100, &counter_value) != OS_OK) {
+    return OS_ERR;
+  }
+  double kernel_load = counter_value.doubleValue / cpu_factor();
+  kernel_load = MIN2<double>(1, kernel_load);
+  kernel_load = MAX2<double>(0, kernel_load);
+  *pjvmKernelLoad = kernel_load;
+
+  double user_load = process_load - kernel_load;
+  user_load = MIN2<double>(1, user_load);
+  user_load = MAX2<double>(0, user_load);
+  *pjvmUserLoad = user_load;
+
+  if (collect_query_data(_machine_cpu_load)) {
+    return OS_ERR;
+  }
+  if (formatted_counter_value(_machine_cpu_load->counters[_machine_cpu_load->noOfCounters - 1], PDH_FMT_DOUBLE, &counter_value) != OS_OK) {
+    return OS_ERR;
+  }
+  double machine_load = counter_value.doubleValue / 100;
+  assert(machine_load >= 0, "machine_load is negative!");
+  // clamp at user+system and 1.0
+  if (*pjvmKernelLoad + *pjvmUserLoad > machine_load) {
+    machine_load = MIN2(*pjvmKernelLoad + *pjvmUserLoad, 1.0);
+  }
+  *psystemTotalLoad = machine_load;
+  return OS_OK;
+}
+
+int CPUPerformanceInterface::CPUPerformance::context_switch_rate(double* rate) {
+  assert(rate != NULL, "invariant");
+  *rate = .0;
+  if (_context_switches == NULL || !_context_switches->initialized) {
+    return OS_ERR;
+  }
+  assert(_context_switches != NULL, "invariant");
+  if (collect_query_data(_context_switches) != OS_OK) {
+    return OS_ERR;
+  }
+  PDH_FMT_COUNTERVALUE counter_value;
+  if (formatted_counter_value(_context_switches->counter, PDH_FMT_DOUBLE, &counter_value) != OS_OK) {
+    return OS_ERR;
+  }
+  *rate = counter_value.doubleValue;
+  return OS_OK;
+}
+
+SystemProcessInterface::SystemProcesses::ProcessIterator::ProcessIterator() {
+  _hProcessSnap = INVALID_HANDLE_VALUE;
+  _valid = FALSE;
+  _pe32.dwSize = sizeof(PROCESSENTRY32);
+}
+
+bool SystemProcessInterface::SystemProcesses::ProcessIterator::initialize() {
+  return true;
+}
+
+int SystemProcessInterface::SystemProcesses::ProcessIterator::snapshot() {
+  // take a snapshot of all processes in the system
+  _hProcessSnap = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0);
+  if (_hProcessSnap == INVALID_HANDLE_VALUE) {
+    return OS_ERR;
+  }
+  // step to first process
+  _valid = Process32First(_hProcessSnap, &_pe32);
+  return is_valid() ? OS_OK : OS_ERR;
+}
+
+SystemProcessInterface::SystemProcesses::ProcessIterator::~ProcessIterator() {
+  if (_hProcessSnap != INVALID_HANDLE_VALUE) {
+    CloseHandle(_hProcessSnap);
+  }
+}
+
+int SystemProcessInterface::SystemProcesses::ProcessIterator::current(SystemProcess* process_info) {
+  assert(is_valid(), "no current process to be fetched!");
+  assert(process_info != NULL, "process_info is NULL!");
+  char* exePath = NULL;
+  HANDLE hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, false, _pe32.th32ProcessID);
+  if (hProcess != NULL) {
+    HMODULE hMod;
+    DWORD cbNeeded;
+    if (EnumProcessModules(hProcess, &hMod, sizeof(hMod), &cbNeeded) != 0) {
+      if (GetModuleFileNameExA(hProcess, hMod, _exePath, sizeof(_exePath)) != 0) {
+        exePath = _exePath;
+      }
+    }
+    CloseHandle (hProcess);
+  }
+  process_info->set_pid((int)_pe32.th32ProcessID);
+  process_info->set_name(allocate_string(_pe32.szExeFile));
+  process_info->set_path(allocate_string(exePath));
+  return OS_OK;
+}
+
+char* SystemProcessInterface::SystemProcesses::ProcessIterator::allocate_string(const char* str) const {
+  if (str != NULL) {
+    size_t len = strlen(str);
+    char* tmp = NEW_C_HEAP_ARRAY(char, len+1, mtInternal);
+    if (NULL == tmp) {
+      return NULL;
+    }
+    strncpy(tmp, str, len);
+    tmp[len] = '\0';
+    return tmp;
+  }
+  return NULL;
+}
+
+int SystemProcessInterface::SystemProcesses::ProcessIterator::next_process() {
+  _valid = Process32Next(_hProcessSnap, &_pe32);
+  return OS_OK;
+}
+
+SystemProcessInterface::SystemProcesses::SystemProcesses() {
+  _iterator = NULL;
+}
+
+bool SystemProcessInterface::SystemProcesses::initialize() {
+  _iterator = new SystemProcessInterface::SystemProcesses::ProcessIterator();
+  return _iterator != NULL && _iterator->initialize();
+}
+
+SystemProcessInterface::SystemProcesses::~SystemProcesses() {
+  if (_iterator != NULL) {
+    delete _iterator;
+    _iterator = NULL;
+  }
+}
+
+int SystemProcessInterface::SystemProcesses::system_processes(SystemProcess** system_processes,
+                                                              int* no_of_sys_processes) const {
+  assert(system_processes != NULL, "system_processes pointer is NULL!");
+  assert(no_of_sys_processes != NULL, "system_processes counter pointer is NULL!");
+  assert(_iterator != NULL, "iterator is NULL!");
+
+  // initialize pointers
+  *no_of_sys_processes = 0;
+  *system_processes = NULL;
+
+  // take process snapshot
+  if (_iterator->snapshot() != OS_OK) {
+    return OS_ERR;
+  }
+
+  while (_iterator->is_valid()) {
+    SystemProcess* tmp = new SystemProcess();
+    _iterator->current(tmp);
+
+    // if a head already exists, the new entry becomes the new head
+    if (*system_processes != NULL) {
+      // link the previous head behind the new entry
+      tmp->set_next(*system_processes);
+    }
+    // new head
+    *system_processes = tmp;
+    // increment
+    (*no_of_sys_processes)++;
+    // step forward
+    _iterator->next_process();
+  }
+  return OS_OK;
+}
+
+int SystemProcessInterface::system_processes(SystemProcess** system_procs,
+                                             int* no_of_sys_processes) const {
+  return _impl->system_processes(system_procs, no_of_sys_processes);
+}
+
+SystemProcessInterface::SystemProcessInterface() {
+  _impl = NULL;
+}
+
+bool SystemProcessInterface::initialize() {
+  _impl = new SystemProcessInterface::SystemProcesses();
+  return _impl != NULL && _impl->initialize();
+}
+
+SystemProcessInterface::~SystemProcessInterface() {
+  if (_impl != NULL) {
+    delete _impl;
+  }
+}
+
+CPUInformationInterface::CPUInformationInterface() {
+  _cpu_info = NULL;
+}
+
+bool CPUInformationInterface::initialize() {
+  _cpu_info = new CPUInformation();
+  if (NULL == _cpu_info) {
+    return false;
+  }
+  _cpu_info->set_number_of_hardware_threads(VM_Version_Ext::number_of_threads());
+  _cpu_info->set_number_of_cores(VM_Version_Ext::number_of_cores());
+  _cpu_info->set_number_of_sockets(VM_Version_Ext::number_of_sockets());
+  _cpu_info->set_cpu_name(VM_Version_Ext::cpu_name());
+  _cpu_info->set_cpu_description(VM_Version_Ext::cpu_description());
+  return true;
+}
+
+CPUInformationInterface::~CPUInformationInterface() {
+  if (_cpu_info != NULL) {
+    const char* cpu_name = _cpu_info->cpu_name();
+    if (cpu_name != NULL) {
+      FREE_C_HEAP_ARRAY(char, cpu_name, mtInternal);
+      _cpu_info->set_cpu_name(NULL);
+    }
+    const char* cpu_desc = _cpu_info->cpu_description();
+    if (cpu_desc != NULL) {
+      FREE_C_HEAP_ARRAY(char, cpu_desc, mtInternal);
+      _cpu_info->set_cpu_description(NULL);
+    }
+    delete _cpu_info;
+    _cpu_info = NULL;
+  }
+}
+
+int CPUInformationInterface::cpu_information(CPUInformation& cpu_info) {
+  if (NULL == _cpu_info) {
+    return OS_ERR;
+  }
+  cpu_info = *_cpu_info; // shallow copy assignment
+  return OS_OK;
+}
+
+class NetworkPerformanceInterface::NetworkPerformance : public CHeapObj<mtInternal> {
+  friend class NetworkPerformanceInterface;
+ private:
+  bool _iphlp_attached;
+
+  NetworkPerformance();
+  NetworkPerformance(const NetworkPerformance& rhs); // no impl
+  NetworkPerformance& operator=(const NetworkPerformance& rhs); // no impl
+  bool initialize();
+  ~NetworkPerformance();
+  int network_utilization(NetworkInterface** network_interfaces) const;
+};
+
+NetworkPerformanceInterface::NetworkPerformance::NetworkPerformance()
+: _iphlp_attached(false) {
+}
+
+bool NetworkPerformanceInterface::NetworkPerformance::initialize() {
+  _iphlp_attached = IphlpDll::IphlpAttach();
+  return _iphlp_attached;
+}
+
+NetworkPerformanceInterface::NetworkPerformance::~NetworkPerformance() {
+  if (_iphlp_attached) {
+    IphlpDll::IphlpDetach();
+  }
+}
+
+int NetworkPerformanceInterface::NetworkPerformance::network_utilization(NetworkInterface** network_interfaces) const {
+  MIB_IF_TABLE2* table;
+
+  if (IphlpDll::GetIfTable2(&table) != NO_ERROR) {
+    return OS_ERR;
+  }
+
+  NetworkInterface* ret = NULL;
+  for (ULONG i = 0; i < table->NumEntries; ++i) {
+    if (table->Table[i].InterfaceAndOperStatusFlags.FilterInterface) {
+      continue;
+    }
+
+    char buf[256];
+    if (WideCharToMultiByte(CP_UTF8, 0, table->Table[i].Description, -1, buf, sizeof(buf), NULL, NULL) == 0) {
+      continue;
+    }
+
+    NetworkInterface* cur = new NetworkInterface(buf, table->Table[i].InOctets, table->Table[i].OutOctets, ret);
+    ret = cur;
+  }
+
+  IphlpDll::FreeMibTable(table);
+  *network_interfaces = ret;
+
+  return OS_OK;
+}
+
+NetworkPerformanceInterface::NetworkPerformanceInterface() {
+  _impl = NULL;
+}
+
+NetworkPerformanceInterface::~NetworkPerformanceInterface() {
+  if (_impl != NULL) {
+    delete _impl;
+  }
+}
+
+bool NetworkPerformanceInterface::initialize() {
+  _impl = new NetworkPerformanceInterface::NetworkPerformance();
+  return _impl != NULL && _impl->initialize();
+}
+
+int NetworkPerformanceInterface::network_utilization(NetworkInterface** network_interfaces) const {
+  return _impl->network_utilization(network_interfaces);
+}
--- a/src/os/windows/vm/os_windows.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/os/windows/vm/os_windows.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -1669,6 +1669,50 @@
    enumerate_modules(pid, _print_module, (void *)st);
 }
 
+int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
+  HANDLE   hProcess;
+
+# define MAX_NUM_MODULES 128
+  HMODULE     modules[MAX_NUM_MODULES];
+  static char filename[MAX_PATH];
+  int         result = 0;
+
+  int pid = os::current_process_id();
+  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
+                         FALSE, pid);
+  if (hProcess == NULL) return 0;
+
+  DWORD size_needed;
+  if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
+    CloseHandle(hProcess);
+    return 0;
+  }
+
+  // number of modules that are currently loaded
+  int num_modules = size_needed / sizeof(HMODULE);
+
+  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
+    // Get Full pathname:
+    if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
+      filename[0] = '\0';
+    }
+
+    MODULEINFO modinfo;
+    if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
+      modinfo.lpBaseOfDll = NULL;
+      modinfo.SizeOfImage = 0;
+    }
+
+    // Invoke callback function
+    result = callback(filename, (address)modinfo.lpBaseOfDll,
+                      (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
+    if (result) break;
+  }
+
+  CloseHandle(hProcess);
+  return result;
+}
+
 void os::print_os_info_brief(outputStream* st) {
   os::print_os_info(st);
 }
@@ -4352,6 +4396,22 @@
   return (jlong) ::_lseeki64(fd, offset, whence);
 }
 
+size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
+  OVERLAPPED ov;
+  DWORD nread;
+  BOOL result;
+
+  ZeroMemory(&ov, sizeof(ov));
+  ov.Offset = (DWORD)offset;
+  ov.OffsetHigh = (DWORD)(offset >> 32);
+
+  HANDLE h = (HANDLE)::_get_osfhandle(fd);
+
+  result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);
+
+  return result ? nread : 0;
+}
+
 // This method is a slightly reworked copy of JDK's sysNativePath
 // from src/windows/hpi/src/path_md.c
 
@@ -4811,6 +4871,40 @@
   }
 }
 
+Thread* os::ThreadCrashProtection::_protected_thread = NULL;
+os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL;
+volatile intptr_t os::ThreadCrashProtection::_crash_mux = 0;
+
+os::ThreadCrashProtection::ThreadCrashProtection() {
+}
+
+// See the caveats for this class in os_windows.hpp.
+// Protects the callback call so that a raised OS EXCEPTION causes a jump back
+// into this method, which then returns false. If no OS EXCEPTION was raised,
+// returns true.
+// The callback is supposed to provide the method that should be protected.
+//
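+// Illustrative usage sketch (the callback class below is hypothetical; a real
+// caller, such as the JFR thread sampler, supplies its own
+// os::CrashProtectionCallback subclass):
+//
+//   class SampleTask : public os::CrashProtectionCallback {
+//     virtual void call() { /* work that may fault, e.g. reading another thread's stack */ }
+//   };
+//
+//   SampleTask task;
+//   os::ThreadCrashProtection crash_protection;
+//   if (!crash_protection.call(task)) {
+//     // an OS exception was raised inside call(); the task did not complete
+//   }
+//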
+bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
+
+  Thread::muxAcquire(&_crash_mux, "CrashProtection");
+
+  _protected_thread = ThreadLocalStorage::thread();
+  assert(_protected_thread != NULL, "Cannot crash protect a NULL thread");
+
+  bool success = true;
+  __try {
+    _crash_protection = this;
+    cb.call();
+  } __except(EXCEPTION_EXECUTE_HANDLER) {
+    // only for protection, nothing to do
+    success = false;
+  }
+  _crash_protection = NULL;
+  _protected_thread = NULL;
+  Thread::muxRelease(&_crash_mux);
+  return success;
+}
+
 os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() {
   assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread");
 }
--- a/src/os/windows/vm/os_windows.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/os/windows/vm/os_windows.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -111,6 +111,28 @@
  * don't call code that could leave the heap / memory in an inconsistent state,
  * or anything else where we are not in control if we suddenly jump out.
  */
+class ThreadCrashProtection : public StackObj {
+public:
+  static bool is_crash_protected(Thread* thr) {
+    return _crash_protection != NULL && _protected_thread == thr;
+  }
+
+  ThreadCrashProtection();
+  bool call(os::CrashProtectionCallback& cb);
+private:
+  static Thread* _protected_thread;
+  static ThreadCrashProtection* _crash_protection;
+  static volatile intptr_t _crash_mux;
+};
+
+/*
+ * Crash protection for the watcher thread. Wraps the callback
+ * with a __try { call() } block.
+ * To be able to use this - don't take locks, don't rely on destructors,
+ * don't make OS library calls, don't allocate memory, don't print,
+ * don't call code that could leave the heap / memory in an inconsistent state,
+ * or anything else where we are not in control if we suddenly jump out.
+ */
 class WatcherThreadCrashProtection : public StackObj {
 public:
   WatcherThreadCrashProtection();
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/windows/vm/pdh_interface.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "pdh_interface.hpp"
+#include "runtime/os.hpp"
+#include "utilities/macros.hpp"
+
+// PDH API
+typedef PDH_STATUS (WINAPI *PdhAddCounter_Fn)(HQUERY, LPCSTR, DWORD, HCOUNTER*);
+typedef PDH_STATUS (WINAPI *PdhOpenQuery_Fn)(LPCWSTR, DWORD, HQUERY*);
+typedef DWORD      (WINAPI *PdhCloseQuery_Fn)(HQUERY);
+typedef PDH_STATUS (WINAPI *PdhCollectQueryData_Fn)(HQUERY);
+typedef DWORD      (WINAPI *PdhGetFormattedCounterValue_Fn)(HCOUNTER, DWORD, LPDWORD, PPDH_FMT_COUNTERVALUE);
+typedef PDH_STATUS (WINAPI *PdhEnumObjectItems_Fn)(LPCTSTR, LPCTSTR, LPCTSTR, LPTSTR, LPDWORD, LPTSTR, LPDWORD, DWORD, DWORD);
+typedef PDH_STATUS (WINAPI *PdhRemoveCounter_Fn)(HCOUNTER);
+typedef PDH_STATUS (WINAPI *PdhLookupPerfNameByIndex_Fn)(LPCSTR, DWORD, LPSTR, LPDWORD);
+typedef PDH_STATUS (WINAPI *PdhMakeCounterPath_Fn)(PDH_COUNTER_PATH_ELEMENTS*, LPTSTR, LPDWORD, DWORD);
+
+PdhAddCounter_Fn PdhDll::_PdhAddCounter = NULL;
+PdhOpenQuery_Fn  PdhDll::_PdhOpenQuery = NULL;
+PdhCloseQuery_Fn PdhDll::_PdhCloseQuery = NULL;
+PdhCollectQueryData_Fn PdhDll::_PdhCollectQueryData = NULL;
+PdhGetFormattedCounterValue_Fn PdhDll::_PdhGetFormattedCounterValue = NULL;
+PdhEnumObjectItems_Fn PdhDll::_PdhEnumObjectItems = NULL;
+PdhRemoveCounter_Fn PdhDll::_PdhRemoveCounter = NULL;
+PdhLookupPerfNameByIndex_Fn PdhDll::_PdhLookupPerfNameByIndex = NULL;
+PdhMakeCounterPath_Fn PdhDll::_PdhMakeCounterPath = NULL;
+
+LONG PdhDll::_critical_section = 0;
+LONG PdhDll::_initialized = 0;
+LONG PdhDll::_pdh_reference_count = 0;
+HMODULE PdhDll::_hModule = NULL;
+
+void PdhDll::initialize(void) {
+  _hModule = os::win32::load_Windows_dll("pdh.dll", NULL, 0);
+  if (NULL == _hModule) {
+    return;
+  }
+  // The 'A' at the end means the ANSI (not the Unicode) versions of the methods
+  _PdhAddCounter               = (PdhAddCounter_Fn)::GetProcAddress(_hModule, "PdhAddCounterA");
+  _PdhOpenQuery                = (PdhOpenQuery_Fn)::GetProcAddress(_hModule, "PdhOpenQueryA");
+  _PdhCloseQuery               = (PdhCloseQuery_Fn)::GetProcAddress(_hModule, "PdhCloseQuery");
+  _PdhCollectQueryData         = (PdhCollectQueryData_Fn)::GetProcAddress(_hModule, "PdhCollectQueryData");
+  _PdhGetFormattedCounterValue = (PdhGetFormattedCounterValue_Fn)::GetProcAddress(_hModule, "PdhGetFormattedCounterValue");
+  _PdhEnumObjectItems          = (PdhEnumObjectItems_Fn)::GetProcAddress(_hModule, "PdhEnumObjectItemsA");
+  _PdhRemoveCounter            = (PdhRemoveCounter_Fn)::GetProcAddress(_hModule, "PdhRemoveCounter");
+  _PdhLookupPerfNameByIndex    = (PdhLookupPerfNameByIndex_Fn)::GetProcAddress(_hModule, "PdhLookupPerfNameByIndexA");
+  _PdhMakeCounterPath          = (PdhMakeCounterPath_Fn)::GetProcAddress(_hModule, "PdhMakeCounterPathA");
+  InterlockedExchange(&_initialized, 1);
+}
+
+bool PdhDll::PdhDetach(void) {
+  LONG prev_ref_count = InterlockedExchangeAdd(&_pdh_reference_count, -1);
+  BOOL ret = false;
+  if (1 == prev_ref_count) {
+    if (_initialized && _hModule != NULL) {
+      ret = FreeLibrary(_hModule);
+      if (ret) {
+        _hModule = NULL;
+        _PdhAddCounter = NULL;
+        _PdhOpenQuery = NULL;
+        _PdhCloseQuery = NULL;
+        _PdhCollectQueryData = NULL;
+        _PdhGetFormattedCounterValue = NULL;
+        _PdhEnumObjectItems = NULL;
+        _PdhRemoveCounter = NULL;
+        _PdhLookupPerfNameByIndex = NULL;
+        _PdhMakeCounterPath = NULL;
+        InterlockedExchange(&_initialized, 0);
+      }
+    }
+  }
+  return ret != 0;
+}
+
+bool PdhDll::PdhAttach(void) {
+  InterlockedExchangeAdd(&_pdh_reference_count, 1);
+  if (1 == _initialized) {
+    return true;
+  }
+  while (InterlockedCompareExchange(&_critical_section, 1, 0) == 1);
+  if (0 == _initialized) {
+    initialize();
+  }
+  while (InterlockedCompareExchange(&_critical_section, 0, 1) == 0);
+  return (_PdhAddCounter != NULL && _PdhOpenQuery != NULL
+         && _PdhCloseQuery != NULL && _PdhCollectQueryData != NULL
+         && _PdhGetFormattedCounterValue != NULL && _PdhEnumObjectItems != NULL
+         && _PdhRemoveCounter != NULL && _PdhLookupPerfNameByIndex != NULL
+         && _PdhMakeCounterPath != NULL);
+}
+
+PDH_STATUS PdhDll::PdhAddCounter(HQUERY hQuery, LPCSTR szFullCounterPath, DWORD dwUserData, HCOUNTER* phCounter) {
+  assert(_initialized && _PdhAddCounter != NULL, "PdhAvailable() not yet called");
+  return _PdhAddCounter(hQuery, szFullCounterPath, dwUserData, phCounter);
+}
+
+PDH_STATUS PdhDll::PdhOpenQuery(LPCWSTR szDataSource, DWORD dwUserData, HQUERY* phQuery) {
+  assert(_initialized && _PdhOpenQuery != NULL, "PdhAvailable() not yet called");
+  return _PdhOpenQuery(szDataSource, dwUserData, phQuery);
+}
+
+DWORD PdhDll::PdhCloseQuery(HQUERY hQuery) {
+  assert(_initialized && _PdhCloseQuery != NULL, "PdhAvailable() not yet called");
+  return _PdhCloseQuery(hQuery);
+}
+
+PDH_STATUS PdhDll::PdhCollectQueryData(HQUERY hQuery) {
+  assert(_initialized && _PdhCollectQueryData != NULL, "PdhAvailable() not yet called");
+  return _PdhCollectQueryData(hQuery);
+}
+
+DWORD PdhDll::PdhGetFormattedCounterValue(HCOUNTER hCounter, DWORD dwFormat, LPDWORD lpdwType, PPDH_FMT_COUNTERVALUE pValue) {
+  assert(_initialized && _PdhGetFormattedCounterValue != NULL, "PdhAvailable() not yet called");
+  return _PdhGetFormattedCounterValue(hCounter, dwFormat, lpdwType, pValue);
+}
+
+PDH_STATUS PdhDll::PdhEnumObjectItems(LPCTSTR szDataSource, LPCTSTR szMachineName, LPCTSTR szObjectName,
+    LPTSTR mszCounterList, LPDWORD pcchCounterListLength, LPTSTR mszInstanceList,
+    LPDWORD pcchInstanceListLength, DWORD dwDetailLevel, DWORD dwFlags) {
+  assert(_initialized && _PdhEnumObjectItems != NULL, "PdhAvailable() not yet called");
+  return _PdhEnumObjectItems(szDataSource, szMachineName, szObjectName, mszCounterList, pcchCounterListLength,
+    mszInstanceList, pcchInstanceListLength, dwDetailLevel, dwFlags);
+}
+
+PDH_STATUS PdhDll::PdhRemoveCounter(HCOUNTER hCounter) {
+  assert(_initialized && _PdhRemoveCounter != NULL, "PdhAvailable() not yet called");
+  return _PdhRemoveCounter(hCounter);
+}
+
+PDH_STATUS PdhDll::PdhLookupPerfNameByIndex(LPCSTR szMachineName, DWORD dwNameIndex, LPSTR szNameBuffer, LPDWORD pcchNameBufferSize) {
+  assert(_initialized && _PdhLookupPerfNameByIndex != NULL, "PdhAvailable() not yet called");
+  return _PdhLookupPerfNameByIndex(szMachineName, dwNameIndex, szNameBuffer, pcchNameBufferSize);
+}
+
+PDH_STATUS PdhDll::PdhMakeCounterPath(PDH_COUNTER_PATH_ELEMENTS* pCounterPathElements, LPTSTR szFullPathBuffer, LPDWORD pcchBufferSize, DWORD dwFlags) {
+  assert(_initialized && _PdhMakeCounterPath != NULL, "PdhAvailable() not yet called");
+  return _PdhMakeCounterPath(pCounterPathElements, szFullPathBuffer, pcchBufferSize, dwFlags);
+}
+
+bool PdhDll::PdhStatusFail(PDH_STATUS pdhStat) {
+  return pdhStat != ERROR_SUCCESS && pdhStat != PDH_MORE_DATA;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/windows/vm/pdh_interface.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_WINDOWS_VM_PDH_INTERFACE_HPP
+#define OS_WINDOWS_VM_PDH_INTERFACE_HPP
+
+#include "memory/allocation.hpp"
+#include <pdh.h>
+#include <pdhmsg.h>
+
+class PdhDll: public AllStatic {
+ private:
+  static LONG       _pdh_reference_count;
+  static LONG       _critical_section;
+  static LONG       _initialized;
+  static HMODULE    _hModule;
+  static void       initialize();
+  static PDH_STATUS (WINAPI *_PdhAddCounter)(HQUERY, LPCSTR, DWORD, HCOUNTER*);
+  static PDH_STATUS (WINAPI *_PdhOpenQuery)(LPCWSTR, DWORD, HQUERY*);
+  static DWORD      (WINAPI *_PdhCloseQuery)(HQUERY);
+  static PDH_STATUS (WINAPI *_PdhCollectQueryData)(HQUERY);
+  static DWORD      (WINAPI *_PdhGetFormattedCounterValue)(HCOUNTER, DWORD, LPDWORD, PPDH_FMT_COUNTERVALUE);
+  static PDH_STATUS (WINAPI *_PdhEnumObjectItems)(LPCTSTR, LPCTSTR, LPCTSTR, LPTSTR, LPDWORD, LPTSTR, LPDWORD, DWORD, DWORD);
+  static PDH_STATUS (WINAPI *_PdhRemoveCounter)(HCOUNTER);
+  static PDH_STATUS (WINAPI *_PdhLookupPerfNameByIndex)(LPCSTR, DWORD, LPSTR, LPDWORD);
+  static PDH_STATUS (WINAPI *_PdhMakeCounterPath)(PPDH_COUNTER_PATH_ELEMENTS, LPTSTR, LPDWORD, DWORD);
+
+ public:
+  static PDH_STATUS PdhAddCounter(HQUERY, LPCSTR, DWORD, HCOUNTER*);
+  static PDH_STATUS PdhOpenQuery(LPCWSTR, DWORD, HQUERY*);
+  static DWORD      PdhCloseQuery(HQUERY);
+  static PDH_STATUS PdhCollectQueryData(HQUERY);
+  static DWORD      PdhGetFormattedCounterValue(HCOUNTER, DWORD, LPDWORD, PPDH_FMT_COUNTERVALUE);
+  static PDH_STATUS PdhEnumObjectItems(LPCTSTR, LPCTSTR, LPCTSTR, LPTSTR, LPDWORD, LPTSTR, LPDWORD, DWORD, DWORD);
+  static PDH_STATUS PdhRemoveCounter(HCOUNTER);
+  static PDH_STATUS PdhLookupPerfNameByIndex(LPCSTR, DWORD, LPSTR, LPDWORD);
+  static PDH_STATUS PdhMakeCounterPath(PPDH_COUNTER_PATH_ELEMENTS, LPTSTR, LPDWORD, DWORD);
+  static bool       PdhStatusFail(PDH_STATUS pdhStat);
+  static bool       PdhAttach();
+  static bool       PdhDetach();
+};
+
+#endif // OS_WINDOWS_VM_PDH_INTERFACE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/windows/vm/semaphore_windows.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "semaphore_windows.hpp"
+#include "utilities/debug.hpp"
+
+#include <windows.h>
+#include <errno.h>
+
+WindowsSemaphore::WindowsSemaphore(uint value) {
+  _semaphore = ::CreateSemaphore(NULL, value, LONG_MAX, NULL);
+
+  guarantee(_semaphore != NULL, err_msg("CreateSemaphore failed with error code: %lu", GetLastError()));
+}
+
+WindowsSemaphore::~WindowsSemaphore() {
+  ::CloseHandle(_semaphore);
+}
+
+void WindowsSemaphore::signal(uint count) {
+  if (count > 0) {
+    BOOL ret = ::ReleaseSemaphore(_semaphore, count, NULL);
+
+    assert(ret != 0, err_msg("ReleaseSemaphore failed with error code: %lu", GetLastError()));
+  }
+}
+
+void WindowsSemaphore::wait() {
+  DWORD ret = ::WaitForSingleObject(_semaphore, INFINITE);
+  assert(ret != WAIT_FAILED,   err_msg("WaitForSingleObject failed with error code: %lu", GetLastError()));
+  assert(ret == WAIT_OBJECT_0, err_msg("WaitForSingleObject failed with return value: %lu", ret));
+}
+
+bool WindowsSemaphore::trywait() {
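+  // A zero-millisecond timeout makes WaitForSingleObject return immediately,
+  // so this is a non-blocking poll of the semaphore.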
+  DWORD ret = ::WaitForSingleObject(_semaphore, 0);
+  assert(ret != WAIT_FAILED,   err_msg("WaitForSingleObject failed with error code: %lu", GetLastError()));
+  return ret == WAIT_OBJECT_0;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/windows/vm/semaphore_windows.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_WINDOWS_VM_SEMAPHORE_WINDOWS_HPP
+#define OS_WINDOWS_VM_SEMAPHORE_WINDOWS_HPP
+
+#include "memory/allocation.hpp"
+
+#include <windows.h>
+
+class WindowsSemaphore : public CHeapObj<mtInternal> {
+  HANDLE _semaphore;
+
+  // Prevent copying and assignment.
+  WindowsSemaphore(const WindowsSemaphore&);
+  WindowsSemaphore& operator=(const WindowsSemaphore&);
+
+ public:
+  WindowsSemaphore(uint value = 0);
+  ~WindowsSemaphore();
+
+  void signal(uint count = 1);
+
+  void wait();
+
+  bool trywait();
+};
+
+typedef WindowsSemaphore SemaphoreImpl;
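+
+// Usage sketch (illustrative only):
+//   WindowsSemaphore sem(0);
+//   sem.signal();   // release one permit
+//   sem.wait();     // block until a permit is available; trywait() polls instead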
+
+#endif // OS_WINDOWS_VM_SEMAPHORE_WINDOWS_HPP
--- a/src/os_cpu/linux_ppc/vm/thread_linux_ppc.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/os_cpu/linux_ppc/vm/thread_linux_ppc.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -24,13 +24,64 @@
  */
 
 #include "precompiled.hpp"
-#include "runtime/frame.hpp"
+#include "runtime/frame.inline.hpp"
 #include "runtime/thread.hpp"
 
+bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) {
+  assert(this->is_Java_thread(), "must be JavaThread");
+
+  // If we have a last_Java_frame, then we should use it even if
+  // isInJava == true.  It should be more reliable than ucontext info.
+  if (has_last_Java_frame() && frame_anchor()->walkable()) {
+    *fr_addr = pd_last_frame();
+    return true;
+  }
+
+  // At this point, we don't have a last_Java_frame, so
+  // we try to glean some information out of the ucontext
+  // if we were running Java code when SIGPROF came in.
+  if (isInJava) {
+    ucontext_t* uc = (ucontext_t*) ucontext;
+    frame ret_frame((intptr_t*)uc->uc_mcontext.regs->gpr[1/*REG_SP*/],
+                     (address)uc->uc_mcontext.regs->nip);
+
+    if (ret_frame.pc() == NULL) {
+      // ucontext wasn't useful
+      return false;
+    }
+
+    if (ret_frame.is_interpreted_frame()) {
+       frame::ijava_state* istate = ret_frame.get_ijava_state();
+       if (!((Method*)(istate->method))->is_metaspace_object()) {
+         return false;
+       }
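+       // The bcp is only trusted if it points into the method's bytecodes: prefer the
+       // value saved in the interpreter state, fall back to the live R14_bcp register,
+       // otherwise give up on this sample.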
+       uint64_t reg_bcp = uc->uc_mcontext.regs->gpr[14/*R14_bcp*/];
+       uint64_t istate_bcp = istate->bcp;
+       uint64_t code_start = (uint64_t)(((Method*)(istate->method))->code_base());
+       uint64_t code_end = (uint64_t)(((Method*)istate->method)->code_base() + ((Method*)istate->method)->code_size());
+       if (istate_bcp >= code_start && istate_bcp < code_end) {
+         // we have a valid bcp, don't touch it, do nothing
+       } else if (reg_bcp >= code_start && reg_bcp < code_end) {
+         istate->bcp = reg_bcp;
+       } else {
+         return false;
+       }
+    }
+    if (!ret_frame.safe_for_sender(this)) {
+      // nothing else to try if the frame isn't good
+      return false;
+    }
+    *fr_addr = ret_frame;
+    return true;
+  }
+  // nothing else to try
+  return false;
+}
+
 // Forte Analyzer AsyncGetCallTrace profiling support is not implemented on Linux/PPC.
 bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava) {
-  Unimplemented();
-  return false;
+  assert(this->is_Java_thread(), "must be JavaThread");
+  return pd_get_top_frame_for_profiling(fr_addr, ucontext, isInJava);
 }
 
 void JavaThread::cache_global_variables() { }
--- a/src/os_cpu/linux_ppc/vm/thread_linux_ppc.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/os_cpu/linux_ppc/vm/thread_linux_ppc.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -62,6 +62,8 @@
 
   bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava);
 
+  bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava);
+
  protected:
 
   // -Xprof support
--- a/src/os_cpu/solaris_x86/vm/os_solaris_x86.inline.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/os_cpu/solaris_x86/vm/os_solaris_x86.inline.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,8 @@
 
 #include "runtime/os.hpp"
 
+extern "C" jlong _raw_rdtsc(); // In .il file
+
 inline jlong os::rdtsc() { return _raw_rdtsc(); }
 
 #endif // OS_CPU_SOLARIS_X86_VM_OS_SOLARIS_X86_INLINE_HPP
--- a/src/share/vm/c1/c1_GraphBuilder.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -34,6 +34,7 @@
 #include "ci/ciMemberName.hpp"
 #include "compiler/compileBroker.hpp"
 #include "interpreter/bytecode.hpp"
+#include "jfr/jfrEvents.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/compilationPolicy.hpp"
 #include "utilities/bitMap.inline.hpp"
@@ -3460,10 +3461,16 @@
       if (!InlineArrayCopy) return false;
       break;
 
-#ifdef TRACE_HAVE_INTRINSICS
-    case vmIntrinsics::_classID:
-    case vmIntrinsics::_threadID:
-      preserves_state = true;
+#ifdef JFR_HAVE_INTRINSICS
+#if defined(_LP64) || !defined(TRACE_ID_CLASS_SHIFT)
+    case vmIntrinsics::_getClassId:
+      preserves_state = false;
+      cantrap = false;
+      break;
+#endif
+
+    case vmIntrinsics::_getEventWriter:
+      preserves_state = false;
       cantrap = true;
       break;
 
@@ -4396,6 +4403,30 @@
 }
 
 
+static void post_inlining_event(EventCompilerInlining* event,
+                                int compile_id,
+                                const char* msg,
+                                bool success,
+                                int bci,
+                                ciMethod* caller,
+                                ciMethod* callee) {
+  assert(caller != NULL, "invariant");
+  assert(callee != NULL, "invariant");
+  assert(event != NULL, "invariant");
+  assert(event->should_commit(), "invariant");
+  JfrStructCalleeMethod callee_struct;
+  callee_struct.set_type(callee->holder()->name()->as_utf8());
+  callee_struct.set_name(callee->name()->as_utf8());
+  callee_struct.set_descriptor(callee->signature()->as_symbol()->as_utf8());
+  event->set_compileId(compile_id);
+  event->set_message(msg);
+  event->set_succeeded(success);
+  event->set_bci(bci);
+  event->set_caller(caller->get_Method());
+  event->set_callee(callee_struct);
+  event->commit();
+}
+
 void GraphBuilder::print_inlining(ciMethod* callee, const char* msg, bool success) {
   CompileLog* log = compilation()->log();
   if (log != NULL) {
@@ -4412,6 +4443,11 @@
     }
   }
 
+  EventCompilerInlining event;
+  if (event.should_commit()) {
+    post_inlining_event(&event, compilation()->env()->task()->compile_id(), msg, success, bci(), method(), callee);
+  }
+
   if (!PrintInlining && !compilation()->method()->has_option("PrintInlining")) {
     return;
   }
--- a/src/share/vm/c1/c1_LIRGenerator.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/c1/c1_LIRGenerator.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -3059,6 +3059,51 @@
   __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
 }
 
+#ifdef JFR_HAVE_INTRINSICS
+void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) {
+  CodeEmitInfo* info = state_for(x);
+  CodeEmitInfo* info2 = new CodeEmitInfo(info); // Clone for the second null check
+
+  assert(info != NULL, "must have info");
+  LIRItem arg(x->argument_at(0), this);
+
+  arg.load_item();
+  LIR_Opr klass = new_register(T_METADATA);
+  __ move(new LIR_Address(arg.result(), java_lang_Class::klass_offset_in_bytes(), T_ADDRESS), klass, info);
+  LIR_Opr id = new_register(T_LONG);
+  ByteSize offset = KLASS_TRACE_ID_OFFSET;
+  LIR_Address* trace_id_addr = new LIR_Address(klass, in_bytes(offset), T_LONG);
+
+  __ move(trace_id_addr, id);
+  __ logical_or(id, LIR_OprFact::longConst(0x01l), id);
+  __ store(id, trace_id_addr);
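+  // The OR above sets the low bit of the klass trace id (marking the id as in use)
+  // before the value is masked and shifted into the result returned to Java.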
+
+#ifdef TRACE_ID_META_BITS
+  __ logical_and(id, LIR_OprFact::longConst(~TRACE_ID_META_BITS), id);
+#endif
+#ifdef TRACE_ID_SHIFT
+  __ unsigned_shift_right(id, TRACE_ID_SHIFT, id);
+#endif
+
+  __ move(id, rlock_result(x));
+}
+
+void LIRGenerator::do_getEventWriter(Intrinsic* x) {
+  LabelObj* L_end = new LabelObj();
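+  // Load the thread-local event-writer jobject; if it is NULL the intrinsic returns NULL,
+  // otherwise the handle is dereferenced to yield the event writer oop.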
+
+  LIR_Address* jobj_addr = new LIR_Address(getThreadPointer(),
+                                           in_bytes(THREAD_LOCAL_WRITER_OFFSET_JFR),
+                                           T_OBJECT);
+  LIR_Opr result = rlock_result(x);
+  __ move_wide(jobj_addr, result);
+  __ cmp(lir_cond_equal, result, LIR_OprFact::oopConst(NULL));
+  __ branch(lir_cond_equal, T_OBJECT, L_end->label());
+  __ move_wide(new LIR_Address(result, T_OBJECT), result);
+
+  __ branch_destination(L_end->label());
+}
+#endif
+
 void LIRGenerator::do_RuntimeCall(address routine, int expected_arguments, Intrinsic* x) {
     assert(x->number_of_arguments() == expected_arguments, "wrong type");
     LIR_Opr reg = result_register_for(x->type());
@@ -3115,11 +3160,15 @@
     break;
   }
 
-#ifdef TRACE_HAVE_INTRINSICS
-  case vmIntrinsics::_threadID: do_ThreadIDIntrinsic(x); break;
-  case vmIntrinsics::_classID: do_ClassIDIntrinsic(x); break;
+#ifdef JFR_HAVE_INTRINSICS
+  case vmIntrinsics::_getClassId:
+    do_ClassIDIntrinsic(x);
+    break;
+  case vmIntrinsics::_getEventWriter:
+    do_getEventWriter(x);
+    break;
   case vmIntrinsics::_counterTime:
-    do_RuntimeCall(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), 0, x);
+    do_RuntimeCall(CAST_FROM_FN_PTR(address, JFR_TIME_FUNCTION), 0, x);
     break;
 #endif
 
--- a/src/share/vm/c1/c1_LIRGenerator.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/c1/c1_LIRGenerator.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -28,6 +28,7 @@
 #include "c1/c1_Instruction.hpp"
 #include "c1/c1_LIR.hpp"
 #include "ci/ciMethodData.hpp"
+#include "jfr/support/jfrIntrinsics.hpp"
 #include "utilities/sizes.hpp"
 
 // The classes responsible for code emission and register allocation
@@ -436,9 +437,9 @@
   void do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux);
 
   void do_RuntimeCall(address routine, int expected_arguments, Intrinsic* x);
-#ifdef TRACE_HAVE_INTRINSICS
-  void do_ThreadIDIntrinsic(Intrinsic* x);
+#ifdef JFR_HAVE_INTRINSICS
   void do_ClassIDIntrinsic(Intrinsic* x);
+  void do_getEventWriter(Intrinsic* x);
 #endif
   ciKlass* profile_type(ciMethodData* md, int md_first_offset, int md_offset, intptr_t profiled_k,
                         Value arg, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k,
--- a/src/share/vm/c1/c1_Runtime1.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/c1/c1_Runtime1.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -41,6 +41,7 @@
 #include "gc_interface/collectedHeap.hpp"
 #include "interpreter/bytecode.hpp"
 #include "interpreter/interpreter.hpp"
+#include "jfr/support/jfrIntrinsics.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/barrierSet.hpp"
 #include "memory/oopFactory.hpp"
@@ -296,8 +297,8 @@
   FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit);
   FUNCTION_CASE(entry, is_instance_of);
   FUNCTION_CASE(entry, trace_block_entry);
-#ifdef TRACE_HAVE_INTRINSICS
-  FUNCTION_CASE(entry, TRACE_TIME_METHOD);
+#ifdef JFR_HAVE_INTRINSICS
+  FUNCTION_CASE(entry, JFR_TIME_FUNCTION);
 #endif
   FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32());
 
--- a/src/share/vm/ci/ciEnv.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/ci/ciEnv.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -40,6 +40,7 @@
 #include "compiler/compilerOracle.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
 #include "interpreter/linkResolver.hpp"
+#include "jfr/jfrEvents.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/universe.inline.hpp"
--- a/src/share/vm/ci/ciMethod.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/ci/ciMethod.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -93,12 +93,6 @@
   ciMethod(methodHandle h_m, ciInstanceKlass* holder);
   ciMethod(ciInstanceKlass* holder, ciSymbol* name, ciSymbol* signature, ciInstanceKlass* accessor);
 
-  Method* get_Method() const {
-    Method* m = (Method*)_metadata;
-    assert(m != NULL, "illegal use of unloaded method");
-    return m;
-  }
-
   oop loader() const                             { return _holder->loader(); }
 
   const char* type_string()                      { return "ciMethod"; }
@@ -156,6 +150,11 @@
     }
   }
 
+  Method* get_Method() const {
+    Method* m = (Method*)_metadata;
+    assert(m != NULL, "illegal use of unloaded method");
+    return m;
+  }
 
   // Method code and related information.
   address code()                                 { if (_code == NULL) load_code(); return _code; }
--- a/src/share/vm/classfile/classFileParser.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/classfile/classFileParser.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -3884,14 +3884,14 @@
   access_flags.set_flags(flags);
 
   // This class and superclass
-  u2 this_class_index = cfs->get_u2_fast();
+  _this_class_index = cfs->get_u2_fast();
   check_property(
-    valid_cp_range(this_class_index, cp_size) &&
-      cp->tag_at(this_class_index).is_unresolved_klass(),
+    valid_cp_range(_this_class_index, cp_size) &&
+      cp->tag_at(_this_class_index).is_unresolved_klass(),
     "Invalid this class index %u in constant pool in class file %s",
-    this_class_index, CHECK_(nullHandle));
-
-  Symbol*  class_name  = cp->unresolved_klass_at(this_class_index);
+    _this_class_index, CHECK_(nullHandle));
+
+  Symbol*  class_name  = cp->unresolved_klass_at(_this_class_index);
   assert(class_name != NULL, "class_name can't be null");
 
   // It's important to set parsed_name *before* resolving the super class.
@@ -4122,9 +4122,9 @@
     // that changes, then InstanceKlass::idnum_can_increment()
     // has to be changed accordingly.
     this_klass->set_initial_method_idnum(methods->length());
-    this_klass->set_name(cp->klass_name_at(this_class_index));
+    this_klass->set_name(cp->klass_name_at(_this_class_index));
     if (is_anonymous())  // I am well known to myself
-      cp->klass_at_put(this_class_index, this_klass()); // eagerly resolve
+      cp->klass_at_put(_this_class_index, this_klass()); // eagerly resolve
 
     this_klass->set_minor_version(minor_version);
     this_klass->set_major_version(major_version);
@@ -5273,3 +5273,25 @@
   }
   return NULL;
 }
+
+#if INCLUDE_JFR
+
+// Caller responsible for ResourceMark
+// clone stream with rewound position
+ClassFileStream* ClassFileParser::clone_stream() const {
+  assert(_stream != NULL, "invariant");
+
+  return _stream->clone();
+}
+
+void ClassFileParser::set_klass_to_deallocate(InstanceKlass* klass) {
+#ifdef ASSERT
+  if (klass != NULL) {
+    assert(NULL == _klass, "leaking?");
+  }
+#endif
+
+  _klass = klass;
+}
+
+#endif // INCLUDE_JFR
--- a/src/share/vm/classfile/classFileParser.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/classfile/classFileParser.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -47,6 +47,7 @@
   bool _relax_verify;
   u2   _major_version;
   u2   _minor_version;
+  u2   _this_class_index;
   Symbol* _class_name;
   ClassLoaderData* _loader_data;
   KlassHandle _host_klass;
@@ -491,6 +492,13 @@
   static void check_super_interface_access(instanceKlassHandle this_klass, TRAPS);
   static void check_final_method_override(instanceKlassHandle this_klass, TRAPS);
   static void check_illegal_static_method(instanceKlassHandle this_klass, TRAPS);
+
+  u2 this_class_index() const { return _this_class_index; }
+
+#if INCLUDE_JFR
+  ClassFileStream* clone_stream() const;
+  void set_klass_to_deallocate(InstanceKlass* klass);
+#endif // INCLUDE_JFR
 };
 
 #endif // SHARE_VM_CLASSFILE_CLASSFILEPARSER_HPP
--- a/src/share/vm/classfile/classFileStream.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/classfile/classFileStream.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -30,12 +30,12 @@
   THROW_MSG(vmSymbols::java_lang_ClassFormatError(), "Truncated class file");
 }
 
-ClassFileStream::ClassFileStream(u1* buffer, int length, const char* source) {
+ClassFileStream::ClassFileStream(u1* buffer, int length, const char* source, bool need_verify) {
   _buffer_start = buffer;
   _buffer_end   = buffer + length;
   _current      = buffer;
   _source       = source;
-  _need_verify  = false;
+  _need_verify  = need_verify;
 }
 
 u1 ClassFileStream::get_u1(TRAPS) {
@@ -100,3 +100,31 @@
   }
   _current += length * 4;
 }
+
+#if INCLUDE_JFR
+
+u1* ClassFileStream::clone_buffer() const {
+  u1* const new_buffer_start = NEW_RESOURCE_ARRAY(u1, length());
+  memcpy(new_buffer_start, _buffer_start, length());
+  return new_buffer_start;
+}
+
+const char* const ClassFileStream::clone_source() const {
+  const char* const src = source();
+  char* source_copy = NULL;
+  if (src != NULL) {
+    size_t source_len = strlen(src);
+    source_copy = NEW_RESOURCE_ARRAY(char, source_len + 1);
+    strncpy(source_copy, src, source_len + 1);
+  }
+  return source_copy;
+}
+
+ClassFileStream* ClassFileStream::clone() const {
+  u1* const new_buffer_start = clone_buffer();
+  return new ClassFileStream(new_buffer_start,
+                             length(),
+                             clone_source(),
+                             need_verify());
+}
+#endif // INCLUDE_JFR
--- a/src/share/vm/classfile/classFileStream.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/classfile/classFileStream.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -57,15 +57,25 @@
   bool  _need_verify;  // True if verification is on for the class file
 
   void truncated_file_error(TRAPS);
+
+#if INCLUDE_JFR
+  u1* clone_buffer() const;
+  const char* const clone_source() const;
+#endif
+
  public:
   // Constructor
-  ClassFileStream(u1* buffer, int length, const char* source);
+  ClassFileStream(u1* buffer, int length, const char* source, bool need_verify = false);
 
   // Buffer access
   u1* buffer() const           { return _buffer_start; }
   int length() const           { return _buffer_end - _buffer_start; }
   u1* current() const          { return _current; }
   void set_current(u1* pos)    { _current = pos; }
+  // for relative positioning
+  juint current_offset() const {
+    return (juint)(_current - _buffer_start);
+  }
   const char* source() const   { return _source; }
   void set_verify(bool flag)   { _need_verify = flag; }
 
@@ -140,6 +150,12 @@
 
   // Tells whether eos is reached
   bool at_eos() const          { return _current == _buffer_end; }
+
+#if INCLUDE_JFR
+  ClassFileStream* clone() const;
+
+  bool need_verify() const { return _need_verify; }
+#endif
 };
 
 #endif // SHARE_VM_CLASSFILE_CLASSFILESTREAM_HPP
--- a/src/share/vm/classfile/classLoader.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/classfile/classLoader.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -1134,6 +1134,15 @@
       }
       return h;
     }
+
+#if INCLUDE_JFR
+    {
+      InstanceKlass* ik = result();
+      ON_KLASS_CREATION(ik, parser, THREAD);
+      result = instanceKlassHandle(ik);
+    }
+#endif
+
     h = context.record_result(classpath_index, e, result, THREAD);
   } else {
     if (DumpSharedSpaces) {
--- a/src/share/vm/classfile/classLoaderData.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/classfile/classLoaderData.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -64,8 +64,10 @@
 #include "utilities/growableArray.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/ostream.hpp"
-#if INCLUDE_TRACE
-#include "trace/tracing.hpp"
+#include "utilities/ticks.hpp"
+#if INCLUDE_JFR
+#include "jfr/jfr.hpp"
+#include "jfr/jfrEvents.hpp"
 #endif
 
 ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL;
@@ -81,7 +83,8 @@
   _claimed(0), _jmethod_ids(NULL), _handles(), _deallocate_list(NULL),
   _next(NULL), _dependencies(dependencies),
   _metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true)) {
-    // empty
+
+  JFR_ONLY(INIT_ID(this);)
 }
 
 void ClassLoaderData::init_dependencies(TRAPS) {
@@ -646,6 +649,16 @@
   }
 }
 
+void ClassLoaderDataGraph::cld_unloading_do(CLDClosure* cl) {
+  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
+  // Only walk the head until any clds not purged from prior unloading
+  // (CMS doesn't purge right away).
+  for (ClassLoaderData* cld = _unloading; cld != _saved_unloading; cld = cld->next()) {
+    assert(cld->is_unloading(), "invariant");
+    cl->do_cld(cld);
+  }
+}
+
 void ClassLoaderDataGraph::roots_cld_do(CLDClosure* strong, CLDClosure* weak) {
   for (ClassLoaderData* cld = _head;  cld != NULL; cld = cld->_next) {
     CLDClosure* closure = cld->keep_alive() ? strong : weak;
@@ -740,6 +753,28 @@
 }
 #endif // PRODUCT
 
+#if INCLUDE_JFR
+static Ticks class_unload_time;
+static void post_class_unload_event(Klass* const k) {
+  assert(k != NULL, "invariant");
+  EventClassUnload event(UNTIMED);
+  event.set_endtime(class_unload_time);
+  event.set_unloadedClass(k);
+  event.set_definingClassLoader(k->class_loader_data());
+  event.commit();
+}
+
+static void post_class_unload_events() {
+  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
+  if (Jfr::is_enabled()) {
+    if (EventClassUnload::is_enabled()) {
+      class_unload_time = Ticks::now();
+      ClassLoaderDataGraph::classes_unloading_do(&post_class_unload_event);
+    }
+    Jfr::on_unloading_classes();
+  }
+}
+#endif // INCLUDE_JFR
 
 // Move class loader data from main list to the unloaded list for unloading
 // and deallocation later.
@@ -781,7 +816,7 @@
   }
 
   if (seen_dead_loader) {
-    post_class_unload_events();
+    JFR_ONLY(post_class_unload_events();)
   }
 
   return seen_dead_loader;
@@ -820,20 +855,6 @@
   Metaspace::purge();
 }
 
-void ClassLoaderDataGraph::post_class_unload_events(void) {
-#if INCLUDE_TRACE
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
-  if (Tracing::enabled()) {
-    if (Tracing::is_event_enabled(TraceClassUnloadEvent)) {
-      assert(_unloading != NULL, "need class loader data unload list!");
-      _class_unload_time = Ticks::now();
-      classes_unloading_do(&class_unload_event);
-    }
-    Tracing::on_unloading_classes();
-  }
-#endif
-}
-
 void ClassLoaderDataGraph::free_deallocate_lists() {
   for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
     // We need to keep this data until InstanceKlass::purge_previous_version has been
@@ -969,21 +990,3 @@
     class_loader()->print_value_on(out);
   }
 }
-
-#if INCLUDE_TRACE
-
-Ticks ClassLoaderDataGraph::_class_unload_time;
-
-void ClassLoaderDataGraph::class_unload_event(Klass* const k) {
-
-  // post class unload event
-  EventClassUnload event(UNTIMED);
-  event.set_endtime(_class_unload_time);
-  event.set_unloadedClass(k);
-  oop defining_class_loader = k->class_loader();
-  event.set_definingClassLoader(defining_class_loader != NULL ?
-                                defining_class_loader->klass() : (Klass*)NULL);
-  event.commit();
-}
-
-#endif // INCLUDE_TRACE
--- a/src/share/vm/classfile/classLoaderData.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/classfile/classLoaderData.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -32,8 +32,8 @@
 #include "runtime/mutex.hpp"
 #include "utilities/growableArray.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_TRACE
-#include "utilities/ticks.hpp"
+#if INCLUDE_JFR
+#include "jfr/support/jfrTraceIdExtension.hpp"
 #endif
 
 //
@@ -70,7 +70,6 @@
   static bool _should_purge;
 
   static ClassLoaderData* add(Handle class_loader, bool anonymous, TRAPS);
-  static void post_class_unload_events(void);
   static void clean_metaspaces();
  public:
   static ClassLoaderData* find_or_create(Handle class_loader, TRAPS);
@@ -82,6 +81,7 @@
   static void always_strong_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
   // cld do
   static void cld_do(CLDClosure* cl);
+  static void cld_unloading_do(CLDClosure* cl);
   static void roots_cld_do(CLDClosure* strong, CLDClosure* weak);
   static void keep_alive_cld_do(CLDClosure* cl);
   static void always_strong_cld_do(CLDClosure* cl);
@@ -116,12 +116,6 @@
 #ifndef PRODUCT
   static bool contains_loader_data(ClassLoaderData* loader_data);
 #endif
-
-#if INCLUDE_TRACE
- private:
-  static Ticks _class_unload_time;
-  static void class_unload_event(Klass* const k);
-#endif
 };
 
 // ClassLoaderData class
@@ -213,6 +207,8 @@
   static Metaspace* _ro_metaspace;
   static Metaspace* _rw_metaspace;
 
+  JFR_ONLY(DEFINE_TRACE_ID_FIELD;)
+
   void set_next(ClassLoaderData* next) { _next = next; }
   ClassLoaderData* next() const        { return _next; }
 
@@ -223,11 +219,6 @@
 
   Mutex* metaspace_lock() const { return _metaspace_lock; }
 
-  // GC interface.
-  void clear_claimed()          { _claimed = 0; }
-  bool claimed() const          { return _claimed == 1; }
-  bool claim();
-
   void unload();
   bool keep_alive() const       { return _keep_alive; }
   void classes_do(void f(Klass*));
@@ -242,6 +233,11 @@
 
  public:
 
+  // GC interface.
+  void clear_claimed()          { _claimed = 0; }
+  bool claimed() const          { return _claimed == 1; }
+  bool claim();
+
   bool is_alive(BoolObjectClosure* is_alive_closure) const;
 
   // Accessors
@@ -325,6 +321,8 @@
   Metaspace* ro_metaspace();
   Metaspace* rw_metaspace();
   void initialize_shared_metaspaces();
+
+  JFR_ONLY(DEFINE_TRACE_ID_METHODS;)
 };
 
 // An iterator that distributes Klasses to parallel worker threads.
--- a/src/share/vm/classfile/javaClasses.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/classfile/javaClasses.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -1047,7 +1047,8 @@
 
 // Read thread status value from threadStatus field in java.lang.Thread java class.
 java_lang_Thread::ThreadStatus java_lang_Thread::get_thread_status(oop java_thread) {
-  assert(Thread::current()->is_Watcher_thread() || Thread::current()->is_VM_thread() ||
+  assert(Threads_lock->owned_by_self() || Thread::current()->is_Watcher_thread() ||
+         Thread::current()->is_VM_thread() ||
          JavaThread::current()->thread_state() == _thread_in_vm,
          "Java Thread is not running in vm");
   // The threadStatus is only present starting in 1.5
--- a/src/share/vm/classfile/systemDictionary.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/classfile/systemDictionary.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -38,6 +38,7 @@
 #include "compiler/compileBroker.hpp"
 #include "interpreter/bytecodeStream.hpp"
 #include "interpreter/interpreter.hpp"
+#include "jfr/jfrEvents.hpp"
 #include "memory/filemap.hpp"
 #include "memory/gcLocker.hpp"
 #include "memory/oopFactory.hpp"
@@ -64,9 +65,6 @@
 #include "services/threadService.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/ticks.hpp"
-#if INCLUDE_TRACE
-#include "trace/tracing.hpp"
-#endif
 
 Dictionary*            SystemDictionary::_dictionary          = NULL;
 PlaceholderTable*      SystemDictionary::_placeholders        = NULL;
@@ -598,6 +596,22 @@
   return (nh);
 }
 
+// utility function for class load event
+static void post_class_load_event(EventClassLoad &event,
+                                  instanceKlassHandle k,
+                                  Handle initiating_loader) {
+#if INCLUDE_JFR
+  if (event.should_commit()) {
+    event.set_loadedClass(k());
+    event.set_definingClassLoader(k->class_loader_data());
+    oop class_loader = initiating_loader.is_null() ? (oop)NULL : initiating_loader();
+    event.set_initiatingClassLoader(class_loader != NULL ?
+                                    ClassLoaderData::class_loader_data_or_null(class_loader) :
+                                    (ClassLoaderData*)NULL);
+    event.commit();
+  }
+#endif // INCLUDE_JFR
+}
 
 Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
                                                         Handle class_loader,
@@ -606,7 +620,7 @@
   assert(name != NULL && !FieldType::is_array(name) &&
          !FieldType::is_obj(name), "invalid class name");
 
-  Ticks class_load_start_time = Ticks::now();
+  EventClassLoad class_load_start_event;
 
   // UseNewReflection
   // Fix for 4474172; see evaluation for more details
@@ -857,7 +871,7 @@
     return NULL;
   }
 
-  post_class_load_event(class_load_start_time, k, class_loader);
+  post_class_load_event(class_load_start_event, k, class_loader);
 
 #ifdef ASSERT
   {
@@ -982,7 +996,7 @@
                                       TRAPS) {
   TempNewSymbol parsed_name = NULL;
 
-  Ticks class_load_start_time = Ticks::now();
+  EventClassLoad class_load_start_event;
 
   ClassLoaderData* loader_data;
   if (host_klass.not_null()) {
@@ -1043,7 +1057,7 @@
         JvmtiExport::post_class_load((JavaThread *) THREAD, k());
     }
 
-    post_class_load_event(class_load_start_time, k, class_loader);
+    post_class_load_event(class_load_start_event, k, class_loader);
   }
   assert(host_klass.not_null() || cp_patches == NULL,
          "cp_patches only found with host_klass");
@@ -1085,12 +1099,13 @@
   //
   // Note: "name" is updated.
 
-  instanceKlassHandle k = ClassFileParser(st).parseClassFile(class_name,
-                                                             loader_data,
-                                                             protection_domain,
-                                                             parsed_name,
-                                                             verify,
-                                                             THREAD);
+  ClassFileParser parser(st);
+  instanceKlassHandle k = parser.parseClassFile(class_name,
+                                                loader_data,
+                                                protection_domain,
+                                                parsed_name,
+                                                verify,
+                                                THREAD);
 
   const char* pkg = "java/";
   size_t pkglen = strlen(pkg);
@@ -1125,6 +1140,14 @@
     assert(is_internal_format(parsed_name),
            "external class name format used internally");
 
+#if INCLUDE_JFR
+    {
+      InstanceKlass* ik = k();
+      ON_KLASS_CREATION(ik, parser, THREAD);
+      k = instanceKlassHandle(ik);
+    }
+#endif
+
     // Add class just loaded
     // If a class loader supports parallel classloading handle parallel define requests
     // find_or_define_instance_class may return a different InstanceKlass
@@ -1385,6 +1408,15 @@
   }
 }
 
+static void post_class_define_event(InstanceKlass* k, const ClassLoaderData* def_cld) {
+  EventClassDefine event;
+  if (event.should_commit()) {
+    event.set_definedClass(k);
+    event.set_definingClassLoader(def_cld);
+    event.commit();
+  }
+}
+
 void SystemDictionary::define_instance_class(instanceKlassHandle k, TRAPS) {
 
   ClassLoaderData* loader_data = k->class_loader_data();
@@ -1455,6 +1487,7 @@
 
   }
 
+  post_class_define_event(k(), loader_data);
 }
 
 // Support parallel classloading
@@ -2687,26 +2720,6 @@
   constraints()->verify(dictionary(), placeholders());
 }
 
-// utility function for class load event
-void SystemDictionary::post_class_load_event(const Ticks& start_time,
-                                             instanceKlassHandle k,
-                                             Handle initiating_loader) {
-#if INCLUDE_TRACE
-  EventClassLoad event(UNTIMED);
-  if (event.should_commit()) {
-    event.set_starttime(start_time);
-    event.set_loadedClass(k());
-    oop defining_class_loader = k->class_loader();
-    event.set_definingClassLoader(defining_class_loader !=  NULL ?
-                                    defining_class_loader->klass() : (Klass*)NULL);
-    oop class_loader = initiating_loader.is_null() ? (oop)NULL : initiating_loader();
-    event.set_initiatingClassLoader(class_loader != NULL ?
-                                      class_loader->klass() : (Klass*)NULL);
-    event.commit();
-  }
-#endif // INCLUDE_TRACE
-}
-
 #ifndef PRODUCT
 
 // statistics code
--- a/src/share/vm/classfile/systemDictionary.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/classfile/systemDictionary.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -77,7 +77,6 @@
 template <MEMFLAGS F> class HashtableBucket;
 class ResolutionErrorTable;
 class SymbolPropertyTable;
-class Ticks;
 
 // Certain classes are preloaded, such as java.lang.Object and java.lang.String.
 // They are all "well-known", in the sense that no class loader is allowed
@@ -654,9 +653,6 @@
   // Setup link to hierarchy
   static void add_to_hierarchy(instanceKlassHandle k, TRAPS);
 
-  // event based tracing
-  static void post_class_load_event(const Ticks& start_time, instanceKlassHandle k,
-                                    Handle initiating_loader);
   // We pass in the hashtable index so we can calculate it outside of
   // the SystemDictionary_lock.
 
--- a/src/share/vm/classfile/vmSymbols.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/classfile/vmSymbols.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -328,7 +328,7 @@
 bool vmIntrinsics::should_be_pinned(vmIntrinsics::ID id) {
   assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
   switch(id) {
-#ifdef TRACE_HAVE_INTRINSICS
+#ifdef JFR_HAVE_INTRINSICS
   case vmIntrinsics::_counterTime:
 #endif
   case vmIntrinsics::_currentTimeMillis:
--- a/src/share/vm/classfile/vmSymbols.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/classfile/vmSymbols.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -25,9 +25,10 @@
 #ifndef SHARE_VM_CLASSFILE_VMSYMBOLS_HPP
 #define SHARE_VM_CLASSFILE_VMSYMBOLS_HPP
 
-#include "oops/symbol.hpp"
+#include "jfr/support/jfrIntrinsics.hpp"
 #include "memory/iterator.hpp"
-#include "trace/traceMacros.hpp"
+#include "oops/symbol.hpp"
+#include "utilities/macros.hpp"
 
 // The class vmSymbols is a name space for fast lookup of
 // symbols commonly used in the VM.
@@ -606,8 +607,8 @@
   template(classRedefinedCount_name,                   "classRedefinedCount")                                     \
   template(classLoader_name,                           "classLoader")                                             \
                                                                                                                   \
-  /* trace signatures */                                                                                          \
-  TRACE_TEMPLATES(template)                                                                                       \
+  /* jfr signatures */                                                                                            \
+  JFR_TEMPLATES(template)                                                                                         \
                                                                                                                   \
   /*end*/
 
@@ -736,7 +737,7 @@
   do_intrinsic(_nanoTime,                 java_lang_System,       nanoTime_name,          void_long_signature,   F_S)   \
    do_name(     nanoTime_name,                                   "nanoTime")                                            \
                                                                                                                         \
-  TRACE_INTRINSICS(do_intrinsic, do_class, do_name, do_signature, do_alias)                                             \
+  JFR_INTRINSICS(do_intrinsic, do_class, do_name, do_signature, do_alias)                                               \
                                                                                                                         \
   do_intrinsic(_arraycopy,                java_lang_System,       arraycopy_name, arraycopy_signature,           F_S)   \
    do_name(     arraycopy_name,                                  "arraycopy")                                           \
--- a/src/share/vm/code/codeCache.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/code/codeCache.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -32,6 +32,7 @@
 #include "code/pcDesc.hpp"
 #include "compiler/compileBroker.hpp"
 #include "gc_implementation/shared/markSweep.hpp"
+#include "jfr/jfrEvents.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/gcLocker.hpp"
 #include "memory/iterator.hpp"
@@ -46,9 +47,9 @@
 #include "runtime/java.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "services/memoryService.hpp"
-#include "trace/tracing.hpp"
 #include "utilities/xmlstream.hpp"
 
+
 // Helper class for printing in CodeCache
 
 class CodeBlob_sizes {
--- a/src/share/vm/compiler/compileBroker.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/compiler/compileBroker.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -30,6 +30,7 @@
 #include "compiler/compileLog.hpp"
 #include "compiler/compilerOracle.hpp"
 #include "interpreter/linkResolver.hpp"
+#include "jfr/jfrEvents.hpp"
 #include "memory/allocation.inline.hpp"
 #include "oops/methodData.hpp"
 #include "oops/method.hpp"
@@ -43,7 +44,6 @@
 #include "runtime/os.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/sweeper.hpp"
-#include "trace/tracing.hpp"
 #include "utilities/dtrace.hpp"
 #include "utilities/events.hpp"
 #ifdef COMPILER1
@@ -1913,6 +1913,19 @@
   tty->print("%s", s.as_string());
 }
 
+static void post_compilation_event(EventCompilation* event, CompileTask* task) {
+  assert(event != NULL, "invariant");
+  assert(event->should_commit(), "invariant");
+  event->set_method(task->method());
+  event->set_compileId(task->compile_id());
+  event->set_compileLevel(task->comp_level());
+  event->set_succeded(task->is_success());
+  event->set_isOsr(task->osr_bci() != CompileBroker::standard_entry_bci);
+  event->set_codeSize((task->code() == NULL) ? 0 : task->code()->total_size());
+  event->set_inlinedBytes(task->num_inlined_bytecodes());
+  event->commit();
+}
+
 // ------------------------------------------------------------------
 // CompileBroker::invoke_compiler_on_method
 //
@@ -2010,8 +2023,9 @@
     compilable = ci_env.compilable();
 
     if (ci_env.failing()) {
-      task->set_failure_reason(ci_env.failure_reason());
+      const char* failure_reason = ci_env.failure_reason();
       const char* retry_message = ci_env.retry_message();
+      task->set_failure_reason(failure_reason);
       if (_compilation_log != NULL) {
         _compilation_log->log_failure(thread, task, ci_env.failure_reason(), retry_message);
       }
@@ -2021,6 +2035,13 @@
             err_msg_res("COMPILE SKIPPED: %s",      ci_env.failure_reason());
         task->print_compilation(tty, msg);
       }
+
+      EventCompilationFailure event;
+      if (event.should_commit()) {
+        event.set_compileId(compile_id);
+        event.set_failureMessage(failure_reason);
+        event.commit();
+      }
     } else {
       task->mark_success();
       task->set_num_inlined_bytecodes(ci_env.num_inlined_bytecodes());
@@ -2034,14 +2055,7 @@
     // simulate crash during compilation
     assert(task->compile_id() != CICrashAt, "just as planned");
     if (event.should_commit()) {
-      event.set_method(target->get_Method());
-      event.set_compileID(compile_id);
-      event.set_compileLevel(task->comp_level());
-      event.set_succeded(task->is_success());
-      event.set_isOsr(is_osr);
-      event.set_codeSize((task->code() == NULL) ? 0 : task->code()->total_size());
-      event.set_inlinedBytes(task->num_inlined_bytecodes());
-      event.commit();
+      post_compilation_event(&event, task);
     }
   }
   pop_jni_handle_block();
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -4089,7 +4089,7 @@
         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
 #endif // YOUNG_LIST_VERBOSE
 
-        g1_policy()->record_collection_pause_start(sample_start_time_sec);
+        g1_policy()->record_collection_pause_start(sample_start_time_sec, *_gc_tracer_stw);
 
         double scan_wait_start = os::elapsedTime();
         // We have to wait until the CM threads finish scanning the
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -75,7 +75,6 @@
 class G1OldTracer;
 class EvacuationFailedInfo;
 class nmethod;
-class Ticks;
 
 typedef OverflowTaskQueue<StarTask, mtGC>         RefToScanQueue;
 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -839,11 +839,11 @@
   _stop_world_start = os::elapsedTime();
 }
 
-void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
+void G1CollectorPolicy::record_collection_pause_start(double start_time_sec, GCTracer &tracer) {
   // We only need to do this here as the policy will only be applied
   // to the GC we're about to start, so there is no point in calculating this
   // every time we calculate / recalculate the target young length.
-  update_survivors_policy();
+  update_survivors_policy(tracer);
 
   assert(_g1->used() == _g1->recalculate_used(),
          err_msg("sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT,
@@ -1453,7 +1453,7 @@
 }
 
 // Calculates survivor space parameters.
-void G1CollectorPolicy::update_survivors_policy() {
+void G1CollectorPolicy::update_survivors_policy(GCTracer &tracer) {
   double max_survivor_regions_d =
                  (double) _young_list_target_length / (double) SurvivorRatio;
   // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
@@ -1461,7 +1461,7 @@
   _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
 
   _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
-        HeapRegion::GrainWords * _max_survivor_regions);
+        HeapRegion::GrainWords * _max_survivor_regions, tracer);
 }
 
 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -672,7 +672,7 @@
   bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);
 
   // Record the start and end of an evacuation pause.
-  void record_collection_pause_start(double start_time_sec);
+  void record_collection_pause_start(double start_time_sec, GCTracer &tracer);
   void record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info);
 
   // Record the start and end of a full collection.
@@ -920,7 +920,7 @@
   void update_max_gc_locker_expansion();
 
   // Calculates survivor space parameters.
-  void update_survivors_policy();
+  void update_survivors_policy(GCTracer &tracer);
 
   virtual void post_heap_initialize();
 };
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -583,13 +583,12 @@
 G1GCParPhaseTimesTracker::G1GCParPhaseTimesTracker(G1GCPhaseTimes* phase_times, G1GCPhaseTimes::GCParPhases phase, uint worker_id) :
     _phase_times(phase_times), _phase(phase), _worker_id(worker_id) {
   if (_phase_times != NULL) {
-    _start_time = os::elapsedTime();
+    _start_time = Ticks::now();
   }
 }
 
 G1GCParPhaseTimesTracker::~G1GCParPhaseTimesTracker() {
   if (_phase_times != NULL) {
-    _phase_times->record_time_secs(_phase, _worker_id, os::elapsedTime() - _start_time);
+    _phase_times->record_time_secs(_phase, _worker_id, (Ticks::now() - _start_time).seconds());
   }
 }
-
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -278,7 +278,7 @@
 };
 
 class G1GCParPhaseTimesTracker : public StackObj {
-  double _start_time;
+  Ticks _start_time;
   G1GCPhaseTimes::GCParPhases _phase;
   G1GCPhaseTimes* _phase_times;
   uint _worker_id;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1HeapRegionEventSender.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
+#include "g1HeapRegionEventSender.hpp"
+#include "jfr/jfrEvents.hpp"
+
+class DumpEventInfoClosure : public HeapRegionClosure {
+public:
+  bool doHeapRegion(HeapRegion* r) {
+    EventG1HeapRegionInformation evt;
+    evt.set_index(r->hrm_index());
+    // XXX TODO evt.set_type(r->get_trace_type());
+    evt.set_start((uintptr_t)r->bottom());
+    evt.set_used(r->used());
+    evt.commit();
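+    // Returning false keeps the heap region iteration going over all regions.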
+    return false;
+  }
+};
+
+
+void G1HeapRegionEventSender::send_events() {
+  DumpEventInfoClosure c;
+
+  G1CollectedHeap::heap()->heap_region_iterate(&c);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1HeapRegionEventSender.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1HEAPREGIONEVENTSENDER_HPP
+#define SHARE_VM_GC_G1_G1HEAPREGIONEVENTSENDER_HPP
+
+#include "memory/allocation.hpp"
+
+class G1HeapRegionEventSender : public AllStatic {
+public:
+  static void send_events();
+};
+
+#endif // SHARE_VM_GC_G1_G1HEAPREGIONEVENTSENDER_HPP
--- a/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
 #include "gc_implementation/g1/g1MMUTracker.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "utilities/ostream.hpp"
@@ -105,6 +106,9 @@
     ++_no_entries;
   }
   _array[_head_index] = G1MMUTrackerQueueElem(start, end);
+
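+  // Compute the GC time within the current time slice and report it to the JFR MMU tracer.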
+  double slice_time = calculate_gc_time(end);
+  G1MMUTracer::report_mmu(_time_slice, slice_time, _max_gc_time);
 }
 
 // basically the _internal call does not remove expired entries
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -52,6 +52,9 @@
 #include "runtime/vmThread.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/events.hpp"
+#if INCLUDE_JFR
+#include "jfr/jfr.hpp"
+#endif // INCLUDE_JFR
 
 class HeapRegion;
 
@@ -261,6 +264,7 @@
   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
   JNIHandles::weak_oops_do(&GenMarkSweep::adjust_pointer_closure);
+  JFR_ONLY(Jfr::weak_oops_do(&GenMarkSweep::adjust_pointer_closure));
 
   if (G1StringDedup::is_enabled()) {
     G1StringDedup::oops_do(&GenMarkSweep::adjust_pointer_closure);
--- a/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -187,6 +187,21 @@
   }
 }
 
+void G1ParScanThreadState::report_promotion_event(InCSetState const dest_state,
+                                                  oop const old, size_t word_sz, uint age,
+                                                  HeapWord * const obj_ptr,
+                                                  AllocationContext_t context) const {
+  ParGCAllocBuffer* alloc_buf = _g1_par_allocator->alloc_buffer(dest_state, context);
+  if (alloc_buf->contains(obj_ptr)) {
+    _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz, age,
+                                                             dest_state.value() == InCSetState::Old,
+                                                             alloc_buf->word_sz());
+  } else {
+    _g1h->_gc_tracer_stw->report_promotion_outside_plab_event(old->klass(), word_sz, age,
+                                                              dest_state.value() == InCSetState::Old);
+  }
+}
+
 InCSetState G1ParScanThreadState::next_state(InCSetState const state, markOop const m, uint& age) {
   if (state.is_young()) {
     age = !m->has_displaced_mark_helper() ? m->age()
@@ -225,6 +240,10 @@
         return _g1h->handle_evacuation_failure_par(this, old);
       }
     }
+    if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
+      // The events are checked individually as part of the actual commit
+      report_promotion_event(dest_state, old, word_sz, age, obj_ptr, context);
+    }
   }
 
   assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
--- a/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -212,6 +212,10 @@
                                   size_t word_sz,
                                   AllocationContext_t const context);
 
+  void report_promotion_event(InCSetState const dest_state,
+                              oop const old, size_t word_sz, uint age,
+                              HeapWord * const obj_ptr, AllocationContext_t context) const;
+
   inline InCSetState next_state(InCSetState const state, markOop const m, uint& age);
  public:
 
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -1048,7 +1048,7 @@
 
     assert(to()->is_empty(), "to space should be empty now");
 
-    adjust_desired_tenuring_threshold();
+    adjust_desired_tenuring_threshold(gc_tracer);
   } else {
     handle_promotion_failed(gch, thread_state_set, gc_tracer);
   }
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -53,6 +53,9 @@
 #include "services/memoryService.hpp"
 #include "utilities/events.hpp"
 #include "utilities/stack.inline.hpp"
+#if INCLUDE_JFR
+#include "jfr/jfr.hpp"
+#endif // INCLUDE_JFR
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
@@ -624,6 +627,7 @@
   // have been cleared if they pointed to non-surviving objects.)
   // Global (weak) JNI handles
   JNIHandles::weak_oops_do(adjust_pointer_closure());
+  JFR_ONLY(Jfr::weak_oops_do(adjust_pointer_closure()));
 
   CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
   CodeCache::blobs_do(&adjust_from_blobs);
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -58,6 +58,9 @@
 #include "services/memTracker.hpp"
 #include "utilities/events.hpp"
 #include "utilities/stack.inline.hpp"
+#if INCLUDE_JFR
+#include "jfr/jfr.hpp"
+#endif // INCLUDE_JFR
 
 #include <math.h>
 
@@ -2464,6 +2467,7 @@
   // have been cleared if they pointed to non-surviving objects.)
   // Global (weak) JNI handles
   JNIHandles::weak_oops_do(adjust_pointer_closure());
+  JFR_ONLY(Jfr::weak_oops_do(adjust_pointer_closure()));
 
   CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
   CodeCache::blobs_do(&adjust_from_blobs);
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -148,6 +148,10 @@
     claimed_stack_depth()->push(p);
   }
 
+  inline void promotion_trace_event(oop new_obj, oop old_obj, size_t obj_size,
+                                    uint age, bool tenured,
+                                    const PSPromotionLAB* lab);
+
  protected:
   static OopStarTaskQueueSet* stack_array_depth()   { return _stack_array_depth; }
  public:
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -64,6 +64,33 @@
   claim_or_forward_internal_depth(p);
 }
 
+inline void PSPromotionManager::promotion_trace_event(oop new_obj, oop old_obj,
+                                                      size_t obj_size,
+                                                      uint age, bool tenured,
+                                                      const PSPromotionLAB* lab) {
+  // Skip if memory allocation failed
+  if (new_obj != NULL) {
+    const ParallelScavengeTracer* gc_tracer = PSScavenge::gc_tracer();
+
+    if (lab != NULL) {
+      // Promotion of object through newly allocated PLAB
+      if (gc_tracer->should_report_promotion_in_new_plab_event()) {
+        size_t obj_bytes = obj_size * HeapWordSize;
+        size_t lab_size = lab->capacity();
+        gc_tracer->report_promotion_in_new_plab_event(old_obj->klass(), obj_bytes,
+                                                      age, tenured, lab_size);
+      }
+    } else {
+      // Promotion of object directly to heap
+      if (gc_tracer->should_report_promotion_outside_plab_event()) {
+        size_t obj_bytes = obj_size * HeapWordSize;
+        gc_tracer->report_promotion_outside_plab_event(old_obj->klass(), obj_bytes,
+                                                       age, tenured);
+      }
+    }
+  }
+}
+
 //
 // This method is pretty bulky. It would be nice to split it up
 // into smaller submethods, but we need to be careful not to hurt
@@ -85,11 +112,11 @@
     bool new_obj_is_tenured = false;
     size_t new_obj_size = o->size();
 
+    // Find the object's age, MT safe.
+    uint age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
+      test_mark->displaced_mark_helper()->age() : test_mark->age();
+
     if (!promote_immediately) {
-      // Find the objects age, MT safe.
-      uint age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
-        test_mark->displaced_mark_helper()->age() : test_mark->age();
-
       // Try allocating obj in to-space (unless too old)
       if (age < PSScavenge::tenuring_threshold()) {
         new_obj = (oop) _young_lab.allocate(new_obj_size);
@@ -98,6 +125,7 @@
           if (new_obj_size > (YoungPLABSize / 2)) {
             // Allocate this object directly
             new_obj = (oop)young_space()->cas_allocate(new_obj_size);
+            promotion_trace_event(new_obj, o, new_obj_size, age, false, NULL);
           } else {
             // Flush and fill
             _young_lab.flush();
@@ -107,6 +135,7 @@
               _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
               // Try the young lab allocation again.
               new_obj = (oop) _young_lab.allocate(new_obj_size);
+              promotion_trace_event(new_obj, o, new_obj_size, age, false, &_young_lab);
             } else {
               _young_gen_is_full = true;
             }
@@ -132,6 +161,7 @@
           if (new_obj_size > (OldPLABSize / 2)) {
             // Allocate this object directly
             new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
+            promotion_trace_event(new_obj, o, new_obj_size, age, true, NULL);
           } else {
             // Flush and fill
             _old_lab.flush();
@@ -148,6 +178,7 @@
               _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
               // Try the old lab allocation again.
               new_obj = (oop) _old_lab.allocate(new_obj_size);
+              promotion_trace_event(new_obj, o, new_obj_size, age, true, &_old_lab);
             }
           }
         }
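
promotion_trace_event is invoked on each successful copy path: with the address of the young or old PLAB when the object landed in a freshly refilled buffer, and with NULL when it was allocated directly in the young or old space; failed allocations (new_obj == NULL) are skipped, and sizes are converted from heap words to bytes before reporting. A simplified standalone mirror of that branching (the types and printouts are illustrative only):

// Simplified mirror of the in-PLAB vs outside-PLAB reporting split; not HotSpot code.
#include <cstddef>
#include <cstdio>

static const size_t kHeapWordSize = sizeof(void*); // stands in for HeapWordSize

struct Lab { size_t capacity_words; };

static void promotion_trace_event(const void* new_obj, size_t obj_words,
                                  unsigned age, bool tenured, const Lab* lab) {
  if (new_obj == NULL) {
    return; // allocation failed, nothing to report
  }
  size_t obj_bytes = obj_words * kHeapWordSize;
  if (lab != NULL) {
    std::printf("promoted in new PLAB: %zu bytes, age %u, tenured=%d, plab=%zu bytes\n",
                obj_bytes, age, (int)tenured, lab->capacity_words * kHeapWordSize);
  } else {
    std::printf("promoted outside PLAB: %zu bytes, age %u, tenured=%d\n",
                obj_bytes, age, (int)tenured);
  }
}

int main() {
  Lab young_lab = { 512 };
  int obj = 0;
  promotion_trace_event(&obj, 8, 3, false, &young_lab); // copy into a refilled young PLAB
  promotion_trace_event(&obj, 600, 3, true, NULL);      // large object allocated directly
  promotion_trace_event(NULL, 8, 3, false, NULL);       // failed allocation: skipped
  return 0;
}
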
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -92,6 +92,7 @@
 
   // Private accessors
   static CardTableExtension* const card_table()       { assert(_card_table != NULL, "Sanity"); return _card_table; }
+  static const ParallelScavengeTracer* gc_tracer() { return &_gc_tracer; }
 
  public:
   // Accessors
--- a/src/share/vm/gc_implementation/shared/ageTable.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/gc_implementation/shared/ageTable.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "gc_implementation/shared/ageTable.hpp"
+#include "gc_implementation/shared/ageTableTracer.hpp"
 #include "gc_implementation/shared/gcPolicyCounters.hpp"
 #include "memory/collectorPolicy.hpp"
 #include "memory/resourceArea.hpp"
@@ -78,7 +79,7 @@
   }
 }
 
-uint ageTable::compute_tenuring_threshold(size_t survivor_capacity) {
+uint ageTable::compute_tenuring_threshold(size_t survivor_capacity, GCTracer &tracer) {
   size_t desired_survivor_size = (size_t)((((double) survivor_capacity)*TargetSurvivorRatio)/100);
   size_t total = 0;
   uint age = 1;
@@ -92,7 +93,7 @@
   }
   uint result = age < MaxTenuringThreshold ? age : MaxTenuringThreshold;
 
-  if (PrintTenuringDistribution || UsePerfData) {
+  if (PrintTenuringDistribution || UsePerfData || AgeTableTracer::is_tenuring_distribution_event_enabled()) {
 
     if (PrintTenuringDistribution) {
       gclog_or_tty->cr();
@@ -110,6 +111,7 @@
                                         age,    sizes[age]*oopSize,          total*oopSize);
         }
       }
+      AgeTableTracer::send_tenuring_distribution_event(age, sizes[age]*oopSize, tracer);
       if (UsePerfData) {
         _perf_sizes[age]->set_value(sizes[age]*oopSize);
       }
--- a/src/share/vm/gc_implementation/shared/ageTable.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/gc_implementation/shared/ageTable.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -27,6 +27,7 @@
 
 #include "oops/markOop.hpp"
 #include "oops/oop.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
 #include "runtime/perfData.hpp"
 
 /* Copyright (c) 1992-2009 Oracle and/or its affiliates, and Stanford University.
@@ -69,7 +70,7 @@
   void merge_par(ageTable* subTable);
 
   // calculate new tenuring threshold based on age information
-  uint compute_tenuring_threshold(size_t survivor_capacity);
+  uint compute_tenuring_threshold(size_t survivor_capacity, GCTracer &tracer);
 
  private:
   PerfVariable* _perf_sizes[table_size];
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/shared/ageTableTracer.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/shared/ageTableTracer.hpp"
+#include "gc_implementation/shared/gcId.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "jfr/jfrEvents.hpp"
+
+void AgeTableTracer::send_tenuring_distribution_event(uint age, size_t size, GCTracer &tracer) {
+  EventTenuringDistribution e;
+  if (e.should_commit()) {
+    e.set_gcId(tracer.gc_id().id());
+    e.set_age(age);
+    e.set_size(size);
+    e.commit();
+  }
+}
+
+bool AgeTableTracer::is_tenuring_distribution_event_enabled() {
+  return EventTenuringDistribution::is_enabled();
+}
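
compute_tenuring_threshold picks the smallest age at which the cumulative volume of surviving objects exceeds the desired survivor size (survivor capacity scaled by TargetSurvivorRatio), capped at MaxTenuringThreshold; the new tracer parameter lets the per-age sizes also be emitted as TenuringDistribution events when that event is enabled. A standalone sketch of the threshold calculation (the accumulation loop is paraphrased and simplified, not copied from HotSpot):

// Simplified sketch of the tenuring-threshold calculation; not HotSpot code.
#include <cstddef>
#include <cstdio>

static unsigned compute_tenuring_threshold(const size_t* sizes_in_words, unsigned table_size,
                                           size_t survivor_capacity_words,
                                           unsigned target_survivor_ratio,
                                           unsigned max_tenuring_threshold) {
  size_t desired_survivor_size =
      (size_t)(((double)survivor_capacity_words * target_survivor_ratio) / 100);
  size_t total = 0;
  unsigned age = 1;
  // Walk ages until the cumulative volume of surviving objects passes the target.
  while (age < table_size) {
    total += sizes_in_words[age];
    if (total > desired_survivor_size) {
      break;
    }
    age++;
  }
  return age < max_tenuring_threshold ? age : max_tenuring_threshold;
}

int main() {
  // Words surviving at each age (index 0 unused, as ages start at 1).
  size_t sizes[16] = { 0, 400, 300, 250, 100, 50 };
  unsigned t = compute_tenuring_threshold(sizes, 16, /*survivor capacity*/ 2000,
                                          /*TargetSurvivorRatio*/ 50, /*MaxTenuringThreshold*/ 15);
  std::printf("tenuring threshold = %u\n", t); // cumulative sizes pass 1000 words at age 4
  return 0;
}
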
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/shared/ageTableTracer.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHARED_AGETABLETRACER_HPP
+#define SHARE_VM_GC_SHARED_AGETABLETRACER_HPP
+
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "memory/allocation.hpp"
+
+class AgeTableTracer : AllStatic {
+  public:
+    static void send_tenuring_distribution_event(uint age, size_t size, GCTracer &tracer);
+    static bool is_tenuring_distribution_event_enabled();
+};
+
+#endif // SHARE_VM_GC_SHARED_AGETABLETRACER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/shared/gcConfiguration.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+
+#include "gc_interface/collectedHeap.hpp"
+#include "gc_implementation/shared/gcConfiguration.hpp"
+#include "memory/universe.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/globals.hpp"
+#include "utilities/debug.hpp"
+
+GCName GCConfiguration::young_collector() const {
+  if (UseG1GC) {
+    return G1New;
+  }
+
+  if (UseParallelGC) {
+    return ParallelScavenge;
+  }
+
+  if (UseConcMarkSweepGC) {
+    return ParNew;
+  }
+
+  return DefNew;
+}
+
+GCName GCConfiguration::old_collector() const {
+  if (UseG1GC) {
+    return G1Old;
+  }
+
+  if (UseConcMarkSweepGC) {
+    return ConcurrentMarkSweep;
+  }
+
+  if (UseParallelOldGC) {
+    return ParallelOld;
+  }
+
+  return SerialOld;
+}
+
+uint GCConfiguration::num_parallel_gc_threads() const {
+  return ParallelGCThreads;
+}
+
+uint GCConfiguration::num_concurrent_gc_threads() const {
+  return ConcGCThreads;
+}
+
+bool GCConfiguration::uses_dynamic_gc_threads() const {
+  return UseDynamicNumberOfGCThreads;
+}
+
+bool GCConfiguration::is_explicit_gc_concurrent() const {
+  return ExplicitGCInvokesConcurrent;
+}
+
+bool GCConfiguration::is_explicit_gc_disabled() const {
+  return DisableExplicitGC;
+}
+
+bool GCConfiguration::has_pause_target_default_value() const {
+  return FLAG_IS_DEFAULT(MaxGCPauseMillis);
+}
+
+uintx GCConfiguration::pause_target() const {
+  return MaxGCPauseMillis;
+}
+
+uintx GCConfiguration::gc_time_ratio() const {
+  return GCTimeRatio;
+}
+
+bool GCTLABConfiguration::uses_tlabs() const {
+  return UseTLAB;
+}
+
+size_t GCTLABConfiguration::min_tlab_size() const {
+  return MinTLABSize;
+}
+
+uint GCTLABConfiguration::tlab_refill_waste_limit() const {
+  return TLABRefillWasteFraction;
+}
+
+intx GCSurvivorConfiguration::max_tenuring_threshold() const {
+  return MaxTenuringThreshold;
+}
+
+intx GCSurvivorConfiguration::initial_tenuring_threshold() const {
+  return InitialTenuringThreshold;
+}
+
+size_t GCHeapConfiguration::max_size() const {
+  return MaxHeapSize;
+}
+
+size_t GCHeapConfiguration::min_size() const {
+  return Arguments::min_heap_size();
+}
+
+size_t GCHeapConfiguration::initial_size() const {
+  return InitialHeapSize;
+}
+
+bool GCHeapConfiguration::uses_compressed_oops() const {
+  return UseCompressedOops;
+}
+
+Universe::NARROW_OOP_MODE GCHeapConfiguration::narrow_oop_mode() const {
+  return Universe::narrow_oop_mode();
+}
+
+uint GCHeapConfiguration::object_alignment_in_bytes() const {
+  return ObjectAlignmentInBytes;
+}
+
+int GCHeapConfiguration::heap_address_size_in_bits() const {
+  return BitsPerHeapOop;
+}
+
+bool GCYoungGenerationConfiguration::has_max_size_default_value() const {
+  return FLAG_IS_DEFAULT(MaxNewSize);
+}
+
+uintx GCYoungGenerationConfiguration::max_size() const {
+  return MaxNewSize;
+}
+
+uintx GCYoungGenerationConfiguration::min_size() const {
+  return NewSize;
+}
+
+intx GCYoungGenerationConfiguration::new_ratio() const {
+  return NewRatio;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/shared/gcConfiguration.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHARED_GCCONFIGURATION_HPP
+#define SHARE_VM_GC_SHARED_GCCONFIGURATION_HPP
+
+#include "gc_interface/gcName.hpp"
+#include "memory/universe.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+class GCConfiguration {
+ public:
+  GCName young_collector() const;
+  GCName old_collector() const;
+  uint num_parallel_gc_threads() const;
+  uint num_concurrent_gc_threads() const;
+  bool uses_dynamic_gc_threads() const;
+  bool is_explicit_gc_concurrent() const;
+  bool is_explicit_gc_disabled() const;
+  uintx gc_time_ratio() const;
+
+  bool has_pause_target_default_value() const;
+  uintx pause_target() const;
+};
+
+class GCTLABConfiguration {
+ public:
+  bool uses_tlabs() const;
+  size_t min_tlab_size() const;
+  uint tlab_refill_waste_limit() const;
+};
+
+class GCSurvivorConfiguration {
+ public:
+  intx initial_tenuring_threshold() const;
+  intx max_tenuring_threshold() const;
+};
+
+class GCHeapConfiguration {
+ public:
+  size_t max_size() const;
+  size_t min_size() const;
+  size_t initial_size() const;
+  bool uses_compressed_oops() const;
+  Universe::NARROW_OOP_MODE narrow_oop_mode() const;
+  uint object_alignment_in_bytes() const;
+  int heap_address_size_in_bits() const;
+};
+
+class GCYoungGenerationConfiguration {
+ public:
+  bool has_max_size_default_value() const;
+  uintx max_size() const;
+
+  uintx min_size() const;
+  intx new_ratio() const;
+};
+
+#endif // SHARE_VM_GC_SHARED_GCCONFIGURATION_HPP
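
The GCConfiguration family is a thin, read-only facade over VM flags and Universe state (collector selection, GC thread counts, TLAB, survivor and heap sizing), so JFR code can publish the GC configuration without reading the flags directly. A hypothetical call site is sketched below; it assumes tty and GCNameHelper::to_string() are available as elsewhere in HotSpot and is not part of this changeset:

// Hypothetical call site for the facade above; not part of this changeset.
#include "gc_implementation/shared/gcConfiguration.hpp"
#include "gc_interface/gcName.hpp"
#include "utilities/ostream.hpp"

static void print_gc_configuration() {
  GCConfiguration conf;
  tty->print_cr("Young collector:       %s", GCNameHelper::to_string(conf.young_collector()));
  tty->print_cr("Old collector:         %s", GCNameHelper::to_string(conf.old_collector()));
  tty->print_cr("Parallel GC threads:   %u", conf.num_parallel_gc_threads());
  tty->print_cr("Concurrent GC threads: %u", conf.num_concurrent_gc_threads());
  if (!conf.has_pause_target_default_value()) {
    tty->print_cr("Pause target:          " UINTX_FORMAT " ms", conf.pause_target());
  }
}
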
--- a/src/share/vm/gc_implementation/shared/gcTimer.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/gc_implementation/shared/gcTimer.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -25,7 +25,7 @@
 #include "precompiled.hpp"
 #include "gc_implementation/shared/gcTimer.hpp"
 #include "utilities/growableArray.hpp"
-#include "utilities/ticks.inline.hpp"
+#include "utilities/ticks.hpp"
 
 // the "time" parameter for most functions
 // has a default value set by Ticks::now()
@@ -349,7 +349,7 @@
     GCTimer gc_timer;
     gc_timer.register_gc_start(1);
 
-    assert(gc_timer.gc_start() == 1, "Incorrect");
+    assert(gc_timer.gc_start() == Ticks(1), "Incorrect");
   }
 
   static void gc_end() {
@@ -357,7 +357,7 @@
     gc_timer.register_gc_start(1);
     gc_timer.register_gc_end(2);
 
-    assert(gc_timer.gc_end() == 2, "Incorrect");
+    assert(gc_timer.gc_end() == Ticks(2), "Incorrect");
   }
 };
 
--- a/src/share/vm/gc_implementation/shared/gcTrace.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/gc_implementation/shared/gcTrace.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -33,7 +33,7 @@
 #include "memory/referenceProcessorStats.hpp"
 #include "runtime/os.hpp"
 #include "utilities/globalDefinitions.hpp"
-#include "utilities/ticks.inline.hpp"
+#include "utilities/ticks.hpp"
 
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/evacuationInfo.hpp"
@@ -172,6 +172,30 @@
   _tenuring_threshold = tenuring_threshold;
 }
 
+bool YoungGCTracer::should_report_promotion_events() const {
+  return should_report_promotion_in_new_plab_event() ||
+          should_report_promotion_outside_plab_event();
+}
+
+bool YoungGCTracer::should_report_promotion_in_new_plab_event() const {
+  return should_send_promotion_in_new_plab_event();
+}
+
+bool YoungGCTracer::should_report_promotion_outside_plab_event() const {
+  return should_send_promotion_outside_plab_event();
+}
+
+void YoungGCTracer::report_promotion_in_new_plab_event(Klass* klass, size_t obj_size,
+                                                       uint age, bool tenured,
+                                                       size_t plab_size) const {
+  send_promotion_in_new_plab_event(klass, obj_size, age, tenured, plab_size);
+}
+
+void YoungGCTracer::report_promotion_outside_plab_event(Klass* klass, size_t obj_size,
+                                                        uint age, bool tenured) const {
+  send_promotion_outside_plab_event(klass, obj_size, age, tenured);
+}
+
 void OldGCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
   assert_set_gc_id();
 
@@ -199,6 +223,12 @@
 }
 
 #if INCLUDE_ALL_GCS
+void G1MMUTracer::report_mmu(double time_slice_sec, double gc_time_sec, double max_time_sec) {
+  send_g1_mmu_event(time_slice_sec * MILLIUNITS,
+                    gc_time_sec * MILLIUNITS,
+                    max_time_sec * MILLIUNITS);
+}
+
 void G1NewTracer::report_yc_type(G1YCType type) {
   assert_set_gc_id();
 
--- a/src/share/vm/gc_implementation/shared/gcTrace.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/gc_implementation/shared/gcTrace.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -156,10 +156,39 @@
  public:
   void report_promotion_failed(const PromotionFailedInfo& pf_info);
   void report_tenuring_threshold(const uint tenuring_threshold);
+  /*
+   * Methods for reporting Promotion in new or outside PLAB Events.
+   *
+   * The object age is always required as it is not certain that the mark word
+   * of the oop can be trusted at this stage.
+   *
+   * obj_size is the size of the promoted object in bytes.
+   *
+   * tenured should be true if the object has been promoted to the old
+   * space during this GC; if the object is copied to survivor space
+   * from young space or survivor space (aging), tenured should be false.
+   *
+   * plab_size is the size of the newly allocated PLAB in bytes.
+   */
+  bool should_report_promotion_events() const;
+  bool should_report_promotion_in_new_plab_event() const;
+  bool should_report_promotion_outside_plab_event() const;
+  void report_promotion_in_new_plab_event(Klass* klass, size_t obj_size,
+                                          uint age, bool tenured,
+                                          size_t plab_size) const;
+  void report_promotion_outside_plab_event(Klass* klass, size_t obj_size,
+                                           uint age, bool tenured) const;
 
  private:
   void send_young_gc_event() const;
   void send_promotion_failed_event(const PromotionFailedInfo& pf_info) const;
+  bool should_send_promotion_in_new_plab_event() const;
+  bool should_send_promotion_outside_plab_event() const;
+  void send_promotion_in_new_plab_event(Klass* klass, size_t obj_size,
+                                        uint age, bool tenured,
+                                        size_t plab_size) const;
+  void send_promotion_outside_plab_event(Klass* klass, size_t obj_size,
+                                         uint age, bool tenured) const;
 };
 
 class OldGCTracer : public GCTracer {
@@ -210,6 +239,13 @@
 };
 
 #if INCLUDE_ALL_GCS
+class G1MMUTracer : public AllStatic {
+  static void send_g1_mmu_event(double time_slice_ms, double gc_time_ms, double max_time_ms);
+
+ public:
+  static void report_mmu(double time_slice_sec, double gc_time_sec, double max_time_sec);
+};
+
 class G1NewTracer : public YoungGCTracer {
   G1YoungGCInfo _g1_young_gc_info;
 
--- a/src/share/vm/gc_implementation/shared/gcTraceSend.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/gc_implementation/shared/gcTraceSend.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -23,14 +23,13 @@
  */
 
 #include "precompiled.hpp"
+#include "jfr/jfrEvents.hpp"
 #include "gc_implementation/shared/gcHeapSummary.hpp"
 #include "gc_implementation/shared/gcTimer.hpp"
 #include "gc_implementation/shared/gcTrace.hpp"
 #include "gc_implementation/shared/gcWhen.hpp"
 #include "gc_implementation/shared/copyFailedInfo.hpp"
 #include "runtime/os.hpp"
-#include "trace/tracing.hpp"
-#include "trace/traceBackend.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/evacuationInfo.hpp"
 #include "gc_implementation/g1/g1YCTypes.hpp"
@@ -41,7 +40,7 @@
 typedef uintptr_t TraceAddress;
 
 void GCTracer::send_garbage_collection_event() const {
-  EventGCGarbageCollection event(UNTIMED);
+  EventGarbageCollection event(UNTIMED);
   if (event.should_commit()) {
     event.set_gcId(_shared_gc_info.gc_id().id());
     event.set_name(_shared_gc_info.name());
@@ -89,7 +88,7 @@
 }
 
 void ParallelOldTracer::send_parallel_old_event() const {
-  EventGCParallelOld e(UNTIMED);
+  EventParallelOldGarbageCollection e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.gc_id().id());
     e.set_densePrefix((TraceAddress)_parallel_old_gc_info.dense_prefix());
@@ -100,7 +99,7 @@
 }
 
 void YoungGCTracer::send_young_gc_event() const {
-  EventGCYoungGarbageCollection e(UNTIMED);
+  EventYoungGarbageCollection e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.gc_id().id());
     e.set_tenuringThreshold(_tenuring_threshold);
@@ -110,8 +109,46 @@
   }
 }
 
+bool YoungGCTracer::should_send_promotion_in_new_plab_event() const {
+  return EventPromoteObjectInNewPLAB::is_enabled();
+}
+
+bool YoungGCTracer::should_send_promotion_outside_plab_event() const {
+  return EventPromoteObjectOutsidePLAB::is_enabled();
+}
+
+void YoungGCTracer::send_promotion_in_new_plab_event(Klass* klass, size_t obj_size,
+                                                     uint age, bool tenured,
+                                                     size_t plab_size) const {
+
+  EventPromoteObjectInNewPLAB event;
+  if (event.should_commit()) {
+    event.set_gcId(_shared_gc_info.gc_id().id());
+    event.set_objectClass(klass);
+    event.set_objectSize(obj_size);
+    event.set_tenured(tenured);
+    event.set_tenuringAge(age);
+    event.set_plabSize(plab_size);
+    event.commit();
+  }
+}
+
+void YoungGCTracer::send_promotion_outside_plab_event(Klass* klass, size_t obj_size,
+                                                      uint age, bool tenured) const {
+
+  EventPromoteObjectOutsidePLAB event;
+  if (event.should_commit()) {
+    event.set_gcId(_shared_gc_info.gc_id().id());
+    event.set_objectClass(klass);
+    event.set_objectSize(obj_size);
+    event.set_tenured(tenured);
+    event.set_tenuringAge(age);
+    event.commit();
+  }
+}
+
 void OldGCTracer::send_old_gc_event() const {
-  EventGCOldGarbageCollection e(UNTIMED);
+  EventOldGarbageCollection e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.gc_id().id());
     e.set_starttime(_shared_gc_info.start_timestamp());
@@ -120,8 +157,8 @@
   }
 }
 
-static TraceStructCopyFailed to_trace_struct(const CopyFailedInfo& cf_info) {
-  TraceStructCopyFailed failed_info;
+static JfrStructCopyFailed to_struct(const CopyFailedInfo& cf_info) {
+  JfrStructCopyFailed failed_info;
   failed_info.set_objectCount(cf_info.failed_count());
   failed_info.set_firstSize(cf_info.first_size());
   failed_info.set_smallestSize(cf_info.smallest_size());
@@ -133,7 +170,7 @@
   EventPromotionFailed e;
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.gc_id().id());
-    e.set_data(to_trace_struct(pf_info));
+    e.set_promotionFailed(to_struct(pf_info));
     e.set_thread(pf_info.thread()->thread_id());
     e.commit();
   }
@@ -150,7 +187,7 @@
 
 #if INCLUDE_ALL_GCS
 void G1NewTracer::send_g1_young_gc_event() {
-  EventGCG1GarbageCollection e(UNTIMED);
+  EventG1GarbageCollection e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.gc_id().id());
     e.set_type(_g1_young_gc_info.type());
@@ -160,16 +197,27 @@
   }
 }
 
+void G1MMUTracer::send_g1_mmu_event(double time_slice_ms, double gc_time_ms, double max_time_ms) {
+  EventG1MMU e;
+  if (e.should_commit()) {
+    e.set_gcId(GCId::peek().id());
+    e.set_timeSlice((s8)time_slice_ms);
+    e.set_gcTime((s8)gc_time_ms);
+    e.set_pauseTarget((s8)max_time_ms);
+    e.commit();
+  }
+}
+
 void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) {
-  EventEvacuationInfo e;
+  EventEvacuationInformation e;
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.gc_id().id());
     e.set_cSetRegions(info->collectionset_regions());
     e.set_cSetUsedBefore(info->collectionset_used_before());
     e.set_cSetUsedAfter(info->collectionset_used_after());
     e.set_allocationRegions(info->allocation_regions());
-    e.set_allocRegionsUsedBefore(info->alloc_regions_used_before());
-    e.set_allocRegionsUsedAfter(info->alloc_regions_used_before() + info->bytes_copied());
+    e.set_allocationRegionsUsedBefore(info->alloc_regions_used_before());
+    e.set_allocationRegionsUsedAfter(info->alloc_regions_used_before() + info->bytes_copied());
     e.set_bytesCopied(info->bytes_copied());
     e.set_regionsFreed(info->regions_freed());
     e.commit();
@@ -180,14 +228,91 @@
   EventEvacuationFailed e;
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.gc_id().id());
-    e.set_data(to_trace_struct(ef_info));
+    e.set_evacuationFailed(to_struct(ef_info));
     e.commit();
   }
 }
-#endif
 
-static TraceStructVirtualSpace to_trace_struct(const VirtualSpaceSummary& summary) {
-  TraceStructVirtualSpace space;
+// XXX
+//static JfrStructG1EvacuationStatistics
+//create_g1_evacstats(unsigned gcid, const G1EvacSummary& summary) {
+//  JfrStructG1EvacuationStatistics s;
+//  s.set_gcId(gcid);
+//  s.set_allocated(summary.allocated() * HeapWordSize);
+//  s.set_wasted(summary.wasted() * HeapWordSize);
+//  s.set_used(summary.used() * HeapWordSize);
+//  s.set_undoWaste(summary.undo_wasted() * HeapWordSize);
+//  s.set_regionEndWaste(summary.region_end_waste() * HeapWordSize);
+//  s.set_regionsRefilled(summary.regions_filled());
+//  s.set_directAllocated(summary.direct_allocated() * HeapWordSize);
+//  s.set_failureUsed(summary.failure_used() * HeapWordSize);
+//  s.set_failureWaste(summary.failure_waste() * HeapWordSize);
+//  return s;
+//}
+//
+//void G1NewTracer::send_young_evacuation_statistics(const G1EvacSummary& summary) const {
+//  EventG1EvacuationYoungStatistics surv_evt;
+//  if (surv_evt.should_commit()) {
+//    surv_evt.set_statistics(create_g1_evacstats(_shared_gc_info.gc_id().id(), summary));
+//    surv_evt.commit();
+//  }
+//}
+//
+//void G1NewTracer::send_old_evacuation_statistics(const G1EvacSummary& summary) const {
+//  EventG1EvacuationOldStatistics old_evt;
+//  if (old_evt.should_commit()) {
+//    old_evt.set_statistics(create_g1_evacstats(_shared_gc_info.gc_id().id(), summary));
+//    old_evt.commit();
+//  }
+//}
+//
+//void G1NewTracer::send_basic_ihop_statistics(size_t threshold,
+//                                             size_t target_occupancy,
+//                                             size_t current_occupancy,
+//                                             size_t last_allocation_size,
+//                                             double last_allocation_duration,
+//                                             double last_marking_length) {
+//  EventG1BasicIHOP evt;
+//  if (evt.should_commit()) {
+//    evt.set_gcId(_shared_gc_info.gc_id().id());
+//    evt.set_threshold(threshold);
+//    evt.set_targetOccupancy(target_occupancy);
+//    evt.set_thresholdPercentage(target_occupancy > 0 ? ((double)threshold / target_occupancy) : 0.0);
+//    evt.set_currentOccupancy(current_occupancy);
+//    evt.set_recentMutatorAllocationSize(last_allocation_size);
+//    evt.set_recentMutatorDuration(last_allocation_duration * MILLIUNITS);
+//    evt.set_recentAllocationRate(last_allocation_duration != 0.0 ? last_allocation_size / last_allocation_duration : 0.0);
+//    evt.set_lastMarkingDuration(last_marking_length * MILLIUNITS);
+//    evt.commit();
+//  }
+//}
+//
+//void G1NewTracer::send_adaptive_ihop_statistics(size_t threshold,
+//                                                size_t internal_target_occupancy,
+//                                                size_t current_occupancy,
+//                                                size_t additional_buffer_size,
+//                                                double predicted_allocation_rate,
+//                                                double predicted_marking_length,
+//                                                bool prediction_active) {
+//  EventG1AdaptiveIHOP evt;
+//  if (evt.should_commit()) {
+//    evt.set_gcId(_shared_gc_info.gc_id().id());
+//    evt.set_threshold(threshold);
+//    evt.set_thresholdPercentage(internal_target_occupancy > 0 ? ((double)threshold / internal_target_occupancy) : 0.0);
+//    evt.set_ihopTargetOccupancy(internal_target_occupancy);
+//    evt.set_currentOccupancy(current_occupancy);
+//    evt.set_additionalBufferSize(additional_buffer_size);
+//    evt.set_predictedAllocationRate(predicted_allocation_rate);
+//    evt.set_predictedMarkingDuration(predicted_marking_length * MILLIUNITS);
+//    evt.set_predictionActive(prediction_active);
+//    evt.commit();
+//  }
+//}
+
+#endif // INCLUDE_ALL_GCS
+
+static JfrStructVirtualSpace to_struct(const VirtualSpaceSummary& summary) {
+  JfrStructVirtualSpace space;
   space.set_start((TraceAddress)summary.start());
   space.set_committedEnd((TraceAddress)summary.committed_end());
   space.set_committedSize(summary.committed_size());
@@ -196,8 +321,8 @@
   return space;
 }
 
-static TraceStructObjectSpace to_trace_struct(const SpaceSummary& summary) {
-  TraceStructObjectSpace space;
+static JfrStructObjectSpace to_struct(const SpaceSummary& summary) {
+  JfrStructObjectSpace space;
   space.set_start((TraceAddress)summary.start());
   space.set_end((TraceAddress)summary.end());
   space.set_used(summary.used());
@@ -218,12 +343,27 @@
     if (e.should_commit()) {
       e.set_gcId(_gc_id.id());
       e.set_when((u1)_when);
-      e.set_heapSpace(to_trace_struct(heap_space));
+      e.set_heapSpace(to_struct(heap_space));
       e.set_heapUsed(heap_summary->used());
       e.commit();
     }
   }
 
+//  void visit(const G1HeapSummary* g1_heap_summary) const {
+//    visit((GCHeapSummary*)g1_heap_summary);
+//
+//    EventG1HeapSummary e;
+//    if (e.should_commit()) {
+//      e.set_gcId(_shared_gc_info.gc_id().id());
+//      e.set_when((u1)_when);
+//      e.set_edenUsedSize(g1_heap_summary->edenUsed());
+//      e.set_edenTotalSize(g1_heap_summary->edenCapacity());
+//      e.set_survivorUsedSize(g1_heap_summary->survivorUsed());
+//      e.set_numberOfRegions(g1_heap_summary->numberOfRegions());
+//      e.commit();
+//    }
+//  }
+
   void visit(const PSHeapSummary* ps_heap_summary) const {
     visit((GCHeapSummary*)ps_heap_summary);
 
@@ -239,12 +379,12 @@
       e.set_gcId(_gc_id.id());
       e.set_when((u1)_when);
 
-      e.set_oldSpace(to_trace_struct(ps_heap_summary->old()));
-      e.set_oldObjectSpace(to_trace_struct(ps_heap_summary->old_space()));
-      e.set_youngSpace(to_trace_struct(ps_heap_summary->young()));
-      e.set_edenSpace(to_trace_struct(ps_heap_summary->eden()));
-      e.set_fromSpace(to_trace_struct(ps_heap_summary->from()));
-      e.set_toSpace(to_trace_struct(ps_heap_summary->to()));
+      e.set_oldSpace(to_struct(ps_heap_summary->old()));
+      e.set_oldObjectSpace(to_struct(ps_heap_summary->old_space()));
+      e.set_youngSpace(to_struct(ps_heap_summary->young()));
+      e.set_edenSpace(to_struct(ps_heap_summary->eden()));
+      e.set_fromSpace(to_struct(ps_heap_summary->from()));
+      e.set_toSpace(to_struct(ps_heap_summary->to()));
       e.commit();
     }
   }
@@ -255,8 +395,8 @@
   heap_summary.accept(&visitor);
 }
 
-static TraceStructMetaspaceSizes to_trace_struct(const MetaspaceSizes& sizes) {
-  TraceStructMetaspaceSizes meta_sizes;
+static JfrStructMetaspaceSizes to_struct(const MetaspaceSizes& sizes) {
+  JfrStructMetaspaceSizes meta_sizes;
 
   meta_sizes.set_committed(sizes.committed());
   meta_sizes.set_used(sizes.used());
@@ -271,9 +411,9 @@
     e.set_gcId(_shared_gc_info.gc_id().id());
     e.set_when((u1) when);
     e.set_gcThreshold(meta_space_summary.capacity_until_GC());
-    e.set_metaspace(to_trace_struct(meta_space_summary.meta_space()));
-    e.set_dataSpace(to_trace_struct(meta_space_summary.data_space()));
-    e.set_classSpace(to_trace_struct(meta_space_summary.class_space()));
+    e.set_metaspace(to_struct(meta_space_summary.meta_space()));
+    e.set_dataSpace(to_struct(meta_space_summary.data_space()));
+    e.set_classSpace(to_struct(meta_space_summary.class_space()));
     e.commit();
   }
 }
@@ -283,14 +423,14 @@
  public:
   PhaseSender(GCId gc_id) : _gc_id(gc_id) {}
 
-  template<typename T>
-  void send_phase(PausePhase* pause) {
+  template<typename T>
+  void send_phase(GCPhase* phase) {
     T event(UNTIMED);
     if (event.should_commit()) {
       event.set_gcId(_gc_id.id());
-      event.set_name(pause->name());
-      event.set_starttime(pause->start());
-      event.set_endtime(pause->end());
+      event.set_name(phase->name());
+      event.set_starttime(phase->start());
+      event.set_endtime(phase->end());
       event.commit();
     }
   }
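
Throughout gcTrace.hpp and gcTraceSend.cpp the public report_* methods stay backend-agnostic and delegate to private send_* methods that talk to the generated JFR event classes, while the should_report_* / should_commit() checks let GC code avoid assembling payloads for events nobody has enabled, exactly as the G1 and ParallelScavenge call sites earlier in this changeset do. A standalone mirror of that layering and guard, with all names illustrative:

// Standalone mirror of the report_*/send_* layering and the enabled-check guard;
// names are illustrative, not HotSpot's.
#include <cstddef>
#include <cstdio>

class PromotionTracer {
 public:
  explicit PromotionTracer(bool enabled) : _enabled(enabled) {}

  bool should_report_promotion_event() const { return _enabled; }

  // Public "report" wrapper: the stable API that GC code calls.
  void report_promotion_event(const char* klass_name, size_t obj_bytes, unsigned age) const {
    send_promotion_event(klass_name, obj_bytes, age);
  }

 private:
  bool _enabled;

  // Private "send": the only layer that knows about the event backend.
  void send_promotion_event(const char* klass_name, size_t obj_bytes, unsigned age) const {
    std::printf("promotion: %s, %zu bytes, age %u\n", klass_name, obj_bytes, age);
  }
};

static void copy_object(const PromotionTracer& tracer) {
  // ... copying work ...
  if (tracer.should_report_promotion_event()) {
    // Only gather the (potentially costly) payload when someone is listening.
    tracer.report_promotion_event("java/lang/String", 48, 2);
  }
}

int main() {
  copy_object(PromotionTracer(true));
  copy_object(PromotionTracer(false)); // the guard skips the payload work entirely
  return 0;
}
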
--- a/src/share/vm/gc_implementation/shared/gcTraceTime.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/gc_implementation/shared/gcTraceTime.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -32,7 +32,7 @@
 #include "runtime/thread.inline.hpp"
 #include "runtime/timer.hpp"
 #include "utilities/ostream.hpp"
-#include "utilities/ticks.inline.hpp"
+#include "utilities/ticks.hpp"
 
 
 GCTraceTime::GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer, GCId gc_id) :
@@ -72,7 +72,7 @@
 
   if (_doit) {
     const Tickspan duration = stop_counter - _start_counter;
-    double duration_in_seconds = TicksToTimeHelper::seconds(duration);
+    double duration_in_seconds = duration.seconds();
     if (_print_cr) {
       gclog_or_tty->print_cr(", %3.7f secs]", duration_in_seconds);
     } else {
--- a/src/share/vm/gc_implementation/shared/objectCountEventSender.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/gc_implementation/shared/objectCountEventSender.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -26,34 +26,53 @@
 #include "precompiled.hpp"
 #include "gc_implementation/shared/gcId.hpp"
 #include "gc_implementation/shared/objectCountEventSender.hpp"
+#include "jfr/jfrEvents.hpp"
 #include "memory/heapInspection.hpp"
-#include "trace/tracing.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/ticks.hpp"
+
+
 #if INCLUDE_SERVICES
 
+bool ObjectCountEventSender::should_send_event() {
+#if INCLUDE_JFR
+  return _should_send_requestable_event || EventObjectCountAfterGC::is_enabled();
+#else
+  return false;
+#endif // INCLUDE_JFR
+}
+
+bool ObjectCountEventSender::_should_send_requestable_event = false;
+
+void ObjectCountEventSender::enable_requestable_event() {
+  _should_send_requestable_event = true;
+}
+
+void ObjectCountEventSender::disable_requestable_event() {
+  _should_send_requestable_event = false;
+}
+
+template <typename T>
+void ObjectCountEventSender::send_event_if_enabled(Klass* klass, GCId gc_id, jlong count, julong size, const Ticks& timestamp) {
+  T event(UNTIMED);
+  if (event.should_commit()) {
+    event.set_gcId(gc_id.id());
+    event.set_objectClass(klass);
+    event.set_count(count);
+    event.set_totalSize(size);
+    event.set_endtime(timestamp);
+    event.commit();
+  }
+}
+
 void ObjectCountEventSender::send(const KlassInfoEntry* entry, GCId gc_id, const Ticks& timestamp) {
-#if INCLUDE_TRACE
-  assert(Tracing::is_event_enabled(EventObjectCountAfterGC::eventId),
-         "Only call this method if the event is enabled");
+  Klass* klass = entry->klass();
+  jlong count = entry->count();
+  julong total_size = entry->words() * BytesPerWord;
 
-  EventObjectCountAfterGC event(UNTIMED);
-  event.set_gcId(gc_id.id());
-  event.set_class(entry->klass());
-  event.set_count(entry->count());
-  event.set_totalSize(entry->words() * BytesPerWord);
-  event.set_endtime(timestamp);
-  event.commit();
-#endif // INCLUDE_TRACE
-}
-
-bool ObjectCountEventSender::should_send_event() {
-#if INCLUDE_TRACE
-  return Tracing::is_event_enabled(EventObjectCountAfterGC::eventId);
-#else
-  return false;
-#endif // INCLUDE_TRACE
+  send_event_if_enabled<EventObjectCount>(klass, gc_id, count, total_size, timestamp);
+  send_event_if_enabled<EventObjectCountAfterGC>(klass, gc_id, count, total_size, timestamp);
 }
 
 #endif // INCLUDE_SERVICES
--- a/src/share/vm/gc_implementation/shared/objectCountEventSender.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/gc_implementation/shared/objectCountEventSender.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -32,10 +32,17 @@
 #if INCLUDE_SERVICES
 
 class KlassInfoEntry;
-class Ticks;
 
 class ObjectCountEventSender : public AllStatic {
+  static bool _should_send_requestable_event;
+
+  template <typename T>
+  static void send_event_if_enabled(Klass* klass, GCId gc_id, jlong count, julong size, const Ticks& timestamp);
+
  public:
+  static void enable_requestable_event();
+  static void disable_requestable_event();
+
   static void send(const KlassInfoEntry* entry, GCId gc_id, const Ticks& timestamp);
   static bool should_send_event();
 };
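
ObjectCountEventSender now feeds two events from the same histogram walk: the requestable, periodic EventObjectCount (toggled by enable/disable_requestable_event) and the GC-triggered EventObjectCountAfterGC, both routed through the send_event_if_enabled<T> template so a disabled event costs only its enablement check. A standalone mirror of that template dispatch (the event types below are stand-ins, not the generated JFR classes):

// Standalone mirror of the send_event_if_enabled<T> pattern; the event types
// below are stand-ins, not the generated JFR event classes.
#include <cstdio>

struct PeriodicObjectCountEvent {
  static bool is_enabled() { return true; }
  static const char* name() { return "ObjectCount"; }
};

struct AfterGCObjectCountEvent {
  static bool is_enabled() { return false; }
  static const char* name() { return "ObjectCountAfterGC"; }
};

template <typename EventT>
static void send_event_if_enabled(const char* klass_name, long long count, unsigned long long bytes) {
  if (!EventT::is_enabled()) {
    return; // disabled events cost only this check
  }
  std::printf("%s: %s count=%lld totalSize=%llu\n", EventT::name(), klass_name, count, bytes);
}

static void send(const char* klass_name, long long count, unsigned long long bytes) {
  send_event_if_enabled<PeriodicObjectCountEvent>(klass_name, count, bytes);
  send_event_if_enabled<AfterGCObjectCountEvent>(klass_name, count, bytes);
}

int main() {
  send("java/lang/Object", 12345, 395040);
  return 0;
}
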
--- a/src/share/vm/gc_interface/allocTracer.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/gc_interface/allocTracer.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -25,23 +25,28 @@
 #include "precompiled.hpp"
 #include "gc_implementation/shared/gcId.hpp"
 #include "gc_interface/allocTracer.hpp"
-#include "trace/tracing.hpp"
+#include "jfr/jfrEvents.hpp"
 #include "runtime/handles.hpp"
 #include "utilities/globalDefinitions.hpp"
+#if INCLUDE_JFR
+#include "jfr/support/jfrAllocationTracer.hpp"
+#endif
 
-void AllocTracer::send_allocation_outside_tlab_event(KlassHandle klass, size_t alloc_size) {
-  EventAllocObjectOutsideTLAB event;
+void AllocTracer::send_allocation_outside_tlab_event(KlassHandle klass, HeapWord* obj, size_t alloc_size, Thread* thread) {
+  JFR_ONLY(JfrAllocationTracer tracer(obj, alloc_size, thread);)
+  EventObjectAllocationOutsideTLAB event;
   if (event.should_commit()) {
-    event.set_class(klass());
+    event.set_objectClass(klass());
     event.set_allocationSize(alloc_size);
     event.commit();
   }
 }
 
-void AllocTracer::send_allocation_in_new_tlab_event(KlassHandle klass, size_t tlab_size, size_t alloc_size) {
-  EventAllocObjectInNewTLAB event;
+void AllocTracer::send_allocation_in_new_tlab_event(KlassHandle klass, HeapWord* obj, size_t tlab_size, size_t alloc_size, Thread* thread) {
+  JFR_ONLY(JfrAllocationTracer tracer(obj, alloc_size, thread);)
+  EventObjectAllocationInNewTLAB event;
   if (event.should_commit()) {
-    event.set_class(klass());
+    event.set_objectClass(klass());
     event.set_allocationSize(alloc_size);
     event.set_tlabSize(tlab_size);
     event.commit();
--- a/src/share/vm/gc_interface/allocTracer.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/gc_interface/allocTracer.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -30,8 +30,8 @@
 
 class AllocTracer : AllStatic {
   public:
-    static void send_allocation_outside_tlab_event(KlassHandle klass, size_t alloc_size);
-    static void send_allocation_in_new_tlab_event(KlassHandle klass, size_t tlab_size, size_t alloc_size);
+    static void send_allocation_outside_tlab_event(KlassHandle klass, HeapWord* obj, size_t alloc_size, Thread* thread);
+    static void send_allocation_in_new_tlab_event(KlassHandle klass, HeapWord* obj, size_t tlab_size, size_t alloc_size, Thread* thread);
     static void send_allocation_requiring_gc_event(size_t size, const GCId& gcId);
 };
 
--- a/src/share/vm/gc_interface/collectedHeap.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/gc_interface/collectedHeap.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -286,7 +286,7 @@
     return NULL;
   }
 
-  AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);
+  AllocTracer::send_allocation_in_new_tlab_event(klass, obj, new_tlab_size * HeapWordSize, size * HeapWordSize, Thread::current());
 
   if (ZeroTLAB) {
     // ..and clear it.
--- a/src/share/vm/gc_interface/collectedHeap.inline.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/gc_interface/collectedHeap.inline.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -140,7 +140,7 @@
            "Unexpected exception, will result in uninitialized storage");
     THREAD->incr_allocated_bytes(size * HeapWordSize);
 
-    AllocTracer::send_allocation_outside_tlab_event(klass, size * HeapWordSize);
+    AllocTracer::send_allocation_outside_tlab_event(klass, result, size * HeapWordSize, Thread::current());
 
     return result;
   }
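
The allocation tracer now also receives the newly allocated address and the allocating thread, and under INCLUDE_JFR constructs a scoped JfrAllocationTracer before the should_commit() check, so whatever bookkeeping that tracer performs is not gated on the TLAB allocation events being enabled. A generic standalone sketch of such a scoped (RAII) tracer; the names and the printed bookkeeping are hypothetical, not a description of JfrAllocationTracer:

// Generic RAII sketch of a scoped allocation tracer; names are hypothetical and
// do not describe what JfrAllocationTracer actually records.
#include <cstddef>
#include <cstdio>

class ScopedAllocationTracer {
 public:
  ScopedAllocationTracer(const void* obj, size_t bytes, int thread_id)
      : _obj(obj), _bytes(bytes), _thread_id(thread_id) {
    // Constructor runs unconditionally at the allocation site.
    std::printf("enter: thread %d allocating %zu bytes at %p\n", _thread_id, _bytes, (void*)_obj);
  }
  ~ScopedAllocationTracer() {
    // Destructor runs when the allocation path unwinds, event or no event.
    std::printf("leave: thread %d\n", _thread_id);
  }
 private:
  const void* _obj;
  size_t _bytes;
  int _thread_id;
};

static bool event_enabled() { return false; }

static void send_allocation_event(const void* obj, size_t bytes, int thread_id) {
  ScopedAllocationTracer tracer(obj, bytes, thread_id); // always constructed
  if (event_enabled()) {
    std::printf("commit allocation event\n");           // only when enabled
  }
}

int main() {
  char buffer[64];
  send_allocation_event(buffer, sizeof(buffer), 1);
  return 0;
}
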
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/GenerateJfrFiles.java	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,696 @@
+package build.tools.jfr;
+
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.StringJoiner;
+import java.util.function.Predicate;
+
+import javax.xml.XMLConstants;
+import javax.xml.parsers.ParserConfigurationException;
+import javax.xml.parsers.SAXParser;
+import javax.xml.parsers.SAXParserFactory;
+import javax.xml.validation.SchemaFactory;
+
+import org.xml.sax.Attributes;
+import org.xml.sax.SAXException;
+import org.xml.sax.SAXParseException;
+import org.xml.sax.helpers.DefaultHandler;
+
+public class GenerateJfrFiles {
+
+    public static void main(String... args) throws Exception {
+        if (args.length != 3) {
+            System.err.println("Incorrect number of command line arguments.");
+            System.err.println("Usage:");
+            System.err.println("java GenerateJfrFiles[.java] <path-to-metadata.xml> <path-to-metadata.xsd> <output-directory>");
+            System.exit(1);
+        }
+        try {
+            File metadataXml = new File(args[0]);
+            File metadataSchema = new File(args[1]);
+            File outputDirectory = new File(args[2]);
+
+            Metadata metadata = new Metadata(metadataXml, metadataSchema);
+            metadata.verify();
+            metadata.wireUpTypes();
+
+            printJfrPeriodicHpp(metadata, outputDirectory);
+            printJfrEventIdsHpp(metadata, outputDirectory);
+            printJfrEventControlHpp(metadata, outputDirectory);
+            printJfrTypesHpp(metadata, outputDirectory);
+            printJfrEventClassesHpp(metadata, outputDirectory);
+
+        } catch (Exception e) {
+            e.printStackTrace();
+            System.exit(1);
+        }
+    }
+
+    static class XmlType {
+        final String fieldType;
+        final String parameterType;
+        XmlType(String fieldType, String parameterType) {
+            this.fieldType = fieldType;
+            this.parameterType = parameterType;
+        }
+    }
+
+    static class TypeElement {
+        List<FieldElement> fields = new ArrayList<>();
+        String name;
+        String fieldType;
+        String parameterType;
+        boolean supportStruct;
+    }
+
+    static class Metadata {
+        final Map<String, TypeElement> types = new LinkedHashMap<>();
+        final Map<String, XmlType> xmlTypes = new HashMap<>();
+        Metadata(File metadataXml, File metadataSchema) throws ParserConfigurationException, SAXException, FileNotFoundException, IOException {
+            SchemaFactory schemaFactory = SchemaFactory.newInstance(XMLConstants.W3C_XML_SCHEMA_NS_URI);
+            SAXParserFactory factory = SAXParserFactory.newInstance();
+            factory.setSchema(schemaFactory.newSchema(metadataSchema));
+            SAXParser sp = factory.newSAXParser();
+            sp.parse(metadataXml, new MetadataHandler(this));
+        }
+
+        List<EventElement> getEvents() {
+            return getList(t -> t.getClass() == EventElement.class);
+        }
+
+        List<TypeElement> getEventsAndStructs() {
+            return getList(t -> t.getClass() == EventElement.class || t.supportStruct);
+        }
+
+        List<TypeElement> getTypesAndStructs() {
+            return getList(t -> t.getClass() == TypeElement.class || t.supportStruct);
+        }
+
+        @SuppressWarnings("unchecked")
+        <T> List<T> getList(Predicate<? super TypeElement> pred) {
+            List<T> result = new ArrayList<>(types.size());
+            for (TypeElement t : types.values()) {
+                if (pred.test(t)) {
+                    result.add((T) t);
+                }
+            }
+            return result;
+        }
+
+        List<EventElement> getPeriodicEvents() {
+            return getList(t -> t.getClass() == EventElement.class && ((EventElement) t).periodic);
+        }
+
+        List<TypeElement> getNonEventsAndNonStructs() {
+            return getList(t -> t.getClass() != EventElement.class && !t.supportStruct);
+        }
+
+        List<TypeElement> getTypes() {
+            return getList(t -> t.getClass() == TypeElement.class && !t.supportStruct);
+        }
+
+        List<TypeElement> getStructs() {
+            return getList(t -> t.getClass() == TypeElement.class && t.supportStruct);
+        }
+
+        void verify()  {
+            for (TypeElement t : types.values()) {
+                for (FieldElement f : t.fields) {
+                    if (!xmlTypes.containsKey(f.typeName)) { // ignore primitives
+                        if (!types.containsKey(f.typeName)) {
+                            throw new IllegalStateException("Could not find definition of type '" + f.typeName + "' used by " + t.name + "#" + f.name);
+                        }
+                    }
+                }
+            }
+        }
+
+        void wireUpTypes() {
+            for (TypeElement t : types.values()) {
+                for (FieldElement f : t.fields) {
+                    TypeElement type = types.get(f.typeName);
+                    if (f.struct) {
+                        type.supportStruct = true;
+                    }
+                    f.type = type;
+                }
+            }
+        }
+    }
+
+    static class EventElement extends TypeElement {
+        String representation;
+        boolean thread;
+        boolean stackTrace;
+        boolean startTime;
+        boolean periodic;
+        boolean cutoff;
+    }
+
+    static class FieldElement {
+        final Metadata metadata;
+        TypeElement type;
+        String name;
+        String typeName;
+        boolean struct;
+
+        FieldElement(Metadata metadata) {
+            this.metadata = metadata;
+        }
+
+        String getParameterType() {
+            if (struct) {
+                return "const JfrStruct" + typeName + "&";
+            }
+            XmlType xmlType = metadata.xmlTypes.get(typeName);
+            if (xmlType != null) {
+                return xmlType.parameterType;
+            }
+            return type != null ? "u8" : typeName;
+        }
+
+        String getParameterName() {
+            return struct ? "value" : "new_value";
+        }
+
+        String getFieldType() {
+            if (struct) {
+                return "JfrStruct" + typeName;
+            }
+            XmlType xmlType = metadata.xmlTypes.get(typeName);
+            if (xmlType != null) {
+                return xmlType.fieldType;
+            }
+            return type != null ? "u8" : typeName;
+        }
+    }
+
+    static class MetadataHandler extends DefaultHandler {
+        final Metadata metadata;
+        FieldElement currentField;
+        TypeElement currentType;
+        MetadataHandler(Metadata metadata) {
+            this.metadata = metadata;
+        }
+        @Override
+        public void error(SAXParseException e) throws SAXException {
+          throw e;
+        }
+        @Override
+        public void startElement(String uri, String localName, String qName, Attributes attributes) throws SAXException {
+            switch (qName) {
+            case "XmlType":
+                String name = attributes.getValue("name");
+                String parameterType = attributes.getValue("parameterType");
+                String fieldType = attributes.getValue("fieldType");
+                metadata.xmlTypes.put(name, new XmlType(fieldType, parameterType));
+                break;
+            case "Type":
+                currentType = new TypeElement();
+                currentType.name = attributes.getValue("name");
+                break;
+            case "Event":
+                EventElement eventType = new EventElement();
+                eventType.name = attributes.getValue("name");
+                eventType.thread = getBoolean(attributes, "thread", false);
+                eventType.stackTrace = getBoolean(attributes, "stackTrace", false);
+                eventType.startTime = getBoolean(attributes, "startTime", true);
+                eventType.periodic = attributes.getValue("period") != null;
+                eventType.cutoff = getBoolean(attributes, "cutoff", false);
+                currentType = eventType;
+                break;
+            case "Field":
+                currentField = new FieldElement(metadata);
+                currentField.struct = getBoolean(attributes, "struct", false);
+                currentField.name = attributes.getValue("name");
+                currentField.typeName = attributes.getValue("type");
+                break;
+            }
+        }
+
+        private boolean getBoolean(Attributes attributes, String name, boolean defaultValue) {
+            String value = attributes.getValue(name);
+            return value == null ? defaultValue : Boolean.valueOf(value);
+        }
+
+        @Override
+        public void endElement(String uri, String localName, String qName) {
+            switch (qName) {
+            case "Type":
+            case "Event":
+                metadata.types.put(currentType.name, currentType);
+                currentType = null;
+                break;
+            case "Field":
+                currentType.fields.add(currentField);
+                currentField = null;
+                break;
+            }
+        }
+    }
+
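+    // Small helper that prefixes every generated file with a do-not-edit banner and
+    // always writes Unix line endings.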
+    static class Printer implements AutoCloseable {
+        final PrintStream out;
+        Printer(File outputDirectory, String filename) throws FileNotFoundException {
+            out = new PrintStream(new BufferedOutputStream(new FileOutputStream(new File(outputDirectory, filename))));
+            write("/* AUTOMATICALLY GENERATED FILE - DO NOT EDIT */");
+            write("");
+        }
+
+        void write(String text) {
+            out.print(text);
+            out.print("\n"); // Don't use Windows line endings
+        }
+
+        @Override
+        public void close() throws Exception {
+            out.close();
+        }
+    }
+
+    private static void printJfrPeriodicHpp(Metadata metadata, File outputDirectory) throws Exception {
+        try (Printer out = new Printer(outputDirectory, "jfrPeriodic.hpp")) {
+            out.write("#ifndef JFRFILES_JFRPERIODICEVENTSET_HPP");
+            out.write("#define JFRFILES_JFRPERIODICEVENTSET_HPP");
+            out.write("");
+            out.write("#include \"utilities/macros.hpp\"");
+            out.write("#if INCLUDE_JFR");
+            out.write("#include \"jfrfiles/jfrEventIds.hpp\"");
+            out.write("#include \"memory/allocation.hpp\"");
+            out.write("");
+            out.write("class JfrPeriodicEventSet : public AllStatic {");
+            out.write(" public:");
+            out.write("  static void requestEvent(JfrEventId id) {");
+            out.write("    switch(id) {");
+            out.write("  ");
+            for (EventElement e : metadata.getPeriodicEvents()) {
+                out.write("      case Jfr" + e.name + "Event:");
+                out.write("        request" + e.name + "();");
+                out.write("        break;");
+                out.write("  ");
+            }
+            out.write("      default:");
+            out.write("        break;");
+            out.write("      }");
+            out.write("    }");
+            out.write("");
+            out.write(" private:");
+            out.write("");
+            for (EventElement e : metadata.getPeriodicEvents()) {
+                out.write("  static void request" + e.name + "(void);");
+                out.write("");
+            }
+            out.write("};");
+            out.write("");
+            out.write("#endif // INCLUDE_JFR");
+            out.write("#endif // JFRFILES_JFRPERIODICEVENTSET_HPP");
+        }
+    }
+
+    private static void printJfrEventControlHpp(Metadata metadata, File outputDirectory) throws Exception {
+        try (Printer out = new Printer(outputDirectory, "jfrEventControl.hpp")) {
+            out.write("#ifndef JFRFILES_JFR_NATIVE_EVENTSETTING_HPP");
+            out.write("#define JFRFILES_JFR_NATIVE_EVENTSETTING_HPP");
+            out.write("");
+            out.write("#include \"utilities/macros.hpp\"");
+            out.write("#if INCLUDE_JFR");
+            out.write("#include \"jfrfiles/jfrEventIds.hpp\"");
+            out.write("");
+            out.write("/**");
+            out.write(" * Event setting. We add some padding so we can use our");
+            out.write(" * event IDs as indexes into this.");
+            out.write(" */");
+            out.write("");
+            out.write("struct jfrNativeEventSetting {");
+            out.write("  jlong  threshold_ticks;");
+            out.write("  jlong  cutoff_ticks;");
+            out.write("  u1     stacktrace;");
+            out.write("  u1     enabled;");
+            out.write("  u1     pad[6]; // Because GCC on linux ia32 at least tries to pack this.");
+            out.write("};");
+            out.write("");
+            out.write("union JfrNativeSettings {");
+            out.write("  // Array version.");
+            out.write("  jfrNativeEventSetting bits[MaxJfrEventId];");
+            out.write("  // Then, to make it easy to debug,");
+            out.write("  // add named struct members also.");
+            out.write("  struct {");
+            out.write("    jfrNativeEventSetting pad[NUM_RESERVED_EVENTS];");
+            for (TypeElement t : metadata.getEventsAndStructs()) {
+                out.write("    jfrNativeEventSetting " + t.name + ";");
+            }
+            out.write("  } ev;");
+            out.write("};");
+            out.write("");
+            out.write("#endif // INCLUDE_JFR");
+            out.write("#endif // JFRFILES_JFR_NATIVE_EVENTSETTING_HPP");
+        }
+    }
+
+    private static void printJfrEventIdsHpp(Metadata metadata, File outputDirectory) throws Exception {
+        try (Printer out = new Printer(outputDirectory, "jfrEventIds.hpp")) {
+            out.write("#ifndef JFRFILES_JFREVENTIDS_HPP");
+            out.write("#define JFRFILES_JFREVENTIDS_HPP");
+            out.write("");
+            out.write("#include \"utilities/macros.hpp\"");
+            out.write("#if INCLUDE_JFR");
+            out.write("#include \"jfrfiles/jfrTypes.hpp\"");
+            out.write("");
+            out.write("/**");
+            out.write(" * Enum of the event types in the JVM");
+            out.write(" */");
+            out.write("enum JfrEventId {");
+            out.write("  _jfreventbase = (NUM_RESERVED_EVENTS-1), // Make sure we start at right index.");
+            out.write("  ");
+            out.write("  // Events -> enum entry");
+            for (TypeElement t : metadata.getEventsAndStructs()) {
+                out.write("  Jfr" + t.name + "Event,");
+            }
+            out.write("");
+            out.write("  MaxJfrEventId");
+            out.write("};");
+            out.write("");
+            out.write("/**");
+            out.write(" * Struct types in the JVM");
+            out.write(" */");
+            out.write("enum JfrStructId {");
+            for (TypeElement t : metadata.getNonEventsAndNonStructs()) {
+                out.write("  Jfr" + t.name + "Struct,");
+            }
+            for (TypeElement t : metadata.getEventsAndStructs()) {
+                out.write("  Jfr" + t.name + "Struct,");
+            }
+            out.write("");
+            out.write("  MaxJfrStructId");
+            out.write("};");
+            out.write("");
+            out.write("typedef enum JfrEventId JfrEventId;");
+            out.write("typedef enum JfrStructId JfrStructId;");
+            out.write("");
+            out.write("#endif // INCLUDE_JFR");
+            out.write("#endif // JFRFILES_JFREVENTIDS_HPP");
+        }
+    }
+
+    private static void printJfrTypesHpp(Metadata metadata, File outputDirectory) throws Exception {
+        List<String> knownTypes = Arrays.asList("Thread", "StackTrace", "Class", "StackFrame");
+        try (Printer out = new Printer(outputDirectory, "jfrTypes.hpp")) {
+            out.write("#ifndef JFRFILES_JFRTYPES_HPP");
+            out.write("#define JFRFILES_JFRTYPES_HPP");
+            out.write("");
+            out.write("#include \"utilities/macros.hpp\"");
+            out.write("#if INCLUDE_JFR");
+            out.write("");
+            out.write("enum JfrTypeId {");
+            out.write("  TYPE_NONE             = 0,");
+            out.write("  TYPE_CLASS            = 20,");
+            out.write("  TYPE_STRING           = 21,");
+            out.write("  TYPE_THREAD           = 22,");
+            out.write("  TYPE_STACKTRACE       = 23,");
+            out.write("  TYPE_BYTES            = 24,");
+            out.write("  TYPE_EPOCHMILLIS      = 25,");
+            out.write("  TYPE_MILLIS           = 26,");
+            out.write("  TYPE_NANOS            = 27,");
+            out.write("  TYPE_TICKS            = 28,");
+            out.write("  TYPE_ADDRESS          = 29,");
+            out.write("  TYPE_PERCENTAGE       = 30,");
+            out.write("  TYPE_DUMMY,");
+            out.write("  TYPE_DUMMY_1,");
+            for (TypeElement type : metadata.getTypes()) {
+                if (!knownTypes.contains(type.name)) {
+                    out.write("  TYPE_" + type.name.toUpperCase() + ",");
+                }
+            }
+            out.write("");
+            out.write("  NUM_JFR_TYPES,");
+            out.write("  TYPES_END             = 255");
+            out.write("};");
+            out.write("");
+            out.write("enum ReservedEvent {");
+            out.write("  EVENT_METADATA,");
+            out.write("  EVENT_CHECKPOINT,");
+            out.write("  EVENT_BUFFERLOST,");
+            out.write("  NUM_RESERVED_EVENTS = TYPES_END");
+            out.write("};");
+            out.write("");
+            out.write("#endif // INCLUDE_JFR");
+            out.write("#endif // JFRFILES_JFRTYPES_HPP");
+        }
+    }
+
+    private static void printJfrEventClassesHpp(Metadata metadata, File outputDirectory) throws Exception {
+        try (Printer out = new Printer(outputDirectory, "jfrEventClasses.hpp")) {
+            out.write("#ifndef JFRFILES_JFREVENTCLASSES_HPP");
+            out.write("#define JFRFILES_JFREVENTCLASSES_HPP");
+            out.write("");
+            out.write("#include \"oops/klass.hpp\"");
+            out.write("#include \"jfrfiles/jfrTypes.hpp\"");
+            out.write("#include \"jfr/utilities/jfrTypes.hpp\"");
+            out.write("#include \"utilities/macros.hpp\"");
+            out.write("#include \"utilities/ticks.hpp\"");
+            out.write("#if INCLUDE_JFR");
+            out.write("#include \"jfr/recorder/service/jfrEvent.hpp\"");
+            out.write("/*");
+            out.write(" * Each event class has an assert member function verify() which is invoked");
+            out.write(" * just before the engine writes the event and its fields to the data stream.");
+            out.write(" * The purpose of verify() is to ensure that all fields in the event are initialized");
+            out.write(" * and set before attempting to commit.");
+            out.write(" *");
+            out.write(" * We enforce this requirement because events are generally stack allocated and therefore");
+            out.write(" * *not* initialized to default values. This prevents us from inadvertently committing");
+            out.write(" * uninitialized values to the data stream.");
+            out.write(" *");
+            out.write(" * The assert message contains both the index (zero based) as well as the name of the field.");
+            out.write(" */");
+            out.write("");
+            printTypes(out, metadata, false);
+            out.write("");
+            out.write("");
+            out.write("#else // !INCLUDE_JFR");
+            out.write("");
+            out.write("class JfrEvent {");
+            out.write(" public:");
+            out.write("  JfrEvent() {}");
+            out.write("  void set_starttime(const Ticks&) const {}");
+            out.write("  void set_endtime(const Ticks&) const {}");
+            out.write("  bool should_commit() const { return false; }");
+            out.write("  static bool is_enabled() { return false; }");
+            out.write("  void commit() {}");
+            out.write("};");
+            out.write("");
+            printTypes(out, metadata, true);
+            out.write("");
+            out.write("");
+            out.write("#endif // INCLUDE_JFR");
+            out.write("#endif // JFRFILES_JFREVENTCLASSES_HPP");
+        }
+    }
+
+    private static void printTypes(Printer out, Metadata metadata, boolean empty) {
+        for (TypeElement t : metadata.getStructs()) {
+            if (empty) {
+                out.write("");
+                printEmptyType(out, t);
+            } else {
+                printType(out, t);
+            }
+            out.write("");
+        }
+        for (EventElement e : metadata.getEvents()) {
+            if (empty) {
+                printEmptyEvent(out, e);
+            } else {
+                printEvent(out, e);
+            }
+            out.write("");
+        }
+    }
+
+    private static void printEmptyEvent(Printer out, EventElement event) {
+        out.write("class Event" + event.name + " : public JfrEvent");
+        out.write("{");
+        out.write(" public:");
+        out.write("  Event" + event.name + "(EventStartTime ignore=TIMED) {}");
+        if (event.startTime) {
+            StringJoiner sj = new StringJoiner(",\n    ");
+            for (FieldElement f : event.fields) {
+                sj.add(f.getParameterType());
+            }
+            out.write("  Event" + event.name + "(");
+            out.write("    " + sj.toString() + ") { }");
+        }
+        for (FieldElement f : event.fields) {
+            out.write("  void set_" + f.name + "(" + f.getParameterType() + ") { }");
+        }
+        out.write("};");
+    }
+
+    private static void printEmptyType(Printer out, TypeElement t) {
+        out.write("struct JfrStruct" + t.name);
+        out.write("{");
+        out.write(" public:");
+        for (FieldElement f : t.fields) {
+            out.write("  void set_" + f.name + "(" + f.getParameterType() + ") { }");
+        }
+        out.write("};");
+    }
+
+    private static void printType(Printer out, TypeElement t) {
+        out.write("struct JfrStruct" + t.name);
+        out.write("{");
+        out.write(" private:");
+        for (FieldElement f : t.fields) {
+            printField(out, f);
+        }
+        out.write("");
+        out.write(" public:");
+        for (FieldElement f : t.fields) {
+            printTypeSetter(out, f);
+        }
+        out.write("");
+        printWriteData(out, t.fields);
+        out.write("};");
+        out.write("");
+    }
+
+    private static void printEvent(Printer out, EventElement event) {
+        out.write("class Event" + event.name + " : public JfrEvent<Event" + event.name + ">");
+        out.write("{");
+        out.write(" private:");
+        for (FieldElement f : event.fields) {
+            printField(out, f);
+        }
+        out.write("");
+        out.write(" public:");
+        out.write("  static const bool hasThread = " + event.thread + ";");
+        out.write("  static const bool hasStackTrace = " + event.stackTrace + ";");
+        out.write("  static const bool isInstant = " + !event.startTime + ";");
+        out.write("  static const bool hasCutoff = " + event.cutoff + ";");
+        out.write("  static const bool isRequestable = " + event.periodic + ";");
+        out.write("  static const JfrEventId eventId = Jfr" + event.name + "Event;");
+        out.write("");
+        out.write("  Event" + event.name + "(EventStartTime timing=TIMED) : JfrEvent<Event" + event.name + ">(timing) {}");
+        out.write("");
+        int index = 0;
+        for (FieldElement f : event.fields) {
+            out.write("  void set_" + f.name + "(" + f.getParameterType() + " " + f.getParameterName() + ") {");
+            out.write("    this->_" + f.name + " = " + f.getParameterName() + ";");
+            out.write("    DEBUG_ONLY(set_field_bit(" + index++ + "));");
+            out.write("  }");
+        }
+        out.write("");
+        printWriteData(out, event.fields);
+        out.write("");
+        out.write("  using JfrEvent<Event" + event.name + ">::commit; // else commit() is hidden by overloaded versions in this class");
+        printConstructor2(out, event);
+        printCommitMethod(out, event);
+        printVerify(out, event.fields);
+        out.write("};");
+    }
+
+    private static void printWriteData(Printer out, List<FieldElement> fields) {
+        out.write("  template <typename Writer>");
+        out.write("  void writeData(Writer& w) {");
+        for (FieldElement field : fields) {
+            if (field.struct) {
+                out.write("    _" + field.name + ".writeData(w);");
+            } else {
+                out.write("    w.write(_" + field.name + ");");
+            }
+        }
+        out.write("  }");
+    }
+
+    private static void printTypeSetter(Printer out, FieldElement field) {
+        out.write("  void set_" + field.name + "(" + field.getParameterType() + " new_value) { this->_" + field.name + " = new_value; }");
+    }
+
+    private static void printVerify(Printer out, List<FieldElement> fields) {
+        out.write("");
+        out.write("#ifdef ASSERT");
+        out.write("  void verify() const {");
+        int index = 0;
+        for (FieldElement f : fields) {
+            out.write("    assert(verify_field_bit(" + index++ + "), \"Attempting to write an uninitialized event field: " + f.name + "\");");
+        }
+        out.write("  }");
+        out.write("#endif");
+    }
+
+    private static void printCommitMethod(Printer out, EventElement event) {
+        if (event.startTime) {
+            StringJoiner sj = new StringJoiner(",\n              ");
+            for (FieldElement f : event.fields) {
+                sj.add(f.getParameterType() + " " + f.name);
+            }
+            out.write("");
+            out.write("  void commit(" + sj.toString() + ") {");
+            out.write("    if (should_commit()) {");
+            for (FieldElement f : event.fields) {
+                out.write("      set_" + f.name + "(" + f.name + ");");
+            }
+            out.write("      commit();");
+            out.write("    }");
+            out.write("  }");
+        }
+        out.write("");
+        StringJoiner sj = new StringJoiner(",\n                     ");
+        if (event.startTime) {
+            sj.add("const Ticks& startTicks");
+            sj.add("const Ticks& endTicks");
+        }
+        for (FieldElement f : event.fields) {
+            sj.add(f.getParameterType() + " " + f.name);
+        }
+        out.write("  static void commit(" + sj.toString() + ") {");
+        out.write("    Event" + event.name + " me(UNTIMED);");
+        out.write("");
+        out.write("    if (me.should_commit()) {");
+        if (event.startTime) {
+            out.write("      me.set_starttime(startTicks);");
+            out.write("      me.set_endtime(endTicks);");
+        }
+        for (FieldElement f : event.fields) {
+            out.write("      me.set_" + f.name + "(" + f.name + ");");
+        }
+        out.write("      me.commit();");
+        out.write("    }");
+        out.write("  }");
+    }
+
+    private static void printConstructor2(Printer out, EventElement event) {
+        if (!event.startTime) {
+            out.write("");
+            out.write("");
+        }
+        if (event.startTime) {
+            out.write("");
+            out.write("  Event" + event.name + "(");
+            StringJoiner sj = new StringJoiner(",\n    ");
+            for (FieldElement f : event.fields) {
+                sj.add(f.getParameterType() + " " + f.name);
+            }
+            out.write("    " + sj.toString() + ") : JfrEvent<Event" + event.name + ">(TIMED) {");
+            out.write("    if (should_commit()) {");
+            for (FieldElement f : event.fields) {
+                out.write("      set_" + f.name + "(" + f.name + ");");
+            }
+            out.write("    }");
+            out.write("  }");
+        }
+    }
+
+    private static void printField(Printer out, FieldElement field) {
+        out.write("  " + field.getFieldType() + " _" + field.name + ";");
+    }
+}
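
For orientation only: a minimal sketch of how VM code would use a class emitted by
printEvent() above. The event name "Example" and its "count" field are hypothetical;
the members used (the TIMED default constructor, the generated setter, should_commit()
and commit()) mirror the code generated by printEvent() and printCommitMethod().

    EventExample event;            // default constructor is EventExample(EventStartTime timing=TIMED)
    event.set_count(42);           // one generated set_<field> per declared field
    if (event.should_commit()) {
      event.commit();              // fields are written through the generated writeData()
    }
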
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/dcmd/jfrDcmds.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,663 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/javaClasses.hpp"
+#include "classfile/vmSymbols.hpp"
+#include "jfr/jfr.hpp"
+#include "jfr/dcmd/jfrDcmds.hpp"
+#include "jfr/jni/jfrJavaSupport.hpp"
+#include "jfr/recorder/jfrRecorder.hpp"
+#include "jfr/recorder/service/jfrOptionSet.hpp"
+#include "memory/resourceArea.hpp"
+#include "oops/oop.inline.hpp"
+#include "oops/symbol.hpp"
+#include "runtime/handles.inline.hpp"
+#include "services/diagnosticArgument.hpp"
+#include "services/diagnosticFramework.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+#ifdef _WINDOWS
+#define JFR_FILENAME_EXAMPLE "C:\\Users\\user\\My Recording.jfr"
+#endif
+
+#ifdef __APPLE__
+#define JFR_FILENAME_EXAMPLE  "/Users/user/My Recording.jfr"
+#endif
+
+#ifndef JFR_FILENAME_EXAMPLE
+#define JFR_FILENAME_EXAMPLE "/home/user/My Recording.jfr"
+#endif
+
+// JNIHandle management
+
+// ------------------------------------------------------------------
+// push_jni_handle_block
+//
+// Push on a new block of JNI handles.
+static void push_jni_handle_block(Thread* const thread) {
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(thread));
+
+  // Allocate a new block for JNI handles.
+  // Inlined code from jni_PushLocalFrame()
+  JNIHandleBlock* prev_handles = thread->active_handles();
+  JNIHandleBlock* entry_handles = JNIHandleBlock::allocate_block(thread);
+  assert(entry_handles != NULL && prev_handles != NULL, "should not be NULL");
+  entry_handles->set_pop_frame_link(prev_handles);  // make sure prev handles get gc'd.
+  thread->set_active_handles(entry_handles);
+}
+
+// ------------------------------------------------------------------
+// pop_jni_handle_block
+//
+// Pop off the current block of JNI handles.
+static void pop_jni_handle_block(Thread* const thread) {
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(thread));
+
+  // Release our JNI handle block
+  JNIHandleBlock* entry_handles = thread->active_handles();
+  JNIHandleBlock* prev_handles = entry_handles->pop_frame_link();
+  // restore
+  thread->set_active_handles(prev_handles);
+  entry_handles->set_pop_frame_link(NULL);
+  JNIHandleBlock::release_block(entry_handles, thread); // may block
+}
+
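+// RAII helper: pushes a fresh block of JNI handles on construction and pops it on
+// destruction, bracketing the Java upcalls made by the dcmds below.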
+class JNIHandleBlockManager : public StackObj {
+ private:
+  Thread* const _thread;
+ public:
+  JNIHandleBlockManager(Thread* thread) : _thread(thread) {
+    push_jni_handle_block(_thread);
+  }
+
+  ~JNIHandleBlockManager() {
+    pop_jni_handle_block(_thread);
+  }
+};
+
+static bool is_disabled(outputStream* output) {
+  if (Jfr::is_disabled()) {
+    if (output != NULL) {
+      output->print_cr("Flight Recorder is disabled.\n");
+    }
+    return true;
+  }
+  return false;
+}
+
+static bool is_recorder_instance_created(outputStream* output) {
+  if (!JfrRecorder::is_created()) {
+    if (output != NULL) {
+      output->print_cr("No available recordings.\n");
+      output->print_cr("Use JFR.start to start a recording.\n");
+    }
+    return false;
+  }
+  return true;
+}
+
+static bool invalid_state(outputStream* out, TRAPS) {
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
+  return is_disabled(out);
+}
+
+static void print_pending_exception(outputStream* output, oop throwable) {
+  assert(throwable != NULL, "invariant");
+
+  oop msg = java_lang_Throwable::message(throwable);
+
+  if (msg != NULL) {
+    char* text = java_lang_String::as_utf8_string(msg);
+    output->print_raw_cr(text);
+  }
+}
+
+static void print_message(outputStream* output, const char* message) {
+  if (message != NULL) {
+    output->print_raw(message);
+  }
+}
+
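+// Report the outcome of a dcmd upcall: print a pending exception (clearing it unless
+// the command was invoked internally at startup), otherwise print the returned
+// java.lang.String result.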
+static void handle_dcmd_result(outputStream* output,
+                               const oop result,
+                               const DCmdSource source,
+                               TRAPS) {
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
+  assert(output != NULL, "invariant");
+  if (HAS_PENDING_EXCEPTION) {
+    print_pending_exception(output, PENDING_EXCEPTION);
+    // Don't clear the exception on startup; the JVM should fail initialization.
+    if (DCmd_Source_Internal != source) {
+      CLEAR_PENDING_EXCEPTION;
+    }
+    return;
+  }
+
+  assert(!HAS_PENDING_EXCEPTION, "invariant");
+
+  if (result != NULL) {
+    const char* result_chars = java_lang_String::as_utf8_string(result);
+    print_message(output, result_chars);
+  }
+}
+
+static oop construct_dcmd_instance(JfrJavaArguments* args, TRAPS) {
+  assert(args != NULL, "invariant");
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
+  assert(args->klass() != NULL, "invariant");
+  args->set_name("<init>", CHECK_NULL);
+  args->set_signature("()V", CHECK_NULL);
+  JfrJavaSupport::new_object(args, CHECK_NULL);
+  return (oop)args->result()->get_jobject();
+}
+
+JfrDumpFlightRecordingDCmd::JfrDumpFlightRecordingDCmd(outputStream* output,
+                                                       bool heap) : DCmdWithParser(output, heap),
+  _name("name", "Recording name, e.g. \\\"My Recording\\\"", "STRING", false, NULL),
+  _filename("filename", "Copy recording data to file, e.g. \\\"" JFR_FILENAME_EXAMPLE "\\\"", "STRING", false),
+  _maxage("maxage", "Maximum duration to dump, in (s)econds, (m)inutes, (h)ours, or (d)ays, e.g. 60m, or 0 for no limit", "NANOTIME", false, "0"),
+  _maxsize("maxsize", "Maximum amount of bytes to dump, in (M)B or (G)B, e.g. 500M, or 0 for no limit", "MEMORY SIZE", false, "0"),
+  _begin("begin", "Point in time to dump data from, e.g. 09:00, 21:35:00, 2018-06-03T18:12:56.827Z, 2018-06-03T20:13:46.832, -10m, -3h, or -1d", "STRING", false),
+  _end("end", "Point in time to dump data to, e.g. 09:00, 21:35:00, 2018-06-03T18:12:56.827Z, 2018-06-03T20:13:46.832, -10m, -3h, or -1d", "STRING", false),
+  _path_to_gc_roots("path-to-gc-roots", "Collect path to GC roots", "BOOLEAN", false, "false") {
+  _dcmdparser.add_dcmd_option(&_name);
+  _dcmdparser.add_dcmd_option(&_filename);
+  _dcmdparser.add_dcmd_option(&_maxage);
+  _dcmdparser.add_dcmd_option(&_maxsize);
+  _dcmdparser.add_dcmd_option(&_begin);
+  _dcmdparser.add_dcmd_option(&_end);
+  _dcmdparser.add_dcmd_option(&_path_to_gc_roots);
+}
+
+int JfrDumpFlightRecordingDCmd::num_arguments() {
+  ResourceMark rm;
+  JfrDumpFlightRecordingDCmd* dcmd = new JfrDumpFlightRecordingDCmd(NULL, false);
+  if (dcmd != NULL) {
+    DCmdMark mark(dcmd);
+    return dcmd->_dcmdparser.num_arguments();
+  }
+  return 0;
+}
+
+void JfrDumpFlightRecordingDCmd::execute(DCmdSource source, TRAPS) {
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
+
+  if (invalid_state(output(), THREAD) || !is_recorder_instance_created(output())) {
+    return;
+  }
+
+  ResourceMark rm(THREAD);
+  HandleMark hm(THREAD);
+  JNIHandleBlockManager jni_handle_management(THREAD);
+
+  JavaValue result(T_OBJECT);
+  JfrJavaArguments constructor_args(&result);
+  constructor_args.set_klass("jdk/jfr/internal/dcmd/DCmdDump", CHECK);
+  const oop dcmd = construct_dcmd_instance(&constructor_args, CHECK);
+  Handle h_dcmd_instance(THREAD, dcmd);
+  assert(h_dcmd_instance.not_null(), "invariant");
+
+  jstring name = NULL;
+  if (_name.is_set() && _name.value() != NULL) {
+    name = JfrJavaSupport::new_string(_name.value(), CHECK);
+  }
+
+  jstring filepath = NULL;
+  if (_filename.is_set() && _filename.value() != NULL) {
+    filepath = JfrJavaSupport::new_string(_filename.value(), CHECK);
+  }
+
+  jobject maxage = NULL;
+  if (_maxage.is_set()) {
+    maxage = JfrJavaSupport::new_java_lang_Long(_maxage.value()._nanotime, CHECK);
+  }
+
+  jobject maxsize = NULL;
+  if (_maxsize.is_set()) {
+    maxsize = JfrJavaSupport::new_java_lang_Long(_maxsize.value()._size, CHECK);
+  }
+
+  jstring begin = NULL;
+  if (_begin.is_set() && _begin.value() != NULL) {
+    begin = JfrJavaSupport::new_string(_begin.value(), CHECK);
+  }
+
+  jstring end = NULL;
+  if (_end.is_set() && _end.value() != NULL) {
+    end = JfrJavaSupport::new_string(_end.value(), CHECK);
+  }
+
+  jobject path_to_gc_roots = NULL;
+  if (_path_to_gc_roots.is_set()) {
+    path_to_gc_roots = JfrJavaSupport::new_java_lang_Boolean(_path_to_gc_roots.value(), CHECK);
+  }
+
+  static const char klass[] = "jdk/jfr/internal/dcmd/DCmdDump";
+  static const char method[] = "execute";
+  static const char signature[] = "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/Long;Ljava/lang/Long;Ljava/lang/String;Ljava/lang/String;Ljava/lang/Boolean;)Ljava/lang/String;";
+
+  JfrJavaArguments execute_args(&result, klass, method, signature, CHECK);
+  execute_args.set_receiver(h_dcmd_instance);
+
+  // arguments
+  execute_args.push_jobject(name);
+  execute_args.push_jobject(filepath);
+  execute_args.push_jobject(maxage);
+  execute_args.push_jobject(maxsize);
+  execute_args.push_jobject(begin);
+  execute_args.push_jobject(end);
+  execute_args.push_jobject(path_to_gc_roots);
+
+  JfrJavaSupport::call_virtual(&execute_args, THREAD);
+  handle_dcmd_result(output(), (oop)result.get_jobject(), source, THREAD);
+}
+
+JfrCheckFlightRecordingDCmd::JfrCheckFlightRecordingDCmd(outputStream* output, bool heap) : DCmdWithParser(output, heap),
+  _name("name","Recording name, e.g. \\\"My Recording\\\" or omit to see all recordings","STRING",false, NULL),
+  _verbose("verbose","Print event settings for the recording(s)","BOOLEAN",
+           false, "false") {
+  _dcmdparser.add_dcmd_option(&_name);
+  _dcmdparser.add_dcmd_option(&_verbose);
+}
+
+int JfrCheckFlightRecordingDCmd::num_arguments() {
+  ResourceMark rm;
+  JfrCheckFlightRecordingDCmd* dcmd = new JfrCheckFlightRecordingDCmd(NULL, false);
+  if (dcmd != NULL) {
+    DCmdMark mark(dcmd);
+    return dcmd->_dcmdparser.num_arguments();
+  }
+  return 0;
+}
+
+void JfrCheckFlightRecordingDCmd::execute(DCmdSource source, TRAPS) {
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
+
+  if (invalid_state(output(), THREAD) || !is_recorder_instance_created(output())) {
+    return;
+  }
+
+  ResourceMark rm(THREAD);
+  HandleMark hm(THREAD);
+  JNIHandleBlockManager jni_handle_management(THREAD);
+
+  JavaValue result(T_OBJECT);
+  JfrJavaArguments constructor_args(&result);
+  constructor_args.set_klass("jdk/jfr/internal/dcmd/DCmdCheck", CHECK);
+  const oop dcmd = construct_dcmd_instance(&constructor_args, CHECK);
+  Handle h_dcmd_instance(THREAD, dcmd);
+  assert(h_dcmd_instance.not_null(), "invariant");
+
+  jstring name = NULL;
+  if (_name.is_set() && _name.value() != NULL) {
+    name = JfrJavaSupport::new_string(_name.value(), CHECK);
+  }
+
+  jobject verbose = NULL;
+  if (_verbose.is_set()) {
+    verbose = JfrJavaSupport::new_java_lang_Boolean(_verbose.value(), CHECK);
+  }
+
+  static const char klass[] = "jdk/jfr/internal/dcmd/DCmdCheck";
+  static const char method[] = "execute";
+  static const char signature[] = "(Ljava/lang/String;Ljava/lang/Boolean;)Ljava/lang/String;";
+
+  JfrJavaArguments execute_args(&result, klass, method, signature, CHECK);
+  execute_args.set_receiver(h_dcmd_instance);
+
+  // arguments
+  execute_args.push_jobject(name);
+  execute_args.push_jobject(verbose);
+
+  JfrJavaSupport::call_virtual(&execute_args, THREAD);
+  handle_dcmd_result(output(), (oop)result.get_jobject(), source, THREAD);
+}
+
+JfrStartFlightRecordingDCmd::JfrStartFlightRecordingDCmd(outputStream* output,
+                                                         bool heap) : DCmdWithParser(output, heap),
+  _name("name", "Name that can be used to identify recording, e.g. \\\"My Recording\\\"", "STRING", false, NULL),
+  _settings("settings", "Settings file(s), e.g. profile or default. See JRE_HOME/lib/jfr", "STRING SET", false),
+  _delay("delay", "Delay recording start with (s)econds, (m)inutes), (h)ours), or (d)ays, e.g. 5h.", "NANOTIME", false, "0"),
+  _duration("duration", "Duration of recording in (s)econds, (m)inutes, (h)ours, or (d)ays, e.g. 300s.", "NANOTIME", false, "0"),
+  _filename("filename", "Resulting recording filename, e.g. \\\"" JFR_FILENAME_EXAMPLE "\\\"", "STRING", false),
+  _disk("disk", "Recording should be persisted to disk", "BOOLEAN", false),
+  _maxage("maxage", "Maximum time to keep recorded data (on disk) in (s)econds, (m)inutes, (h)ours, or (d)ays, e.g. 60m, or 0 for no limit", "NANOTIME", false, "0"),
+  _maxsize("maxsize", "Maximum amount of bytes to keep (on disk) in (k)B, (M)B or (G)B, e.g. 500M, or 0 for no limit", "MEMORY SIZE", false, "0"),
+  _dump_on_exit("dumponexit", "Dump running recording when JVM shuts down", "BOOLEAN", false),
+  _path_to_gc_roots("path-to-gc-roots", "Collect path to GC roots", "BOOLEAN", false, "false") {
+  _dcmdparser.add_dcmd_option(&_name);
+  _dcmdparser.add_dcmd_option(&_settings);
+  _dcmdparser.add_dcmd_option(&_delay);
+  _dcmdparser.add_dcmd_option(&_duration);
+  _dcmdparser.add_dcmd_option(&_disk);
+  _dcmdparser.add_dcmd_option(&_filename);
+  _dcmdparser.add_dcmd_option(&_maxage);
+  _dcmdparser.add_dcmd_option(&_maxsize);
+  _dcmdparser.add_dcmd_option(&_dump_on_exit);
+  _dcmdparser.add_dcmd_option(&_path_to_gc_roots);
+}
+
+int JfrStartFlightRecordingDCmd::num_arguments() {
+  ResourceMark rm;
+  JfrStartFlightRecordingDCmd* dcmd = new JfrStartFlightRecordingDCmd(NULL, false);
+  if (dcmd != NULL) {
+    DCmdMark mark(dcmd);
+    return dcmd->_dcmdparser.num_arguments();
+  }
+  return 0;
+}
+
+void JfrStartFlightRecordingDCmd::execute(DCmdSource source, TRAPS) {
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
+
+  if (invalid_state(output(), THREAD)) {
+    return;
+  }
+
+  ResourceMark rm(THREAD);
+  HandleMark hm(THREAD);
+  JNIHandleBlockManager jni_handle_management(THREAD);
+
+  JavaValue result(T_OBJECT);
+  JfrJavaArguments constructor_args(&result);
+  constructor_args.set_klass("jdk/jfr/internal/dcmd/DCmdStart", THREAD);
+  const oop dcmd = construct_dcmd_instance(&constructor_args, CHECK);
+  Handle h_dcmd_instance(THREAD, dcmd);
+  assert(h_dcmd_instance.not_null(), "invariant");
+
+  jstring name = NULL;
+  if (_name.is_set() && _name.value() != NULL) {
+    name = JfrJavaSupport::new_string(_name.value(), CHECK);
+  }
+
+  jstring filename = NULL;
+  if (_filename.is_set() && _filename.value() != NULL) {
+    filename = JfrJavaSupport::new_string(_filename.value(), CHECK);
+  }
+
+  jobject maxage = NULL;
+  if (_maxage.is_set()) {
+    maxage = JfrJavaSupport::new_java_lang_Long(_maxage.value()._nanotime, CHECK);
+  }
+
+  jobject maxsize = NULL;
+  if (_maxsize.is_set()) {
+    maxsize = JfrJavaSupport::new_java_lang_Long(_maxsize.value()._size, CHECK);
+  }
+
+  jobject duration = NULL;
+  if (_duration.is_set()) {
+    duration = JfrJavaSupport::new_java_lang_Long(_duration.value()._nanotime, CHECK);
+  }
+
+  jobject delay = NULL;
+  if (_delay.is_set()) {
+    delay = JfrJavaSupport::new_java_lang_Long(_delay.value()._nanotime, CHECK);
+  }
+
+  jobject disk = NULL;
+  if (_disk.is_set()) {
+    disk = JfrJavaSupport::new_java_lang_Boolean(_disk.value(), CHECK);
+  }
+
+  jobject dump_on_exit = NULL;
+  if (_dump_on_exit.is_set()) {
+    dump_on_exit = JfrJavaSupport::new_java_lang_Boolean(_dump_on_exit.value(), CHECK);
+  }
+
+  jobject path_to_gc_roots = NULL;
+  if (_path_to_gc_roots.is_set()) {
+    path_to_gc_roots = JfrJavaSupport::new_java_lang_Boolean(_path_to_gc_roots.value(), CHECK);
+  }
+
+  jobjectArray settings = NULL;
+  if (_settings.is_set()) {
+    const int length = _settings.value()->array()->length();
+    settings = JfrJavaSupport::new_string_array(length, CHECK);
+    assert(settings != NULL, "invariant");
+    for (int i = 0; i < length; ++i) {
+      jobject element = JfrJavaSupport::new_string(_settings.value()->array()->at(i), CHECK);
+      assert(element != NULL, "invariant");
+      JfrJavaSupport::set_array_element(settings, element, i, CHECK);
+    }
+  }
+
+  static const char klass[] = "jdk/jfr/internal/dcmd/DCmdStart";
+  static const char method[] = "execute";
+  static const char signature[] = "(Ljava/lang/String;[Ljava/lang/String;Ljava/lang/Long;"
+    "Ljava/lang/Long;Ljava/lang/Boolean;Ljava/lang/String;"
+    "Ljava/lang/Long;Ljava/lang/Long;Ljava/lang/Boolean;Ljava/lang/Boolean;)Ljava/lang/String;";
+
+  JfrJavaArguments execute_args(&result, klass, method, signature, CHECK);
+  execute_args.set_receiver(h_dcmd_instance);
+
+  // arguments
+  execute_args.push_jobject(name);
+  execute_args.push_jobject(settings);
+  execute_args.push_jobject(delay);
+  execute_args.push_jobject(duration);
+  execute_args.push_jobject(disk);
+  execute_args.push_jobject(filename);
+  execute_args.push_jobject(maxage);
+  execute_args.push_jobject(maxsize);
+  execute_args.push_jobject(dump_on_exit);
+  execute_args.push_jobject(path_to_gc_roots);
+
+  JfrJavaSupport::call_virtual(&execute_args, THREAD);
+  handle_dcmd_result(output(), (oop)result.get_jobject(), source, THREAD);
+}
+
+JfrStopFlightRecordingDCmd::JfrStopFlightRecordingDCmd(outputStream* output,
+                                                       bool heap) : DCmdWithParser(output, heap),
+  _name("name", "Recording text,.e.g \\\"My Recording\\\"", "STRING", true, NULL),
+  _filename("filename", "Copy recording data to file, e.g. \\\"" JFR_FILENAME_EXAMPLE "\\\"", "STRING", false, NULL) {
+  _dcmdparser.add_dcmd_option(&_name);
+  _dcmdparser.add_dcmd_option(&_filename);
+}
+
+int JfrStopFlightRecordingDCmd::num_arguments() {
+  ResourceMark rm;
+  JfrStopFlightRecordingDCmd* dcmd = new JfrStopFlightRecordingDCmd(NULL, false);
+  if (dcmd != NULL) {
+    DCmdMark mark(dcmd);
+    return dcmd->_dcmdparser.num_arguments();
+  }
+  return 0;
+}
+
+void JfrStopFlightRecordingDCmd::execute(DCmdSource source, TRAPS) {
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
+
+  if (invalid_state(output(), THREAD) || !is_recorder_instance_created(output())) {
+    return;
+  }
+
+  ResourceMark rm(THREAD);
+  HandleMark hm(THREAD);
+  JNIHandleBlockManager jni_handle_management(THREAD);
+
+  JavaValue result(T_OBJECT);
+  JfrJavaArguments constructor_args(&result);
+  constructor_args.set_klass("jdk/jfr/internal/dcmd/DCmdStop", CHECK);
+  const oop dcmd = construct_dcmd_instance(&constructor_args, CHECK);
+  Handle h_dcmd_instance(THREAD, dcmd);
+  assert(h_dcmd_instance.not_null(), "invariant");
+
+  jstring name = NULL;
+  if (_name.is_set() && _name.value() != NULL) {
+    name = JfrJavaSupport::new_string(_name.value(), CHECK);
+  }
+
+  jstring filepath = NULL;
+  if (_filename.is_set() && _filename.value() != NULL) {
+    filepath = JfrJavaSupport::new_string(_filename.value(), CHECK);
+  }
+
+  static const char klass[] = "jdk/jfr/internal/dcmd/DCmdStop";
+  static const char method[] = "execute";
+  static const char signature[] = "(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;";
+
+  JfrJavaArguments execute_args(&result, klass, method, signature, CHECK);
+  execute_args.set_receiver(h_dcmd_instance);
+
+  // arguments
+  execute_args.push_jobject(name);
+  execute_args.push_jobject(filepath);
+
+  JfrJavaSupport::call_virtual(&execute_args, THREAD);
+  handle_dcmd_result(output(), (oop)result.get_jobject(), source, THREAD);
+}
+
+JfrConfigureFlightRecorderDCmd::JfrConfigureFlightRecorderDCmd(outputStream* output,
+                                                               bool heap) : DCmdWithParser(output, heap),
+  _repository_path("repositorypath", "Path to repository,.e.g \\\"My Repository\\\"", "STRING", false, NULL),
+  _dump_path("dumppath", "Path to dump,.e.g \\\"My Dump path\\\"", "STRING", false, NULL),
+  _stack_depth("stackdepth", "Stack Depth", "JLONG", false, "64"),
+  _global_buffer_count("globalbuffercount", "Number of global buffers,", "JLONG", false, "32"),
+  _global_buffer_size("globalbuffersize", "Size of a global buffers,", "JLONG", false, "524288"),
+  _thread_buffer_size("thread_buffer_size", "Size of a thread buffer", "JLONG", false, "8192"),
+  _memory_size("memorysize", "Overall memory size, ", "JLONG", false, "16777216"),
+  _max_chunk_size("maxchunksize", "Size of an individual disk chunk", "JLONG", false, "12582912"),
+  _sample_threads("samplethreads", "Activate Thread sampling", "BOOLEAN", false, "true") {
+  _dcmdparser.add_dcmd_option(&_repository_path);
+  _dcmdparser.add_dcmd_option(&_dump_path);
+  _dcmdparser.add_dcmd_option(&_stack_depth);
+  _dcmdparser.add_dcmd_option(&_global_buffer_count);
+  _dcmdparser.add_dcmd_option(&_global_buffer_size);
+  _dcmdparser.add_dcmd_option(&_thread_buffer_size);
+  _dcmdparser.add_dcmd_option(&_memory_size);
+  _dcmdparser.add_dcmd_option(&_max_chunk_size);
+  _dcmdparser.add_dcmd_option(&_sample_threads);
+}
+
+int JfrConfigureFlightRecorderDCmd::num_arguments() {
+  ResourceMark rm;
+  JfrConfigureFlightRecorderDCmd* dcmd = new JfrConfigureFlightRecorderDCmd(NULL, false);
+  if (dcmd != NULL) {
+    DCmdMark mark(dcmd);
+    return dcmd->_dcmdparser.num_arguments();
+  }
+  return 0;
+}
+
+void JfrConfigureFlightRecorderDCmd::execute(DCmdSource source, TRAPS) {
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
+
+  if (invalid_state(output(), THREAD)) {
+    return;
+  }
+
+  ResourceMark rm(THREAD);
+  HandleMark hm(THREAD);
+  JNIHandleBlockManager jni_handle_management(THREAD);
+
+  JavaValue result(T_OBJECT);
+  JfrJavaArguments constructor_args(&result);
+  constructor_args.set_klass("jdk/jfr/internal/dcmd/DCmdConfigure", CHECK);
+  const oop dcmd = construct_dcmd_instance(&constructor_args, CHECK);
+  Handle h_dcmd_instance(THREAD, dcmd);
+  assert(h_dcmd_instance.not_null(), "invariant");
+
+  jstring repository_path = NULL;
+  if (_repository_path.is_set() && _repository_path.value() != NULL) {
+    repository_path = JfrJavaSupport::new_string(_repository_path.value(), CHECK);
+  }
+
+  jstring dump_path = NULL;
+  if (_dump_path.is_set() && _dump_path.value() != NULL) {
+    dump_path = JfrJavaSupport::new_string(_dump_path.value(), CHECK);
+  }
+
+  jobject stack_depth = NULL;
+  if (_stack_depth.is_set()) {
+    stack_depth = JfrJavaSupport::new_java_lang_Integer((jint)_stack_depth.value(), CHECK);
+  }
+
+  jobject global_buffer_count = NULL;
+  if (_global_buffer_count.is_set()) {
+    global_buffer_count = JfrJavaSupport::new_java_lang_Long(_global_buffer_count.value(), CHECK);
+  }
+
+  jobject global_buffer_size = NULL;
+  if (_global_buffer_size.is_set()) {
+    global_buffer_size = JfrJavaSupport::new_java_lang_Long(_global_buffer_size.value(), CHECK);
+  }
+
+  jobject thread_buffer_size = NULL;
+  if (_thread_buffer_size.is_set()) {
+    thread_buffer_size = JfrJavaSupport::new_java_lang_Long(_thread_buffer_size.value(), CHECK);
+  }
+
+  jobject max_chunk_size = NULL;
+  if (_max_chunk_size.is_set()) {
+    max_chunk_size = JfrJavaSupport::new_java_lang_Long(_max_chunk_size.value(), CHECK);
+  }
+
+  jobject memory_size = NULL;
+  if (_memory_size.is_set()) {
+    memory_size = JfrJavaSupport::new_java_lang_Long(_memory_size.value(), CHECK);
+  }
+
+  jobject sample_threads = NULL;
+  if (_sample_threads.is_set()) {
+    sample_threads = JfrJavaSupport::new_java_lang_Boolean(_sample_threads.value(), CHECK);
+  }
+
+  static const char klass[] = "jdk/jfr/internal/dcmd/DCmdConfigure";
+  static const char method[] = "execute";
+  static const char signature[] = "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/Integer;"
+    "Ljava/lang/Long;Ljava/lang/Long;Ljava/lang/Long;Ljava/lang/Long;"
+    "Ljava/lang/Long;Ljava/lang/Boolean;)Ljava/lang/String;";
+
+  JfrJavaArguments execute_args(&result, klass, method, signature, CHECK);
+  execute_args.set_receiver(h_dcmd_instance);
+
+  // params
+  execute_args.push_jobject(repository_path);
+  execute_args.push_jobject(dump_path);
+  execute_args.push_jobject(stack_depth);
+  execute_args.push_jobject(global_buffer_count);
+  execute_args.push_jobject(global_buffer_size);
+  execute_args.push_jobject(thread_buffer_size);
+  execute_args.push_jobject(memory_size);
+  execute_args.push_jobject(max_chunk_size);
+  execute_args.push_jobject(sample_threads);
+
+  JfrJavaSupport::call_virtual(&execute_args, THREAD);
+  handle_dcmd_result(output(), (oop)result.get_jobject(), source, THREAD);
+}
+
+bool register_jfr_dcmds() {
+  uint32_t full_export = DCmd_Source_Internal | DCmd_Source_AttachAPI | DCmd_Source_MBean;
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<JfrCheckFlightRecordingDCmd>(full_export, true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<JfrDumpFlightRecordingDCmd>(full_export, true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<JfrStartFlightRecordingDCmd>(full_export, true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<JfrStopFlightRecordingDCmd>(full_export, true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<JfrConfigureFlightRecorderDCmd>(full_export, true, false));
+
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<JfrUnlockCommercialFeaturesDCmd>(full_export, true, false));
+  return true;
+}
+
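
As a usage sketch (process id, recording name, and file path are examples only), the
diagnostic commands registered above are reachable through jcmd once the VM is running;
on builds that gate JFR behind commercial features, VM.unlock_commercial_features (also
registered above) may need to be issued first:

    jcmd <pid> JFR.start name=MyRecording duration=60s filename=/tmp/MyRecording.jfr
    jcmd <pid> JFR.check verbose=true
    jcmd <pid> JFR.stop name=MyRecording filename=/tmp/MyRecording.jfr
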
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/dcmd/jfrDcmds.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_JFRDCMDS_HPP
+#define SHARE_VM_JFR_JFRDCMDS_HPP
+
+#include "services/diagnosticCommand.hpp"
+
+class JfrDumpFlightRecordingDCmd : public DCmdWithParser {
+ protected:
+  DCmdArgument<char*> _name;
+  DCmdArgument<char*> _filename;
+  DCmdArgument<NanoTimeArgument> _maxage;
+  DCmdArgument<MemorySizeArgument> _maxsize;
+  DCmdArgument<char*> _begin;
+  DCmdArgument<char*> _end;
+  DCmdArgument<bool>  _path_to_gc_roots;
+
+ public:
+  JfrDumpFlightRecordingDCmd(outputStream* output, bool heap);
+  static const char* name() {
+    return "JFR.dump";
+  }
+  static const char* description() {
+    return "Copies contents of a JFR recording to file. Either the name or the recording id must be specified.";
+  }
+  static const char* impact() {
+    return "Low";
+  }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", NULL};
+    return p;
+  }
+  static int num_arguments();
+  virtual void execute(DCmdSource source, TRAPS);
+};
+
+class JfrCheckFlightRecordingDCmd : public DCmdWithParser {
+ protected:
+  DCmdArgument<char*> _name;
+  DCmdArgument<bool>  _verbose;
+
+ public:
+  JfrCheckFlightRecordingDCmd(outputStream* output, bool heap);
+  static const char* name() {
+    return "JFR.check";
+  }
+  static const char* description() {
+    return "Checks running JFR recording(s)";
+  }
+  static const char* impact() {
+    return "Low";
+  }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", NULL};
+    return p;
+  }
+  static int num_arguments();
+  virtual void execute(DCmdSource source, TRAPS);
+};
+
+class JfrStartFlightRecordingDCmd : public DCmdWithParser {
+ protected:
+  DCmdArgument<char*> _name;
+  DCmdArgument<StringArrayArgument*> _settings;
+  DCmdArgument<NanoTimeArgument> _delay;
+  DCmdArgument<NanoTimeArgument> _duration;
+  DCmdArgument<bool> _disk;
+  DCmdArgument<char*> _filename;
+  DCmdArgument<NanoTimeArgument> _maxage;
+  DCmdArgument<MemorySizeArgument> _maxsize;
+  DCmdArgument<bool> _dump_on_exit;
+  DCmdArgument<bool> _path_to_gc_roots;
+
+ public:
+  JfrStartFlightRecordingDCmd(outputStream* output, bool heap);
+  static const char* name() {
+    return "JFR.start";
+  }
+  static const char* description() {
+    return "Starts a new JFR recording";
+  }
+  static const char* impact() {
+    return "Medium: Depending on the settings for a recording, the impact can range from low to high.";
+  }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", NULL};
+    return p;
+  }
+  static int num_arguments();
+  virtual void execute(DCmdSource source, TRAPS);
+};
+
+class JfrStopFlightRecordingDCmd : public DCmdWithParser {
+ protected:
+  DCmdArgument<char*> _name;
+  DCmdArgument<char*> _filename;
+
+ public:
+  JfrStopFlightRecordingDCmd(outputStream* output, bool heap);
+  static const char* name() {
+    return "JFR.stop";
+  }
+  static const char* description() {
+    return "Stops a JFR recording";
+  }
+  static const char* impact() {
+    return "Low";
+  }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", NULL};
+    return p;
+  }
+  static int num_arguments();
+  virtual void execute(DCmdSource source, TRAPS);
+};
+
+class JfrRuntimeOptions;
+
+class JfrConfigureFlightRecorderDCmd : public DCmdWithParser {
+  friend class JfrOptionSet;
+ protected:
+  DCmdArgument<char*> _repository_path;
+  DCmdArgument<char*> _dump_path;
+  DCmdArgument<jlong> _stack_depth;
+  DCmdArgument<jlong> _global_buffer_count;
+  DCmdArgument<jlong> _global_buffer_size;
+  DCmdArgument<jlong> _thread_buffer_size;
+  DCmdArgument<jlong> _memory_size;
+  DCmdArgument<jlong> _max_chunk_size;
+  DCmdArgument<bool>  _sample_threads;
+
+ public:
+  JfrConfigureFlightRecorderDCmd(outputStream* output, bool heap);
+  static const char* name() {
+    return "JFR.configure";
+  }
+  static const char* description() {
+    return "Configure JFR";
+  }
+  static const char* impact() {
+    return "Low";
+  }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", NULL};
+    return p;
+  }
+  static int num_arguments();
+  virtual void execute(DCmdSource source, TRAPS);
+};
+
+class JfrUnlockCommercialFeaturesDCmd : public DCmd {
+ public:
+  JfrUnlockCommercialFeaturesDCmd(outputStream* output, bool heap) : DCmd(output, heap) { }
+  static const char* name() { return "VM.unlock_commercial_features"; }
+  static const char* description() {
+    return "Simulate commercial features unlocking for Zulu.";
+  }
+  static const char* impact() { return "Low"; }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.lang.management.ManagementPermission",
+                        "monitor", NULL};
+    return p;
+  }
+  static int num_arguments() { return 0; }
+  virtual void execute(DCmdSource source, TRAPS) {
+    UnlockCommercialFeatures = true;
+  }
+};
+
+bool register_jfr_dcmds();
+
+#endif // SHARE_VM_JFR_JFRDCMDS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/instrumentation/jfrEventClassTransformer.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,1551 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jvm.h"
+#include "classfile/classFileParser.hpp"
+#include "classfile/classFileStream.hpp"
+#include "classfile/javaClasses.hpp"
+#include "classfile/stackMapTable.hpp"
+#include "classfile/verificationType.hpp"
+#include "interpreter/bytecodes.hpp"
+#include "jfr/instrumentation/jfrEventClassTransformer.hpp"
+#include "jfr/jfr.hpp"
+#include "jfr/jni/jfrJavaSupport.hpp"
+#include "jfr/jni/jfrUpcalls.hpp"
+#include "jfr/support/jfrEventClass.hpp"
+#include "jfr/utilities/jfrBigEndian.hpp"
+#include "jfr/writers/jfrBigEndianWriter.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/resourceArea.hpp"
+//#include "oops/array.hpp"
+#include "oops/instanceKlass.hpp"
+#include "oops/method.hpp"
+#include "prims/jvmtiRedefineClasses.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/os.hpp"
+#include "runtime/thread.inline.hpp"
+#include "utilities/exceptions.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+
+static const u2 number_of_new_methods = 5;
+static const u2 number_of_new_fields = 3;
+static const int extra_stream_bytes = 0x280;
+static const u2 invalid_cp_index = 0;
+
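+// UTF-8 symbols used when rewriting event classes; the indices in the trailing comments
+// must match the utf8_req_symbols / utf8_opt_symbols enums below.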
+static const char* utf8_constants[] = {
+  "Code",         // 0
+  "J",            // 1
+  "commit",       // 2
+  "eventHandler", // 3
+  "Ljdk/jfr/internal/handlers/EventHandler;", // 4
+  "duration",     // 5
+  "begin",        // 6
+  "()V",          // 7
+  "isEnabled",    // 8
+  "()Z",          // 9
+  "end",          // 10
+  "shouldCommit", // 11
+  "startTime",    // 12
+  "<clinit>",     // 13
+  "jdk/jfr/FlightRecorder", // 14
+  "register", // 15
+  "(Ljava/lang/Class;)V", // 16 // LAST_REQUIRED_UTF8
+  "StackMapTable", // 17
+  "Exceptions", // 18
+  "LineNumberTable", // 20
+  "LocalVariableTable", // 21
+  "LocalVariableTypeTable", // 22
+  "RuntimeVisibleAnnotation" // 23
+};
+
+enum utf8_req_symbols {
+  UTF8_REQ_Code,
+  UTF8_REQ_J_FIELD_DESC,
+  UTF8_REQ_commit,
+  UTF8_REQ_eventHandler,
+  UTF8_REQ_eventHandler_FIELD_DESC,
+  UTF8_REQ_duration,
+  UTF8_REQ_begin,
+  UTF8_REQ_EMPTY_VOID_METHOD_DESC,
+  UTF8_REQ_isEnabled,
+  UTF8_REQ_EMPTY_BOOLEAN_METHOD_DESC,
+  UTF8_REQ_end,
+  UTF8_REQ_shouldCommit,
+  UTF8_REQ_startTime,
+  UTF8_REQ_clinit,
+  UTF8_REQ_FlightRecorder,
+  UTF8_REQ_register,
+  UTF8_REQ_CLASS_VOID_METHOD_DESC,
+  NOF_UTF8_REQ_SYMBOLS
+};
+
+enum utf8_opt_symbols {
+  UTF8_OPT_StackMapTable = NOF_UTF8_REQ_SYMBOLS,
+  UTF8_OPT_Exceptions,
+  UTF8_OPT_LineNumberTable,
+  UTF8_OPT_LocalVariableTable,
+  UTF8_OPT_LocalVariableTypeTable,
+  UTF8_OPT_RuntimeVisibleAnnotation,
+  NOF_UTF8_SYMBOLS
+};
+
+static u1 empty_void_method_code_attribute[] = {
+  0x0,
+  0x0,
+  0x0,
+  0xd, // attribute len
+  0x0,
+  0x0, // max stack
+  0x0,
+  0x1, // max locals
+  0x0,
+  0x0,
+  0x0,
+  0x1, // code length
+  Bytecodes::_return,
+  0x0,
+  0x0, // ex table len
+  0x0,
+  0x0  // attributes_count
+};
+
+static u1 boolean_method_code_attribute[] = {
+  0x0,
+  0x0,
+  0x0,
+  0xe,
+  0x0,
+  0x1, // max stack
+  0x0,
+  0x1, // max locals
+  0x0,
+  0x0,
+  0x0,
+  0x2,
+  Bytecodes::_iconst_0,
+  Bytecodes::_ireturn,
+  0x0,
+  0x0, // ex table len
+  0x0,
+  0x0, // attributes_count
+};
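+
+// Pre-assembled Code attributes (starting at attribute_length; the
+// attribute_name_index is written separately) for the injected stub methods:
+// the void variant backs begin()/end()/commit(), the boolean variant backs
+// isEnabled()/shouldCommit() and simply returns false.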
+
+// annotation processing support
+
+enum {  // initial annotation layout
+  atype_off = 0,      // utf8 such as 'Ljava/lang/annotation/Retention;'
+  count_off = 2,      // u2   such as 1 (one value)
+  member_off = 4,     // utf8 such as 'value'
+  tag_off = 6,        // u1   such as 'c' (type) or 'e' (enum)
+  e_tag_val = 'e',
+  e_type_off = 7,   // utf8 such as 'Ljava/lang/annotation/RetentionPolicy;'
+  e_con_off = 9,    // utf8 payload, such as 'SOURCE', 'CLASS', 'RUNTIME'
+  e_size = 11,     // end of 'e' annotation
+  c_tag_val = 'c',    // payload is type
+  c_con_off = 7,    // utf8 payload, such as 'I'
+  c_size = 9,       // end of 'c' annotation
+  s_tag_val = 's',    // payload is String
+  s_con_off = 7,    // utf8 payload, such as 'Ljava/lang/String;'
+  s_size = 9,
+  min_size = 6        // smallest possible size (zero members)
+};
+
+static int skip_annotation_value(const address, int, int); // fwd decl
+
+// Skip an annotation.  Return >=limit if there is any problem.
+static int next_annotation_index(const address buffer, int limit, int index) {
+  assert(buffer != NULL, "invariant");
+  index += 2;  // skip atype
+  if ((index += 2) >= limit) {
+    return limit;
+  }
+  int nof_members = JfrBigEndian::read<u2>(buffer + index - 2);
+  while (--nof_members >= 0 && index < limit) {
+    index += 2; // skip member
+    index = skip_annotation_value(buffer, limit, index);
+  }
+  return index;
+}
+
+// Skip an annotation value.  Return >=limit if there is any problem.
+static int skip_annotation_value(const address buffer, int limit, int index) {
+  assert(buffer != NULL, "invariant");
+  // value := switch (tag:u1) {
+  //   case B, C, I, S, Z, D, F, J, c: con:u2;
+  //   case e: e_class:u2 e_name:u2;
+  //   case s: s_con:u2;
+  //   case [: do(nval:u2) {value};
+  //   case @: annotation;
+  // }
+  if ((index += 1) >= limit) {
+    return limit;
+  }
+  const u1 tag = buffer[index - 1];
+  switch (tag) {
+    case 'B':
+    case 'C':
+    case 'I':
+    case 'S':
+    case 'Z':
+    case 'D':
+    case 'F':
+    case 'J':
+    case 'c':
+    case 's':
+      index += 2;  // skip con or s_con
+      break;
+    case 'e':
+      index += 4;  // skip e_class, e_name
+      break;
+    case '[':
+      {
+        if ((index += 2) >= limit) {
+          return limit;
+        }
+        int nof_values = JfrBigEndian::read<u2>(buffer + index - 2);
+        while (--nof_values >= 0 && index < limit) {
+          index = skip_annotation_value(buffer, limit, index);
+        }
+      }
+      break;
+    case '@':
+      index = next_annotation_index(buffer, limit, index);
+      break;
+    default:
+      return limit;  //  bad tag byte
+  }
+  return index;
+}
+
+static const u2 number_of_elements_offset = (u2)2;
+static const u2 element_name_offset = (u2)(number_of_elements_offset + 2);
+static const u2 element_name_size = (u2)2;
+static const u2 value_type_relative_offset = (u2)2;
+static const u2 value_relative_offset = (u2)(value_type_relative_offset + 1);
+
+// see JVMS - 4.7.16. The RuntimeVisibleAnnotations Attribute
+
+class AnnotationElementIterator : public StackObj {
+ private:
+  const InstanceKlass* _ik;
+  const address _buffer;
+  const u2 _limit; // length of annotation
+  mutable u2 _current; // element
+  mutable u2 _next; // element
+  u2 value_index() const {
+    return JfrBigEndian::read<u2>(_buffer + _current + value_relative_offset);
+  }
+
+ public:
+  AnnotationElementIterator(const InstanceKlass* ik, address buffer, u2 limit) : _ik(ik),
+                                                                                 _buffer(buffer),
+                                                                                 _limit(limit),
+                                                                                 _next(element_name_offset),
+                                                                                 _current(element_name_offset) {
+    assert(_buffer != NULL, "invariant");
+    assert(_next == element_name_offset, "invariant");
+    assert(_current == element_name_offset, "invariant");
+  }
+
+  bool has_next() const {
+    return _next < _limit;
+  }
+
+  void move_to_next() const {
+    assert(has_next(), "invariant");
+    _current = _next;
+    if (_next < _limit) {
+      _next = skip_annotation_value(_buffer, _limit, _next + element_name_size);
+    }
+    assert(_next <= _limit, "invariant");
+    assert(_current <= _limit, "invariant");
+  }
+
+  u2 number_of_elements() const {
+    return JfrBigEndian::read<u2>(_buffer + number_of_elements_offset);
+  }
+
+  const Symbol* name() const {
+    assert(_current < _next, "invariant");
+    return _ik->constants()->symbol_at(JfrBigEndian::read<u2>(_buffer + _current));
+  }
+
+  char value_type() const {
+    return JfrBigEndian::read<u1>(_buffer + _current + value_type_relative_offset);
+  }
+
+  jint read_int() const {
+    return _ik->constants()->int_at(value_index());
+  }
+
+  bool read_bool() const {
+    return read_int() != 0;
+  }
+};
+
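+// Iterates the annotations of a class-level RuntimeVisibleAnnotations attribute.
+// The leading u2 num_annotations field is skipped in the constructor; the raw
+// annotation data is read big-endian via JfrBigEndian.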
+class AnnotationIterator : public StackObj {
+ private:
+  const InstanceKlass* _ik;
+  // ensure _limit field is declared before _buffer
+  u2 _limit; // length of annotations array
+  const address _buffer;
+  mutable u2 _current; // annotation
+  mutable u2 _next; // annotation
+
+ public:
+  AnnotationIterator(const InstanceKlass* ik, AnnotationArray* ar) : _ik(ik),
+                                                                     _current(0),
+                                                                     _next(0),
+                                                                     _limit(ar != NULL ? ar->length() : 0),
+                                                                     _buffer(_limit > 2 ? ar->adr_at(2) : NULL) {
+    if (_buffer != NULL) {
+      _limit -= 2; // subtract sizeof(u2) number of annotations field
+    }
+  }
+  bool has_next() const {
+    return _next < _limit;
+  }
+
+  void move_to_next() const {
+    assert(has_next(), "invariant");
+    _current = _next;
+    if (_next < _limit) {
+      _next = next_annotation_index(_buffer, _limit, _next);
+    }
+    assert(_next <= _limit, "invariant");
+    assert(_current <= _limit, "invariant");
+  }
+  const AnnotationElementIterator elements() const {
+    assert(_current < _next, "invariant");
+    return AnnotationElementIterator(_ik, _buffer + _current, _next - _current);
+  }
+  const Symbol* type() const {
+    assert(_buffer != NULL, "invariant");
+    assert(_current < _limit, "invariant");
+    return _ik->constants()->symbol_at(JfrBigEndian::read<u2>(_buffer + _current));
+  }
+};
+
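+// Scan the class-level annotations of ik for the given annotation type and,
+// if found, read its boolean "value" element into the out parameter.
+// Returns true only if both the annotation and its "value" element are present.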
+static unsigned int unused_hash = 0;
+static const char value_name[] = "value";
+static bool has_registered_annotation(const InstanceKlass* ik, const Symbol* annotation_type, bool& value) {
+  assert(annotation_type != NULL, "invariant");
+  AnnotationArray* class_annotations = ik->class_annotations();
+  if (class_annotations == NULL) {
+    return false;
+  }
+
+  const AnnotationIterator annotation_iterator(ik, class_annotations);
+  while (annotation_iterator.has_next()) {
+    annotation_iterator.move_to_next();
+    if (annotation_iterator.type() == annotation_type) {
+      // target annotation found
+      static const Symbol* value_symbol =
+        SymbolTable::lookup_only(value_name, sizeof value_name - 1, unused_hash);
+      assert(value_symbol != NULL, "invariant");
+      const AnnotationElementIterator element_iterator = annotation_iterator.elements();
+      while (element_iterator.has_next()) {
+        element_iterator.move_to_next();
+        if (value_symbol == element_iterator.name()) {
+          // "value" element
+          assert('Z' == element_iterator.value_type(), "invariant");
+          value = element_iterator.read_bool();
+          return true;
+        }
+      }
+    }
+  }
+  return false;
+}
+
+static bool registered_annotation_value(const InstanceKlass* ik, const Symbol* const registered_symbol) {
+  assert(registered_symbol != NULL, "invariant");
+  assert(ik != NULL, "invariant");
+  assert(JdkJfrEvent::is_a(ik), "invariant");
+  bool registered_value = false;
+  if (has_registered_annotation(ik, registered_symbol, registered_value)) {
+    return registered_value;
+  }
+  InstanceKlass* super = InstanceKlass::cast(ik->super());
+  return registered_annotation_value(super, registered_symbol);
+}
+
+static const char registered_constant[] = "Ljdk/jfr/Registered;";
+
+// Evaluate to the value of the first found "Ljdk/jfr/Registered;" annotation.
+// Searching moves upwards in the klass hierarchy in order to support
+// inherited annotations in addition to the ability to override.
+static bool should_register_klass(const InstanceKlass* ik) {
+  static const Symbol* const registered_symbol = SymbolTable::lookup_only(registered_constant,
+                                                                          sizeof registered_constant - 1,
+                                                                          unused_hash);
+  assert(registered_symbol != NULL, "invariant");
+  return registered_annotation_value(ik, registered_symbol);
+}
+
+/*
+ * Map a utf8 constant back to its CONSTANT_UTF8_INFO
+ */
+static u2 utf8_info_index(const InstanceKlass* ik, const Symbol* const target, TRAPS) {
+  assert(target != NULL, "invariant");
+  ConstantPool* cp = ik->constants();
+  const int cp_len = cp->length();
+  for (u2 index = 1; index < cp_len; ++index) {
+    const constantTag tag = cp->tag_at(index);
+    if (tag.is_utf8()) {
+      const Symbol* const utf8_sym = cp->symbol_at(index);
+      assert(utf8_sym != NULL, "invariant");
+      if (utf8_sym == target) {
+        return index;
+      }
+    }
+  }
+  // not in constant pool
+  return invalid_cp_index;
+}
+
+#ifdef ASSERT
+static bool is_index_within_range(u2 index, u2 orig_cp_len, u2 new_cp_entries_len) {
+  return index > 0 && index < orig_cp_len + new_cp_entries_len;
+}
+#endif
+
+static u2 add_utf8_info(JfrBigEndianWriter& writer, const char* utf8_constant, u2 orig_cp_len, u2& new_cp_entries_len) {
+  assert(utf8_constant != NULL, "invariant");
+  writer.write<u1>(JVM_CONSTANT_Utf8);
+  writer.write_utf8_u2_len(utf8_constant);
+  assert(writer.is_valid(), "invariant");
+  // return index for the added utf8 info
+  return orig_cp_len + new_cp_entries_len++;
+}
+
+static u2 add_method_ref_info(JfrBigEndianWriter& writer,
+                              u2 cls_name_index,
+                              u2 method_index,
+                              u2 desc_index,
+                              u2 orig_cp_len,
+                              u2& number_of_new_constants,
+                              TRAPS) {
+  assert(is_index_within_range(cls_name_index, orig_cp_len, number_of_new_constants), "invariant");
+  assert(is_index_within_range(method_index, orig_cp_len, number_of_new_constants), "invariant");
+  assert(is_index_within_range(desc_index, orig_cp_len, number_of_new_constants), "invariant");
+  writer.write<u1>(JVM_CONSTANT_Class);
+  writer.write<u2>(cls_name_index);
+  const u2 cls_entry_index = orig_cp_len + number_of_new_constants;
+  ++number_of_new_constants;
+  writer.write<u1>(JVM_CONSTANT_NameAndType);
+  writer.write<u2>(method_index);
+  writer.write<u2>(desc_index);
+  const u2 nat_entry_index = orig_cp_len + number_of_new_constants;
+  ++number_of_new_constants;
+  writer.write<u1>(JVM_CONSTANT_Methodref);
+  writer.write<u2>(cls_entry_index);
+  writer.write<u2>(nat_entry_index);
+  // post-increment number_of_new_constants
+  // value returned is the index to the added method_ref
+  return orig_cp_len + number_of_new_constants++;
+}
+
+static u2 add_flr_register_method_constants(JfrBigEndianWriter& writer,
+                                            const u2* utf8_indexes,
+                                            u2 orig_cp_len,
+                                            u2& number_of_new_constants,
+                                            TRAPS) {
+  assert(utf8_indexes != NULL, "invariant");
+  return add_method_ref_info(writer,
+                             utf8_indexes[UTF8_REQ_FlightRecorder],
+                             utf8_indexes[UTF8_REQ_register],
+                             utf8_indexes[UTF8_REQ_CLASS_VOID_METHOD_DESC],
+                             orig_cp_len,
+                             number_of_new_constants,
+                             THREAD);
+}
+
+/*
+ * field_info {
+ *   u2             access_flags;
+ *   u2             name_index;
+ *   u2             descriptor_index;
+ *   u2             attributes_count;
+ *   attribute_info attributes[attributes_count];
+ * }
+ */
+static jlong add_field_info(JfrBigEndianWriter& writer, u2 name_index, u2 desc_index, bool is_static = false) {
+  assert(name_index > 0, "invariant");
+  assert(desc_index > 0, "invariant");
+  DEBUG_ONLY(const jlong start_offset = writer.current_offset();)
+  writer.write<u2>(JVM_ACC_SYNTHETIC | JVM_ACC_PRIVATE | (is_static ? JVM_ACC_STATIC : JVM_ACC_TRANSIENT)); // flags
+  writer.write(name_index);
+  writer.write(desc_index);
+  writer.write((u2)0x0); // attributes_count
+  assert(writer.is_valid(), "invariant");
+  DEBUG_ONLY(assert(start_offset + 8 == writer.current_offset(), "invariant");)
+  return writer.current_offset();
+}
+
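+// Appends the three injected field_infos: the static eventHandler reference
+// plus the startTime and duration instance fields (both of type long).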
+static u2 add_field_infos(JfrBigEndianWriter& writer, const u2* utf8_indexes) {
+  assert(utf8_indexes != NULL, "invariant");
+  add_field_info(writer,
+                 utf8_indexes[UTF8_REQ_eventHandler],
+                 utf8_indexes[UTF8_REQ_eventHandler_FIELD_DESC],
+                 true); // static
+
+  add_field_info(writer,
+                 utf8_indexes[UTF8_REQ_startTime],
+                 utf8_indexes[UTF8_REQ_J_FIELD_DESC]);
+
+  add_field_info(writer,
+                 utf8_indexes[UTF8_REQ_duration],
+                 utf8_indexes[UTF8_REQ_J_FIELD_DESC]);
+
+  return number_of_new_fields;
+}
+
+/*
+ * method_info {
+ *  u2             access_flags;
+ *  u2             name_index;
+ *  u2             descriptor_index;
+ *  u2             attributes_count;
+ *  attribute_info attributes[attributes_count];
+ * }
+ *
+ * Code_attribute {
+ *   u2 attribute_name_index;
+ *   u4 attribute_length;
+ *   u2 max_stack;
+ *   u2 max_locals;
+ *   u4 code_length;
+ *   u1 code[code_length];
+ *   u2 exception_table_length;
+ *   {   u2 start_pc;
+ *       u2 end_pc;
+ *       u2 handler_pc;
+ *       u2 catch_type;
+ *   } exception_table[exception_table_length];
+ *   u2 attributes_count;
+ *   attribute_info attributes[attributes_count];
+ * }
+ */
+
+static jlong add_method_info(JfrBigEndianWriter& writer,
+                             u2 name_index,
+                             u2 desc_index,
+                             u2 code_index,
+                             const u1* const code,
+                             const size_t code_len) {
+  assert(name_index > 0, "invariant");
+  assert(desc_index > 0, "invariant");
+  assert(code_index > 0, "invariant");
+  DEBUG_ONLY(const jlong start_offset = writer.current_offset();)
+  writer.write<u2>(JVM_ACC_SYNTHETIC | JVM_ACC_PUBLIC); // flags
+  writer.write(name_index);
+  writer.write(desc_index);
+  writer.write<u2>(0x1); // attributes_count ; 1 for "Code" attribute
+  assert(writer.is_valid(), "invariant");
+  DEBUG_ONLY(assert(start_offset + 8 == writer.current_offset(), "invariant");)
+  // Code attribute
+  writer.write(code_index); // "Code"
+  writer.bytes(code, code_len);
+  DEBUG_ONLY(assert((start_offset + 8 + 2 + (jlong)code_len) == writer.current_offset(), "invariant");)
+  return writer.current_offset();
+}
+
+/*
+ * On return, the passed stream will be positioned
+ * just after the constant pool section in the classfile
+ * and the cp length is returned.
+ *
+ * Stream should come in at the start position.
+ */
+static u2 position_stream_after_cp(ClassFileStream* stream) {
+  assert(stream != NULL, "invariant");
+  assert(stream->current_offset() == 0, "invariant");
+  stream->skip_u4_fast(2);  // 8 bytes skipped
+  const u2 cp_len = stream->get_u2_fast();
+  assert(cp_len > 0, "invariant");
+  // now spin the stream position to just after the constant pool
+  for (u2 index = 1; index < cp_len; ++index) {
+    const u1 tag = stream->get_u1_fast(); // cp tag
+    switch (tag) {
+      case JVM_CONSTANT_Class:
+      case JVM_CONSTANT_String: {
+        stream->skip_u2_fast(1); // skip 2 bytes
+        continue;
+      }
+      case JVM_CONSTANT_Fieldref:
+      case JVM_CONSTANT_Methodref:
+      case JVM_CONSTANT_InterfaceMethodref:
+      case JVM_CONSTANT_Integer:
+      case JVM_CONSTANT_Float:
+      case JVM_CONSTANT_NameAndType:
+      case JVM_CONSTANT_InvokeDynamic: {
+        stream->skip_u4_fast(1); // skip 4 bytes
+        continue;
+      }
+      case JVM_CONSTANT_Long:
+      case JVM_CONSTANT_Double: {
+        stream->skip_u4_fast(2); // skip 8 bytes
+        // Skip entry following eight-byte constant, see JVM book p. 98
+        ++index;
+        continue;
+      }
+      case JVM_CONSTANT_Utf8: {
+        u2 utf8_length = stream->get_u2_fast();
+        stream->skip_u1_fast(utf8_length); // skip 2 + len bytes
+        continue;
+      }
+      case JVM_CONSTANT_MethodHandle:
+      case JVM_CONSTANT_MethodType: {
+        if (tag == JVM_CONSTANT_MethodHandle) {
+          stream->skip_u1_fast(1);
+          stream->skip_u2_fast(1); // skip 3 bytes
+        }
+        else if (tag == JVM_CONSTANT_MethodType) {
+          stream->skip_u2_fast(1); // skip 2 bytes
+        }
+      }
+      continue;
+      default:
+        assert(false, "error in skip logic!");
+        break;
+    } // end switch(tag)
+  }
+  return cp_len;
+}
+
+/*
+* On return, the passed stream will be positioned
+* just after the fields section in the classfile
+* and the number of fields will be returned.
+*
+* Stream should come in positioned just before fields_count
+*/
+static u2 position_stream_after_fields(ClassFileStream* stream) {
+  assert(stream != NULL, "invariant");
+  assert(stream->current_offset() > 0, "invariant");
+  // fields len
+  const u2 orig_fields_len = stream->get_u2_fast();
+  // fields
+  for (u2 i = 0; i < orig_fields_len; ++i) {
+    stream->skip_u2_fast(3);
+    const u2 attrib_info_len = stream->get_u2_fast();
+    for (u2 j = 0; j < attrib_info_len; ++j) {
+      stream->skip_u2_fast(1);
+      const u4 attrib_len = stream->get_u4_fast();
+      stream->skip_u1_fast(attrib_len);
+    }
+  }
+  return orig_fields_len;
+}
+
+/*
+* On return, the passed stream will be positioned
+* just after the methods section in the classfile
+* and the number of methods will be returned.
+*
+* Stream should come in positioned just before methods_count
+*/
+static u2 position_stream_after_methods(JfrBigEndianWriter& writer,
+                                        ClassFileStream* stream,
+                                        const u2* utf8_indexes,
+                                        bool register_klass,
+                                        const Method* clinit_method,
+                                        u4& orig_method_len_offset) {
+  assert(stream != NULL, "invariant");
+  assert(stream->current_offset() > 0, "invariant");
+  assert(utf8_indexes != NULL, "invariant");
+  // We will come back to this location when we
+  // know how many methods there will be.
+  writer.reserve(sizeof(u2));
+  const u2 orig_methods_len = stream->get_u2_fast();
+  // Move copy position past original method_count
+  // in order to not copy the original count
+  orig_method_len_offset += sizeof(u2);
+  for (u2 i = 0; i < orig_methods_len; ++i) {
+    const u4 method_offset = stream->current_offset();
+    stream->skip_u2_fast(1); // Access Flags
+    const u2 name_index = stream->get_u2_fast(); // Name index
+    stream->skip_u2_fast(1); // Descriptor index
+    const u2 attributes_count = stream->get_u2_fast();
+    for (u2 j = 0; j < attributes_count; ++j) {
+      stream->skip_u2_fast(1);
+      const u4 attrib_len = stream->get_u4_fast();
+      stream->skip_u1_fast(attrib_len);
+    }
+    if (clinit_method != NULL && name_index == clinit_method->name_index()) {
+      // The method just parsed is an existing <clinit> method.
+      // If the class has the @Registered(false) annotation, i.e. marking a class
+      // for opting out from automatic registration, then we do not need to do anything.
+      if (!register_klass) {
+        continue;
+      }
+      // Automatic registration with the jfr system is accomplished
+      // by pre-pending code to the <clinit> method of the class.
+      // We will need to re-create a new <clinit> in a later step.
+      // For now, ensure that this method is excluded from the methods
+      // being copied.
+      writer.bytes(stream->buffer() + orig_method_len_offset,
+                   method_offset - orig_method_len_offset);
+      assert(writer.is_valid(), "invariant");
+
+      // Update copy position to skip copy of <clinit> method
+      orig_method_len_offset = stream->current_offset();
+    }
+  }
+  return orig_methods_len;
+}
+
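+// Appends method_infos for the five injected stubs: begin(), end() and commit()
+// with empty void bodies, and isEnabled()/shouldCommit() returning false.
+// Real implementations are generated later by the Java-level JFR instrumentation.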
+static u2 add_method_infos(JfrBigEndianWriter& writer, const u2* utf8_indexes) {
+  assert(utf8_indexes != NULL, "invariant");
+  add_method_info(writer,
+                  utf8_indexes[UTF8_REQ_begin],
+                  utf8_indexes[UTF8_REQ_EMPTY_VOID_METHOD_DESC],
+                  utf8_indexes[UTF8_REQ_Code],
+                  empty_void_method_code_attribute,
+                  sizeof(empty_void_method_code_attribute));
+
+  assert(writer.is_valid(), "invariant");
+
+  add_method_info(writer,
+                  utf8_indexes[UTF8_REQ_end],
+                  utf8_indexes[UTF8_REQ_EMPTY_VOID_METHOD_DESC],
+                  utf8_indexes[UTF8_REQ_Code],
+                  empty_void_method_code_attribute,
+                  sizeof(empty_void_method_code_attribute));
+
+  assert(writer.is_valid(), "invariant");
+
+  add_method_info(writer,
+                  utf8_indexes[UTF8_REQ_commit],
+                  utf8_indexes[UTF8_REQ_EMPTY_VOID_METHOD_DESC],
+                  utf8_indexes[UTF8_REQ_Code],
+                  empty_void_method_code_attribute,
+                  sizeof(empty_void_method_code_attribute));
+
+  assert(writer.is_valid(), "invariant");
+
+  add_method_info(writer,
+                  utf8_indexes[UTF8_REQ_isEnabled],
+                  utf8_indexes[UTF8_REQ_EMPTY_BOOLEAN_METHOD_DESC],
+                  utf8_indexes[UTF8_REQ_Code],
+                  boolean_method_code_attribute,
+                  sizeof(boolean_method_code_attribute));
+
+  assert(writer.is_valid(), "invariant");
+
+  add_method_info(writer,
+                  utf8_indexes[UTF8_REQ_shouldCommit],
+                  utf8_indexes[UTF8_REQ_EMPTY_BOOLEAN_METHOD_DESC],
+                  utf8_indexes[UTF8_REQ_Code],
+                  boolean_method_code_attribute,
+                  sizeof(boolean_method_code_attribute));
+  assert(writer.is_valid(), "invariant");
+  return number_of_new_methods;
+}
+
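+// Re-emit the exception table of the original <clinit> (if any), shifting each
+// start_pc/end_pc/handler_pc by the length of the injected bytecode prologue.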
+static void adjust_exception_table(JfrBigEndianWriter& writer, u2 bci_adjustment_offset, const Method* method, TRAPS) {
+  const u2 ex_table_length = method != NULL ? (u2)method->exception_table_length() : 0;
+  writer.write<u2>(ex_table_length); // Exception table length
+  if (ex_table_length > 0) {
+    assert(method != NULL, "invariant");
+    const ExceptionTableElement* const ex_elements = method->exception_table_start();
+    for (int i = 0; i < ex_table_length; ++i) {
+      assert(ex_elements != NULL, "invariant");
+      writer.write<u2>(ex_elements[i].start_pc + bci_adjustment_offset);
+      writer.write<u2>(ex_elements[i].end_pc + bci_adjustment_offset);
+      writer.write<u2>(ex_elements[i].handler_pc + bci_adjustment_offset);
+      writer.write<u2>(ex_elements[i].catch_type_index); // no adjustment
+    }
+  }
+}
+
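+// Stack map frame type ranges as defined by JVMS 4.7.4 (StackMapTable attribute).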
+enum StackMapFrameTypes {
+  SAME_FRAME_BEGIN = 0,
+  SAME_FRAME_END = 63,
+  SAME_LOCALS_1_STACK_ITEM_FRAME_BEGIN = 64,
+  SAME_LOCALS_1_STACK_ITEM_FRAME_END = 127,
+  SAME_LOCALS_1_STACK_ITEM_FRAME_EXTENDED = 247,
+  CHOP_FRAME_BEGIN = 248,
+  CHOP_FRAME_END = 250,
+  SAME_FRAME_EXTENDED = 251,
+  APPEND_FRAME_BEGIN = 252,
+  APPEND_FRAME_END = 254,
+  FULL_FRAME = 255
+};
+
+static void adjust_stack_map(JfrBigEndianWriter& writer,
+                             Array<u1>* stack_map,
+                             const u2* utf8_indexes,
+                             u2 bci_adjustment_offset,
+                             TRAPS) {
+  assert(stack_map != NULL, "invariant");
+  assert(utf8_indexes != NULL, "invariant");
+  writer.write<u2>(utf8_indexes[UTF8_OPT_StackMapTable]);
+  const jlong stack_map_attrib_len_offset = writer.current_offset();
+  writer.reserve(sizeof(u4));
+  StackMapStream stream(stack_map);
+  const u2 stack_map_entries = stream.get_u2(THREAD);
+  // number of entries
+  writer.write<u2>(stack_map_entries); // new stack map entry added
+  const u1 frame_type = stream.get_u1(THREAD);
+  // SAME_FRAME and SAME_LOCALS_1_STACK_ITEM_FRAME encode
+  // their offset_delta into the actual frame type itself.
+  // If such a frame type is the first frame, then we transform
+  // it to a SAME_FRAME_EXTENDED or a SAME_LOCALS_1_STACK_ITEM_FRAME_EXTENDED frame.
+  // This is done in order to not overflow frame types accidentally
+  // when adjusting the offset_delta. In changing the frame types,
+  // we can work with an explicit u2 offset_delta field (like the other frame types)
+  if (frame_type <= SAME_FRAME_END) {
+    writer.write<u1>(SAME_FRAME_EXTENDED);
+    writer.write<u2>(frame_type + bci_adjustment_offset);
+  } else if (frame_type >= SAME_LOCALS_1_STACK_ITEM_FRAME_BEGIN &&
+             frame_type <= SAME_LOCALS_1_STACK_ITEM_FRAME_END) {
+    writer.write<u1>(SAME_LOCALS_1_STACK_ITEM_FRAME_EXTENDED);
+    writer.write<u2>((frame_type - SAME_LOCALS_1_STACK_ITEM_FRAME_BEGIN) + bci_adjustment_offset);
+  } else if (frame_type >= SAME_LOCALS_1_STACK_ITEM_FRAME_EXTENDED) {
+      // SAME_LOCALS_1_STACK_ITEM_FRAME_EXTENDED to FULL_FRAME
+      // has a u2 offset_delta field
+      writer.write<u1>(frame_type);
+      writer.write<u2>(stream.get_u2(THREAD) + bci_adjustment_offset);
+  } else {
+    assert(false, "stackMapFrame type is invalid");
+  }
+
+  while (!stream.at_end()) {
+    writer.write<u1>(stream.get_u1(THREAD));
+  }
+
+  u4 stack_map_attrib_len = writer.current_offset() - stack_map_attrib_len_offset;
+  // the stack_map_table_attributes_length value is exclusive
+  stack_map_attrib_len -= sizeof(u4);
+  writer.write_at_offset(stack_map_attrib_len, stack_map_attrib_len_offset);
+}
+
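+// Rewrite the LineNumberTable attribute, shifting each bci by the size of the
+// injected prologue while leaving the source line numbers untouched.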
+static void adjust_line_number_table(JfrBigEndianWriter& writer,
+                                     const u2* utf8_indexes,
+                                     u4 bci_adjustment_offset,
+                                     const Method* method,
+                                     TRAPS) {
+  assert(utf8_indexes != NULL, "invariant");
+  assert(method != NULL, "invariant");
+  assert(method->has_linenumber_table(), "invariant");
+  writer.write(utf8_indexes[UTF8_OPT_LineNumberTable]);
+  const jlong lnt_attributes_length_offset = writer.current_offset();
+  writer.reserve(sizeof(u4));
+  const jlong lnt_attributes_entries_offset = writer.current_offset();
+  writer.reserve(sizeof(u2));
+  u1* lnt = method->compressed_linenumber_table();
+  CompressedLineNumberReadStream lnt_stream(lnt);
+  u2 line_number_table_entries = 0;
+  while (lnt_stream.read_pair()) {
+    ++line_number_table_entries;
+    const u2 bci = (u2)lnt_stream.bci();
+    writer.write<u2>(bci + (u2)bci_adjustment_offset);
+    writer.write<u2>((u2)lnt_stream.line());
+  }
+  writer.write_at_offset(line_number_table_entries, lnt_attributes_entries_offset);
+  u4 lnt_table_attributes_len = writer.current_offset() - lnt_attributes_length_offset;
+  // the line_number_table_attributes_length value is exclusive
+  lnt_table_attributes_len -= sizeof(u4);
+  writer.write_at_offset(lnt_table_attributes_len, lnt_attributes_length_offset);
+}
+
+// returns the number of lvtt entries
+static u2 adjust_local_variable_table(JfrBigEndianWriter& writer,
+                                     const u2* utf8_indexes,
+                                     u2 bci_adjustment_offset,
+                                     const Method* method,
+                                     TRAPS) {
+ assert(utf8_indexes != NULL, "invariant");
+ assert(method != NULL, "invariant");
+ assert(method->has_localvariable_table(), "invariant");
+ writer.write<u2>(utf8_indexes[UTF8_OPT_LocalVariableTable]);
+ const jlong lvt_attributes_length_offset = writer.current_offset();
+ writer.reserve(sizeof(u4));
+ const int lvt_len = method->localvariable_table_length();
+ writer.write<u2>((u2)lvt_len);
+ const LocalVariableTableElement* table = method->localvariable_table_start();
+ assert(table != NULL, "invariant");
+ u2 num_lvtt_entries = 0;
+ for (int i = 0; i < lvt_len; ++i) {
+   writer.write<u2>(table[i].start_bci + bci_adjustment_offset);
+   writer.write<u2>(table[i].length);
+   writer.write<u2>(table[i].name_cp_index);
+   writer.write<u2>(table[i].descriptor_cp_index);
+   writer.write<u2>(table[i].slot);
+   if (table[i].signature_cp_index > 0) {
+     ++num_lvtt_entries;
+   }
+ }
+ u4 lvt_table_attributes_len = writer.current_offset() - lvt_attributes_length_offset;
+ // the lvt_table_attributes_length value is exclusive
+ lvt_table_attributes_len -= sizeof(u4);
+ writer.write_at_offset(lvt_table_attributes_len, lvt_attributes_length_offset);
+ return num_lvtt_entries;
+}
+
+static void adjust_local_variable_type_table(JfrBigEndianWriter& writer,
+                                           const u2* utf8_indexes,
+                                           u2 bci_adjustment_offset,
+                                           u2 num_lvtt_entries,
+                                           const Method* method,
+                                           TRAPS) {
+ assert(num_lvtt_entries > 0, "invariant");
+ writer.write<u2>(utf8_indexes[UTF8_OPT_LocalVariableTypeTable]);
+ const jlong lvtt_attributes_length_offset = writer.current_offset();
+ writer.reserve(sizeof(u4));
+ writer.write<u2>(num_lvtt_entries);
+ const LocalVariableTableElement* table = method->localvariable_table_start();
+ assert(table != NULL, "invariant");
+ const int lvt_len = method->localvariable_table_length();
+ for (int i = 0; i < lvt_len; ++i) {
+   if (table[i].signature_cp_index > 0) {
+     writer.write<u2>(table[i].start_bci + bci_adjustment_offset);
+     writer.write<u2>(table[i].length);
+     writer.write<u2>(table[i].name_cp_index);
+     writer.write<u2>(table[i].signature_cp_index);
+     writer.write<u2>(table[i].slot);
+   }
+ }
+ u4 lvtt_table_attributes_len = writer.current_offset() - lvtt_attributes_length_offset;
+ // the lvtt_table_attributes_length value is exclusive
+ lvtt_table_attributes_len -= sizeof(u4);
+ writer.write_at_offset(lvtt_table_attributes_len, lvtt_attributes_length_offset);
+}
+
+static void adjust_code_attributes(JfrBigEndianWriter& writer,
+                                  const u2* utf8_indexes,
+                                  u2 bci_adjustment_offset,
+                                  const Method* clinit_method,
+                                  TRAPS) {
+ // "Code" attributes
+ assert(utf8_indexes != NULL, "invariant");
+ const jlong code_attributes_offset = writer.current_offset();
+ writer.reserve(sizeof(u2));
+ u2 number_of_code_attributes = 0;
+ if (clinit_method != NULL) {
+   Array<u1>* stack_map = clinit_method->stackmap_data();
+   if (stack_map != NULL) {
+     ++number_of_code_attributes;
+     adjust_stack_map(writer, stack_map, utf8_indexes, bci_adjustment_offset, THREAD);
+     assert(writer.is_valid(), "invariant");
+   }
+   if (clinit_method != NULL && clinit_method->has_linenumber_table()) {
+     ++number_of_code_attributes;
+     adjust_line_number_table(writer, utf8_indexes, bci_adjustment_offset, clinit_method, THREAD);
+     assert(writer.is_valid(), "invariant");
+   }
+   if (clinit_method != NULL && clinit_method->has_localvariable_table()) {
+     ++number_of_code_attributes;
+     const u2 num_of_lvtt_entries = adjust_local_variable_table(writer, utf8_indexes, bci_adjustment_offset, clinit_method, THREAD);
+     assert(writer.is_valid(), "invariant");
+     if (num_of_lvtt_entries > 0) {
+       ++number_of_code_attributes;
+       adjust_local_variable_type_table(writer, utf8_indexes, bci_adjustment_offset, num_of_lvtt_entries, clinit_method, THREAD);
+       assert(writer.is_valid(), "invariant");
+     }
+   }
+ }
+
+ // Store the number of code_attributes
+ writer.write_at_offset(number_of_code_attributes, code_attributes_offset);
+}
+
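+// Emit a method_info for <clinit> whose Code attribute starts with the injected
+// 8-byte prologue (ldc_w this_class; invokestatic FlightRecorder.register(Class);
+// then either a nop/return pair when no <clinit> existed, or two nop padding bytes)
+// followed by the original <clinit> bytecode, if any.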
+static jlong insert_clinit_method(const InstanceKlass* ik,
+                                 ClassFileParser& parser,
+                                 JfrBigEndianWriter& writer,
+                                 u2 orig_constant_pool_len,
+                                 const u2* utf8_indexes,
+                                 const u2 register_method_ref_index,
+                                 const Method* clinit_method,
+                                 TRAPS) {
+ assert(utf8_indexes != NULL, "invariant");
+ // The injected code length is always this value.
+ // This is to ensure that padding can be done
+ // where needed and to simplify size calculations.
+ static const u2 injected_code_length = 8;
+ const u2 name_index = utf8_indexes[UTF8_REQ_clinit];
+ const u2 desc_index = utf8_indexes[UTF8_REQ_EMPTY_VOID_METHOD_DESC];
+ const u2 max_stack = MAX2(clinit_method != NULL ? clinit_method->verifier_max_stack() : 1, 1);
+ const u2 max_locals = MAX2(clinit_method != NULL ? clinit_method->max_locals() : 0, 0);
+ const u2 orig_bytecodes_length = clinit_method != NULL ? (u2)clinit_method->code_size() : 0;
+ const address orig_bytecodes = clinit_method != NULL ? clinit_method->code_base() : NULL;
+ const u2 new_code_length = injected_code_length + orig_bytecodes_length;
+ DEBUG_ONLY(const jlong start_offset = writer.current_offset();)
+ writer.write<u2>(JVM_ACC_STATIC); // flags
+ writer.write<u2>(name_index);
+ writer.write<u2>(desc_index);
+ writer.write<u2>((u2)0x1); // attributes_count // "Code"
+ assert(writer.is_valid(), "invariant");
+ DEBUG_ONLY(assert(start_offset + 8 == writer.current_offset(), "invariant");)
+ // "Code" attribute
+ writer.write<u2>(utf8_indexes[UTF8_REQ_Code]); // "Code"
+ const jlong code_attribute_length_offset = writer.current_offset();
+ writer.reserve(sizeof(u4));
+ writer.write<u2>(max_stack); // max stack
+ writer.write<u2>(max_locals); // max locals
+ writer.write<u4>((u4)new_code_length); // code length
+
+ /* BEGIN CLINIT CODE */
+
+ // Note the use of ldc_w here instead of ldc.
+ // This is to handle all values of "this_class_index"
+ writer.write<u1>((u1)Bytecodes::_ldc_w);
+ writer.write<u2>((u2)parser.this_class_index()); // load constant "this class"
+ writer.write<u1>((u1)Bytecodes::_invokestatic);
+ // invoke FlightRecorder.register(Ljava/lang/Class;)V
+ writer.write<u2>(register_method_ref_index);
+ if (clinit_method == NULL) {
+   writer.write<u1>((u1)Bytecodes::_nop);
+   writer.write<u1>((u1)Bytecodes::_return);
+ } else {
+   // If we are pre-pending to original code,
+   // do padding to minimize disruption to the original.
+   // It might have dependencies on 4-byte boundaries
+   // i.e. lookupswitch and tableswitch instructions
+   writer.write<u1>((u1)Bytecodes::_nop);
+   writer.write<u1>((u1)Bytecodes::_nop);
+   // insert original clinit code
+   writer.bytes(orig_bytecodes, orig_bytecodes_length);
+ }
+
+ /* END CLINIT CODE */
+
+ assert(writer.is_valid(), "invariant");
+ adjust_exception_table(writer, injected_code_length, clinit_method, THREAD);
+ assert(writer.is_valid(), "invariant");
+ adjust_code_attributes(writer, utf8_indexes, injected_code_length, clinit_method, THREAD);
+ assert(writer.is_valid(), "invariant");
+ u4 code_attribute_len = writer.current_offset() - code_attribute_length_offset;
+ // the code_attribute_length value is exclusive
+ code_attribute_len -= sizeof(u4);
+ writer.write_at_offset(code_attribute_len, code_attribute_length_offset);
+ return writer.current_offset();
+}
+
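+// Used when the klass being defined is the jdk.jfr.Event klass itself: no
+// members are injected, the bytes are copied as-is except that "public final"
+// method access flags are rewritten to plain "public" so that event subclasses
+// can override those methods.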
+// Caller needs ResourceMark
+static ClassFileStream* create_new_bytes_for_event_klass(const InstanceKlass* ik, const ClassFileParser& parser, TRAPS) {
+ DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
+ static const u2 public_final_flag_mask = JVM_ACC_PUBLIC | JVM_ACC_FINAL;
+ ClassFileStream* const orig_stream = parser.clone_stream();
+ const int orig_stream_length = orig_stream->length();
+ // allocate an identically sized buffer
+ u1* const new_buffer = NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(THREAD, u1, orig_stream_length);
+ if (new_buffer == NULL) {
+   return NULL;
+ }
+ assert(new_buffer != NULL, "invariant");
+ // memcpy the entire [B
+ memcpy(new_buffer, orig_stream->buffer(), orig_stream_length);
+ const u2 orig_cp_len = position_stream_after_cp(orig_stream);
+ assert(orig_cp_len > 0, "invariant");
+ assert(orig_stream->current_offset() > 0, "invariant");
+ orig_stream->skip_u2_fast(3); // access_flags, this_class_index, super_class_index
+ const u2 iface_len = orig_stream->get_u2_fast();
+ orig_stream->skip_u2_fast(iface_len);
+ // fields len
+ const u2 orig_fields_len = orig_stream->get_u2_fast();
+ // fields
+ for (u2 i = 0; i < orig_fields_len; ++i) {
+   orig_stream->skip_u2_fast(3);
+   const u2 attrib_info_len = orig_stream->get_u2_fast();
+   for (u2 j = 0; j < attrib_info_len; ++j) {
+     orig_stream->skip_u2_fast(1);
+     const u4 attrib_len = orig_stream->get_u4_fast();
+     orig_stream->skip_u1_fast(attrib_len);
+   }
+ }
+ // methods
+ const u2 orig_methods_len = orig_stream->get_u2_fast();
+ for (u2 i = 0; i < orig_methods_len; ++i) {
+   const u4 access_flag_offset = orig_stream->current_offset();
+   const u2 flags = orig_stream->get_u2_fast();
+   // Rewrite JVM_ACC_FINAL -> JVM_ACC_PUBLIC
+   if (public_final_flag_mask == flags) {
+     JfrBigEndianWriter accessflagsrewriter(new_buffer + access_flag_offset, sizeof(u2));
+     accessflagsrewriter.write<u2>(JVM_ACC_PUBLIC);
+     assert(accessflagsrewriter.is_valid(), "invariant");
+   }
+   orig_stream->skip_u2_fast(2);
+   const u2 attributes_count = orig_stream->get_u2_fast();
+   for (u2 j = 0; j < attributes_count; ++j) {
+     orig_stream->skip_u2_fast(1);
+     const u4 attrib_len = orig_stream->get_u4_fast();
+     orig_stream->skip_u1_fast(attrib_len);
+   }
+ }
+ return new ClassFileStream(new_buffer, orig_stream_length, NULL);
+}
+
+// Attempt to locate an existing UTF8_INFO mapping the utf8_constant.
+// If no UTF8_INFO exists, add (append) a new one to the constant pool.
+static u2 find_or_add_utf8_info(JfrBigEndianWriter& writer,
+                               const InstanceKlass* ik,
+                               const char* const utf8_constant,
+                               u2 orig_cp_len,
+                               u2& added_cp_entries,
+                               TRAPS) {
+ assert(utf8_constant != NULL, "invariant");
+ TempNewSymbol utf8_sym = SymbolTable::new_symbol(utf8_constant, THREAD);
+ // lookup existing
+ const int utf8_orig_idx = utf8_info_index(ik, utf8_sym, THREAD);
+ if (utf8_orig_idx != invalid_cp_index) {
+   // existing constant pool entry found
+   return utf8_orig_idx;
+ }
+ // no existing match, need to add a new utf8 cp entry
+ assert(invalid_cp_index == utf8_orig_idx, "invariant");
+ // add / append new
+ return add_utf8_info(writer, utf8_constant, orig_cp_len, added_cp_entries);
+}
+
+/*
+ * This routine will resolve the required utf8_constants array
+ * to their constant pool indexes (mapping to their UTF8_INFO's)
+ * Only if a constant is actually needed and does not already exist
+ * will it be added.
+ *
+ * The passed in indexes array will be populated with the resolved indexes.
+ * The number of newly added constant pool entries is returned.
+ */
+static u2 resolve_utf8_indexes(JfrBigEndianWriter& writer,
+                              const InstanceKlass* ik,
+                              u2* const utf8_indexes,
+                              u2 orig_cp_len,
+                              const Method* clinit_method,
+                              TRAPS) {
+ assert(utf8_indexes != NULL, "invariant");
+ u2 added_cp_entries = 0;
+ // resolve all required symbols
+ for (u2 index = 0; index < NOF_UTF8_REQ_SYMBOLS; ++index) {
+   utf8_indexes[index] = find_or_add_utf8_info(writer,
+                                               ik,
+                                               utf8_constants[index],
+                                               orig_cp_len,
+                                               added_cp_entries,
+                                               THREAD);
+ }
+ // Now determine optional constants (mainly "Code" attributes)
+ if (clinit_method != NULL && clinit_method->has_stackmap_table()) {
+   utf8_indexes[UTF8_OPT_StackMapTable] =
+     find_or_add_utf8_info(writer,
+                           ik,
+                           utf8_constants[UTF8_OPT_StackMapTable],
+                           orig_cp_len,
+                           added_cp_entries,
+                           THREAD);
+ } else {
+   utf8_indexes[UTF8_OPT_StackMapTable] = invalid_cp_index;
+ }
+
+ if (clinit_method != NULL && clinit_method->has_linenumber_table()) {
+   utf8_indexes[UTF8_OPT_LineNumberTable] =
+     find_or_add_utf8_info(writer,
+                           ik,
+                           utf8_constants[UTF8_OPT_LineNumberTable],
+                           orig_cp_len,
+                           added_cp_entries,
+                           THREAD);
+ } else {
+   utf8_indexes[UTF8_OPT_LineNumberTable] = invalid_cp_index;
+ }
+
+ if (clinit_method != NULL && clinit_method->has_localvariable_table()) {
+   utf8_indexes[UTF8_OPT_LocalVariableTable] =
+     find_or_add_utf8_info(writer,
+                           ik,
+                           utf8_constants[UTF8_OPT_LocalVariableTable],
+                           orig_cp_len,
+                           added_cp_entries,
+                           THREAD);
+
+   utf8_indexes[UTF8_OPT_LocalVariableTypeTable] =
+     find_or_add_utf8_info(writer,
+                           ik,
+                           utf8_constants[UTF8_OPT_LocalVariableTypeTable],
+                           orig_cp_len,
+                           added_cp_entries,
+                           THREAD);
+ } else {
+   utf8_indexes[UTF8_OPT_LocalVariableTable] = invalid_cp_index;
+   utf8_indexes[UTF8_OPT_LocalVariableTypeTable] = invalid_cp_index;
+ }
+
+ return added_cp_entries;
+}
+
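+// Build, in a single pass over the original class bytes, a "lazily" instrumented
+// version of an event subclass: resolve/append constant pool entries, add the
+// three event fields and the five stub methods, and (unless opted out via
+// @Registered(false)) prepend FlightRecorder.register(Class) to <clinit>.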
+static u1* new_bytes_for_lazy_instrumentation(InstanceKlass* ik,
+                                             ClassFileParser& parser,
+                                             jint& size_of_new_bytes,
+                                             TRAPS) {
+ assert(ik != NULL, "invariant");
+ // If the class already has a clinit method
+ // we need to take that into account
+ const Method* clinit_method = ik->class_initializer();
+ const bool register_klass = should_register_klass(ik);
+ ClassFileStream* const orig_stream = parser.clone_stream();
+ const int orig_stream_size = orig_stream->length();
+ assert(orig_stream->current_offset() == 0, "invariant");
+ const u2 orig_cp_len = position_stream_after_cp(orig_stream);
+ assert(orig_cp_len > 0, "invariant");
+ assert(orig_stream->current_offset() > 0, "invariant");
+ // Dimension and allocate a working byte buffer
+ // to be used in building up a modified class [B.
+ const jint new_buffer_size = extra_stream_bytes + orig_stream_size;
+ u1* const new_buffer = NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(THREAD, u1, new_buffer_size);
+ if (new_buffer == NULL) {
+   if (true) tty->print_cr ("Thread local allocation (native) for " SIZE_FORMAT
+     " bytes failed in JfrClassAdapter::on_klass_creation", (size_t)new_buffer_size);
+   return NULL;
+ }
+ assert(new_buffer != NULL, "invariant");
+ // [B wrapped in a big endian writer
+ JfrBigEndianWriter writer(new_buffer, new_buffer_size);
+ assert(writer.current_offset() == 0, "invariant");
+ const u4 orig_access_flag_offset = orig_stream->current_offset();
+ // Copy original stream from the beginning up to AccessFlags
+ // This means the original constant pool contents are copied unmodified
+ writer.bytes(orig_stream->buffer(), orig_access_flag_offset);
+ assert(writer.is_valid(), "invariant");
+ assert(writer.current_offset() == (intptr_t)orig_access_flag_offset, "invariant"); // same positions
+ // Our writer now sits just after the last original constant pool entry.
+ // I.e. we are in a good position to append new constant pool entries
+ // This array will contain the resolved indexes
+ // in order to reference UTF8_INFO's needed
+ u2 utf8_indexes[NOF_UTF8_SYMBOLS];
+ // resolve_utf8_indexes will be conservative in attempting to
+ // locate an existing UTF8_INFO; it will only append constants
+ // that are absolutely required
+ u2 number_of_new_constants = resolve_utf8_indexes(writer, ik, utf8_indexes, orig_cp_len, clinit_method, THREAD);
+ // UTF8_INFO entries now added to the constant pool
+ // In order to invoke a method we would need additional
+ // constants, JVM_CONSTANT_Class, JVM_CONSTANT_NameAndType
+ // and JVM_CONSTANT_Methodref.
+ const u2 flr_register_method_ref_index =
+   register_klass ?
+     add_flr_register_method_constants(writer,
+                                       utf8_indexes,
+                                       orig_cp_len,
+                                       number_of_new_constants,
+                                       THREAD) :  invalid_cp_index;
+
+ // New constant pool entries added and all UTF8_INFO indexes resolved
+ // Now update the class file constant_pool_count with an updated count
+ writer.write_at_offset<u2>(orig_cp_len + number_of_new_constants, 8);
+ assert(writer.is_valid(), "invariant");
+ orig_stream->skip_u2_fast(3); // access_flags, this_class_index, super_class_index
+ const u2 iface_len = orig_stream->get_u2_fast(); // interfaces
+ orig_stream->skip_u2_fast(iface_len);
+ const u4 orig_fields_len_offset = orig_stream->current_offset();
+ // Copy from AccessFlags up to and including interfaces
+ writer.bytes(orig_stream->buffer() + orig_access_flag_offset,
+              orig_fields_len_offset - orig_access_flag_offset);
+ assert(writer.is_valid(), "invariant");
+ const jlong new_fields_len_offset = writer.current_offset();
+ const u2 orig_fields_len = position_stream_after_fields(orig_stream);
+ u4 orig_method_len_offset = orig_stream->current_offset();
+ // Copy up to and including fields
+ writer.bytes(orig_stream->buffer() + orig_fields_len_offset, orig_method_len_offset - orig_fields_len_offset);
+ assert(writer.is_valid(), "invariant");
+ // We are sitting just after the original number of field_infos
+ // so this is a position where we can add (append) new field_infos
+ const u2 number_of_new_fields = add_field_infos(writer, utf8_indexes);
+ assert(writer.is_valid(), "invariant");
+ const jlong new_method_len_offset = writer.current_offset();
+ // Additional field_infos added, update classfile fields_count
+ writer.write_at_offset<u2>(orig_fields_len + number_of_new_fields, new_fields_len_offset);
+ assert(writer.is_valid(), "invariant");
+ // Our current location is now at classfile methods_count
+ const u2 orig_methods_len = position_stream_after_methods(writer,
+                                                           orig_stream,
+                                                           utf8_indexes,
+                                                           register_klass,
+                                                           clinit_method,
+                                                           orig_method_len_offset);
+ const u4 orig_attributes_count_offset = orig_stream->current_offset();
+ // Copy existing methods
+ writer.bytes(orig_stream->buffer() + orig_method_len_offset, orig_attributes_count_offset - orig_method_len_offset);
+ assert(writer.is_valid(), "invariant");
+ // We are sitting just after the original number of method_infos
+ // so this is a position where we can add (append) new method_infos
+ u2 number_of_new_methods = add_method_infos(writer, utf8_indexes);
+
+ // We have just added the new methods.
+ //
+ // What about the state of <clinit>?
+ // We would need to do:
+ // 1. Nothing (@Registered(false) annotation)
+ // 2. Build up a new <clinit> - and if the original class already contains a <clinit>,
+ //                              merging will be necessary.
+ //
+ if (register_klass) {
+   insert_clinit_method(ik, parser, writer, orig_cp_len, utf8_indexes, flr_register_method_ref_index, clinit_method, THREAD);
+ }
+ number_of_new_methods += clinit_method != NULL ? 0 : register_klass ? 1 : 0;
+ // Update classfile methods_count
+ writer.write_at_offset<u2>(orig_methods_len + number_of_new_methods, new_method_len_offset);
+ assert(writer.is_valid(), "invariant");
+ // Copy last remaining bytes
+ writer.bytes(orig_stream->buffer() + orig_attributes_count_offset, orig_stream_size - orig_attributes_count_offset);
+ assert(writer.is_valid(), "invariant");
+ assert(writer.current_offset() > orig_stream->length(), "invariant");
+ size_of_new_bytes = (jint)writer.current_offset();
+ return new_buffer;
+}
+
+static void log_pending_exception(oop throwable) {
+ assert(throwable != NULL, "invariant");
+ oop msg = java_lang_Throwable::message(throwable);
+ if (msg != NULL) {
+   char* text = java_lang_String::as_utf8_string(msg);
+   if (text != NULL) {
+     if (true) tty->print_cr ("%s", text);
+   }
+ }
+}
+
+static bool should_force_instrumentation() {
+ return !JfrOptionSet::allow_event_retransforms() || JfrEventClassTransformer::is_force_instrumentation();
+}
+
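+// If a recording is running, or instrumentation is forced, the lazily
+// instrumented bytes are handed to Java via JfrUpcalls for full (eager)
+// instrumentation; otherwise they are returned as-is and real instrumentation
+// is deferred until retransformation.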
+static ClassFileStream* create_new_bytes_for_subklass(InstanceKlass* ik, ClassFileParser& parser, Thread* t) {
+ assert(JdkJfrEvent::is_a(ik), "invariant");
+ DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(t));
+ jint size_of_new_bytes = 0;
+ u1* new_bytes = new_bytes_for_lazy_instrumentation(ik, parser, size_of_new_bytes, t);
+ if (new_bytes == NULL) {
+   return NULL;
+ }
+ assert(new_bytes != NULL, "invariant");
+ assert(size_of_new_bytes > 0, "invariant");
+
+ bool force_instrumentation = should_force_instrumentation();
+ if (Jfr::is_recording() || force_instrumentation) {
+   jint size_instrumented_data = 0;
+   unsigned char* instrumented_data = NULL;
+   const jclass super = (jclass)JNIHandles::make_local(ik->super()->java_mirror());
+   JfrUpcalls::new_bytes_eager_instrumentation(TRACE_ID(ik),
+                                               force_instrumentation,
+                                               super,
+                                               size_of_new_bytes,
+                                               new_bytes,
+                                               &size_instrumented_data,
+                                               &instrumented_data,
+                                               t);
+   if (t->has_pending_exception()) {
+     log_pending_exception(t->pending_exception());
+     t->clear_pending_exception();
+     return NULL;
+   }
+   assert(instrumented_data != NULL, "invariant");
+   assert(size_instrumented_data > 0, "invariant");
+   return new ClassFileStream(instrumented_data, size_instrumented_data, NULL);
+ }
+ return new ClassFileStream(new_bytes, size_of_new_bytes, NULL);
+}
+
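+// Cache the instrumented class bytes on the new InstanceKlass so that a later
+// retransform/redefine starts from this version; any bytes previously cached on
+// the original klass are released. A no-op when retransforms are disallowed.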
+static bool cache_bytes(InstanceKlass* ik, ClassFileStream* new_stream, InstanceKlass* new_ik, TRAPS) {
+ assert(ik != NULL, "invariant");
+ assert(new_ik != NULL, "invariant");
+ assert(new_ik->name() != NULL, "invariant");
+ assert(new_stream != NULL, "invariant");
+ assert(!HAS_PENDING_EXCEPTION, "invariant");
+ static const bool can_retransform = JfrOptionSet::allow_retransforms();
+ if (!can_retransform) {
+   return true;
+ }
+ const jint stream_len = new_stream->length();
+ JvmtiCachedClassFileData* p =
+   (JvmtiCachedClassFileData*)NEW_C_HEAP_ARRAY_RETURN_NULL(u1, offset_of(JvmtiCachedClassFileData, data) + stream_len, mtInternal);
+ if (p == NULL) {
+   if (true) tty->print_cr("Allocation using C_HEAP_ARRAY for " SIZE_FORMAT
+     " bytes failed in JfrClassAdapter::on_klass_creation", (size_t)offset_of(JvmtiCachedClassFileData, data) + stream_len);
+   return false;
+ }
+ p->length = stream_len;
+ memcpy(p->data, new_stream->buffer(), stream_len);
+ new_ik->set_cached_class_file(p);
+ JvmtiCachedClassFileData* const cached_class_data = ik->get_cached_class_file();
+ if (cached_class_data != NULL) {
+   os::free(cached_class_data);
+   ik->set_cached_class_file(NULL);
+ }
+ return true;
+}
+
+static InstanceKlass* create_new_instance_klass(InstanceKlass* ik, ClassFileStream* stream, TRAPS) {
+ assert(stream != NULL, "invariant");
+ ResourceMark rm(THREAD);
+ TempNewSymbol parsed_name = NULL;
+ ClassLoaderData* const cld = ik->class_loader_data();
+ Handle pd(THREAD, ik->protection_domain());
+ Symbol* const class_name = ik->name();
+ const char* const klass_name = class_name != NULL ? class_name->as_C_string() : "";
+ InstanceKlass* const new_ik = ClassFileParser(stream).parseClassFile(
+                            class_name,
+                            cld,
+                            pd,
+                            NULL, // host klass
+                            NULL, // cp_patches
+                            parsed_name,
+                            true, // need_verify
+                            THREAD)();
+ if (HAS_PENDING_EXCEPTION) {
+   log_pending_exception(PENDING_EXCEPTION);
+   CLEAR_PENDING_EXCEPTION;
+   return NULL;
+ }
+ assert(new_ik != NULL, "invariant");
+ assert(new_ik->name() != NULL, "invariant");
+ assert(strncmp(ik->name()->as_C_string(), new_ik->name()->as_C_string(), strlen(ik->name()->as_C_string())) == 0, "invariant");
+ return cache_bytes(ik, stream, new_ik, THREAD) ? new_ik : NULL;
+}
+
+static void rewrite_klass_pointer(InstanceKlass*& ik, InstanceKlass* new_ik, ClassFileParser& parser, TRAPS) {
+ assert(ik != NULL, "invariant");
+ assert(new_ik != NULL, "invariant");
+ assert(new_ik->name() != NULL, "invariant");
+ assert(JdkJfrEvent::is(new_ik) || JdkJfrEvent::is_subklass(new_ik), "invariant");
+ assert(!HAS_PENDING_EXCEPTION, "invariant");
+ // assign original InstanceKlass* back onto "its" parser object for proper destruction
+ parser.set_klass_to_deallocate(ik);
+ // now rewrite original pointer to newly created InstanceKlass
+ ik = new_ik;
+}
+
+// During retransform/redefine, copy the Method specific trace flags
+// from the previous ik ("the original klass") to the new ik ("the scratch_klass").
+// The open code for retransform/redefine does not know about these.
+// In doing this migration here, we ensure the new Methods (defined in scratch klass)
+// will carry over trace tags from the old Methods being replaced,
+// ensuring flag/tag continuity while being transparent to open code.
+static void copy_method_trace_flags(const InstanceKlass* the_original_klass, const InstanceKlass* the_scratch_klass) {
+  assert(the_original_klass != NULL, "invariant");
+  assert(the_scratch_klass != NULL, "invariant");
+  assert(the_original_klass->name() == the_scratch_klass->name(), "invariant");
+  const Array<Method*>* old_methods = the_original_klass->methods();
+  const Array<Method*>* new_methods = the_scratch_klass->methods();
+  const bool equal_array_length = old_methods->length() == new_methods->length();
+  // The Method array has the property of being sorted.
+  // If they are the same length, there is a one-to-one mapping.
+  // If they are unequal, there was a method added (currently only
+  // private static methods allowed to be added), use lookup.
+  for (int i = 0; i < old_methods->length(); ++i) {
+    const Method* const old_method = old_methods->at(i);
+    Method* const new_method = equal_array_length ? new_methods->at(i) :
+      the_scratch_klass->find_method(old_method->name(), old_method->signature());
+    assert(new_method != NULL, "invariant");
+    assert(new_method->name() == old_method->name(), "invariant");
+    assert(new_method->signature() == old_method->signature(), "invariant");
+    *new_method->trace_flags_addr() = old_method->trace_flags();
+    assert(new_method->trace_flags() == old_method->trace_flags(), "invariant");
+  }
+}
+
+static bool is_retransforming(const InstanceKlass* ik, TRAPS) {
+  assert(ik != NULL, "invariant");
+  assert(JdkJfrEvent::is_a(ik), "invariant");
+  Symbol* const name = ik->name();
+  assert(name != NULL, "invariant");
+  Handle class_loader(THREAD, ik->class_loader());
+  Handle protection_domain(THREAD, ik->protection_domain());
+  // nota bene: use lock-free dictionary lookup
+  const InstanceKlass* prev_ik = (const InstanceKlass*)SystemDictionary::find(name, class_loader, protection_domain, THREAD);
+  if (prev_ik == NULL) {
+    return false;
+  }
+  // an existing ik implies a retransform/redefine
+  assert(prev_ik != NULL, "invariant");
+  assert(JdkJfrEvent::is_a(prev_ik), "invariant");
+  copy_method_trace_flags(prev_ik, ik);
+  return true;
+}
+
+// target for JFR_ON_KLASS_CREATION hook
+void JfrEventClassTransformer::on_klass_creation(InstanceKlass*& ik, ClassFileParser& parser, TRAPS) {
+ assert(ik != NULL, "invariant");
+ if (JdkJfrEvent::is(ik)) {
+   ResourceMark rm(THREAD);
+   HandleMark hm(THREAD);
+   ClassFileStream* new_stream = create_new_bytes_for_event_klass(ik, parser, THREAD);
+   if (new_stream == NULL) {
+     if (true) tty->print_cr("JfrClassAdapter: unable to create ClassFileStream");
+     return;
+   }
+   assert(new_stream != NULL, "invariant");
+   InstanceKlass* new_ik = create_new_instance_klass(ik, new_stream, THREAD);
+   if (new_ik == NULL) {
+     if (true) tty->print_cr("JfrClassAdapter: unable to create InstanceKlass");
+     return;
+   }
+   assert(new_ik != NULL, "invariant");
+   // We now need to explicitly tag the replaced klass as the jdk.jfr.Event klass
+   assert(!JdkJfrEvent::is(new_ik), "invariant");
+   JdkJfrEvent::tag_as(new_ik);
+   assert(JdkJfrEvent::is(new_ik), "invariant");
+   rewrite_klass_pointer(ik, new_ik, parser, THREAD);
+   return;
+ }
+ assert(JdkJfrEvent::is_subklass(ik), "invariant");
+ if (is_retransforming(ik, THREAD)) {
+   // not the initial klass load
+   return;
+ }
+ if (ik->is_abstract()) {
+   // abstract classes are not instrumented
+   return;
+ }
+ ResourceMark rm(THREAD);
+ HandleMark hm(THREAD);
+ ClassFileStream* const new_stream = create_new_bytes_for_subklass(ik, parser, THREAD);
+ if (NULL == new_stream) {
+   if (true) tty->print_cr("JfrClassAdapter: unable to create ClassFileStream");
+   return;
+ }
+ assert(new_stream != NULL, "invariant");
+ InstanceKlass* new_ik = create_new_instance_klass(ik, new_stream, THREAD);
+ if (new_ik == NULL) {
+   if (true) tty->print_cr("JfrClassAdapter: unable to create InstanceKlass");
+   return;
+ }
+ assert(new_ik != NULL, "invariant");
+ // would have been tagged already as a subklass during the normal process of traceid assignment
+ assert(JdkJfrEvent::is_subklass(new_ik), "invariant");
+ traceid id = ik->trace_id();
+ ik->set_trace_id(0);
+ new_ik->set_trace_id(id);
+ rewrite_klass_pointer(ik, new_ik, parser, THREAD);
+}
+
+static bool _force_instrumentation = false;
+void JfrEventClassTransformer::set_force_instrumentation(bool force_instrumentation) {
+  _force_instrumentation = force_instrumentation;
+}
+
+bool JfrEventClassTransformer::is_force_instrumentation() {
+  return _force_instrumentation;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/instrumentation/jfrEventClassTransformer.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_INSTRUMENTATION_JFREVENTCLASSTRANSFORMER_HPP
+#define SHARE_VM_JFR_INSTRUMENTATION_JFREVENTCLASSTRANSFORMER_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/exceptions.hpp"
+
+class ClassFileParser;
+class InstanceKlass;
+
+//
+// Intercepts the initial class load of jdk.jfr.Event and its subclasses.
+// Replaces the passed-in InstanceKlass* with an InstanceKlass* built from an extended class file schema.
+//
+class JfrEventClassTransformer : AllStatic {
+ public:
+  static void on_klass_creation(InstanceKlass*& ik, ClassFileParser& parser, TRAPS);
+  static void set_force_instrumentation(bool force_instrumentation);
+  static bool is_force_instrumentation();
+};
+
+#endif // SHARE_VM_JFR_INSTRUMENTATION_JFREVENTCLASSTRANSFORMER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/instrumentation/jfrJvmtiAgent.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jvm.h"
+#include "jfr/instrumentation/jfrJvmtiAgent.hpp"
+#include "jfr/jni/jfrJavaSupport.hpp"
+#include "jfr/jni/jfrUpcalls.hpp"
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
+#include "jfr/recorder/service/jfrOptionSet.hpp"
+#include "jfr/support/jfrEventClass.hpp"
+#include "memory/resourceArea.hpp"
+#include "prims/jvmtiExport.hpp"
+#include "runtime/interfaceSupport.hpp"
+#include "runtime/thread.inline.hpp"
+#include "utilities/exceptions.hpp"
+
+static const size_t ERROR_MSG_BUFFER_SIZE = 256;
+static JfrJvmtiAgent* agent = NULL;
+static jvmtiEnv* jfr_jvmti_env = NULL;
+
+static void check_jvmti_error(jvmtiEnv* jvmti, jvmtiError errnum, const char* str) {
+  if (errnum != JVMTI_ERROR_NONE) {
+    char* errnum_str = NULL;
+    jvmti->GetErrorName(errnum, &errnum_str);
+    if (true) tty->print_cr("ERROR: JfrJvmtiAgent: " INT32_FORMAT " (%s): %s\n",
+                           errnum,
+                           NULL == errnum_str ? "Unknown" : errnum_str,
+                           NULL == str ? "" : str);
+  }
+}
+
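+// Note: the trailing varargs are not used; only mode, event and event_thread are forwarded.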
+static jvmtiError set_event_notification_mode(jvmtiEventMode mode,
+                                              jvmtiEvent event,
+                                              jthread event_thread,
+                                              ...) {
+  if (jfr_jvmti_env == NULL) {
+    return JVMTI_ERROR_NONE;
+  }
+  const jvmtiError jvmti_ret_code = jfr_jvmti_env->SetEventNotificationMode(mode, event, event_thread);
+  check_jvmti_error(jfr_jvmti_env, jvmti_ret_code, "SetEventNotificationMode");
+  return jvmti_ret_code;
+}
+
+static jvmtiError update_class_file_load_hook_event(jvmtiEventMode mode) {
+  return set_event_notification_mode(mode, JVMTI_EVENT_CLASS_FILE_LOAD_HOOK, NULL);
+}
+
+static JavaThread* current_java_thread() {
+  Thread* this_thread = Thread::current();
+  assert(this_thread != NULL && this_thread->is_Java_thread(), "invariant");
+  return static_cast<JavaThread*>(this_thread);
+}
+
+// jvmti event callbacks require C linkage
+extern "C" void JNICALL jfr_on_class_file_load_hook(jvmtiEnv *jvmti_env,
+                                                    JNIEnv* jni_env,
+                                                    jclass class_being_redefined,
+                                                    jobject loader,
+                                                    const char* name,
+                                                    jobject protection_domain,
+                                                    jint class_data_len,
+                                                    const unsigned char* class_data,
+                                                    jint* new_class_data_len,
+                                                    unsigned char** new_class_data) {
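+  // class_being_redefined is NULL for initial class loads; only retransform/redefine events are forwarded to JfrUpcalls.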
+  if (class_being_redefined == NULL) {
+    return;
+  }
+  JavaThread* jt = JavaThread::thread_from_jni_environment(jni_env);
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt));
+  ThreadInVMfromNative tvmfn(jt);
+  JfrUpcalls::on_retransform(JfrTraceId::get(class_being_redefined),
+                             class_being_redefined,
+                             class_data_len,
+                             class_data,
+                             new_class_data_len,
+                             new_class_data,
+                             jt);
+}
+
+// caller needs ResourceMark
+static jclass* create_classes_array(jint classes_count, TRAPS) {
+  assert(classes_count > 0, "invariant");
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(THREAD));
+  ThreadInVMfromNative tvmfn((JavaThread*)THREAD);
+  jclass* const classes = NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(THREAD, jclass, classes_count);
+  if (NULL == classes) {
+    char error_buffer[ERROR_MSG_BUFFER_SIZE];
+    jio_snprintf(error_buffer, ERROR_MSG_BUFFER_SIZE,
+      "Thread local allocation (native) of " SIZE_FORMAT " bytes failed "
+      "in retransform classes", sizeof(jclass) * classes_count);
+    if (true) tty->print_cr("%s", error_buffer);
+    JfrJavaSupport::throw_out_of_memory_error(error_buffer, CHECK_NULL);
+  }
+  return classes;
+}
+
+static void log_and_throw(TRAPS) {
+  if (!HAS_PENDING_EXCEPTION) {
+    DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(THREAD));
+    ThreadInVMfromNative tvmfn((JavaThread*)THREAD);
+    if (true) tty->print_cr("JfrJvmtiAgent::retransformClasses failed");
+    JfrJavaSupport::throw_class_format_error("JfrJvmtiAgent::retransformClasses failed", THREAD);
+  }
+}
+
+static void check_exception_and_log(JNIEnv* env, TRAPS) {
+  assert(env != NULL, "invariant");
+  if (env->ExceptionOccurred()) {
+    // array index out of bounds
+    DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(THREAD));
+    ThreadInVMfromNative tvmfn((JavaThread*)THREAD);
+    if (true) tty->print_cr("GetObjectArrayElement threw an exception");
+    return;
+  }
+}
+
+void JfrJvmtiAgent::retransform_classes(JNIEnv* env, jobjectArray classes_array, TRAPS) {
+  assert(env != NULL, "invariant");
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(THREAD));
+  if (classes_array == NULL) {
+    return;
+  }
+  const jint classes_count = env->GetArrayLength(classes_array);
+  if (classes_count <= 0) {
+    return;
+  }
+  ResourceMark rm(THREAD);
+  jclass* const classes = create_classes_array(classes_count, CHECK);
+  assert(classes != NULL, "invariant");
+  for (jint i = 0; i < classes_count; i++) {
+    jclass clz = (jclass)env->GetObjectArrayElement(classes_array, i);
+    check_exception_and_log(env, THREAD);
+
+    // inspecting the oop/klass requires a thread transition
+    {
+      ThreadInVMfromNative transition((JavaThread*)THREAD);
+      if (JdkJfrEvent::is_a(clz)) {
+        // should have been tagged already
+        assert(JdkJfrEvent::is_subklass(clz), "invariant");
+      } else {
+        // outside the event hierarchy
+        JdkJfrEvent::tag_as_host(clz);
+      }
+    }
+
+    classes[i] = clz;
+  }
+  if (jfr_jvmti_env->RetransformClasses(classes_count, classes) != JVMTI_ERROR_NONE) {
+    log_and_throw(THREAD);
+  }
+}
+
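+// Installs jfr_on_class_file_load_hook as the ClassFileLoadHook callback.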
+static jvmtiError register_callbacks(JavaThread* jt) {
+  assert(jfr_jvmti_env != NULL, "invariant");
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt));
+  jvmtiEventCallbacks callbacks;
+  /* Set callbacks */
+  memset(&callbacks, 0, sizeof(callbacks));
+  callbacks.ClassFileLoadHook = jfr_on_class_file_load_hook;
+  const jvmtiError jvmti_ret_code = jfr_jvmti_env->SetEventCallbacks(&callbacks, sizeof(callbacks));
+  check_jvmti_error(jfr_jvmti_env, jvmti_ret_code, "SetEventCallbacks");
+  return jvmti_ret_code;
+}
+
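+// Requests can_retransform_classes and can_retransform_any_class, required by RetransformClasses.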
+static jvmtiError register_capabilities(JavaThread* jt) {
+  assert(jfr_jvmti_env != NULL, "invariant");
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt));
+  jvmtiCapabilities capabilities;
+  /* Add JVMTI capabilities */
+  (void)memset(&capabilities, 0, sizeof(capabilities));
+  capabilities.can_retransform_classes = 1;
+  capabilities.can_retransform_any_class = 1;
+  const jvmtiError jvmti_ret_code = jfr_jvmti_env->AddCapabilities(&capabilities);
+  check_jvmti_error(jfr_jvmti_env, jvmti_ret_code, "Add Capabilities");
+  return jvmti_ret_code;
+}
+
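+// Obtains a dedicated jvmtiEnv for JFR from the main JavaVM; jfr_jvmti_env remains NULL if GetEnv fails.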
+static jint create_jvmti_env(JavaThread* jt) {
+  assert(jfr_jvmti_env == NULL, "invariant");
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt));
+  extern struct JavaVM_ main_vm;
+  JavaVM* vm = &main_vm;
+  return vm->GetEnv((void **)&jfr_jvmti_env, JVMTI_VERSION);
+}
+
+static jvmtiError unregister_callbacks(JavaThread* jt) {
+  if (jfr_jvmti_env == NULL) {
+    return JVMTI_ERROR_NONE;
+  }
+  jvmtiEventCallbacks callbacks;
+  /* Set empty callbacks */
+  memset(&callbacks, 0, sizeof(callbacks));
+  const jvmtiError jvmti_ret_code = jfr_jvmti_env->SetEventCallbacks(&callbacks, sizeof(callbacks));
+  check_jvmti_error(jfr_jvmti_env, jvmti_ret_code, "SetEventCallbacks");
+  return jvmti_ret_code;
+}
+
+JfrJvmtiAgent::JfrJvmtiAgent() {}
+
+JfrJvmtiAgent::~JfrJvmtiAgent() {
+  JavaThread* jt = current_java_thread();
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(jt));
+  ThreadToNativeFromVM transition(jt);
+  update_class_file_load_hook_event(JVMTI_DISABLE);
+  unregister_callbacks(jt);
+  if (jfr_jvmti_env != NULL) {
+    jfr_jvmti_env->DisposeEnvironment();
+    jfr_jvmti_env = NULL;
+  }
+  agent = NULL;
+}
+
+static bool initialize() {
+  JavaThread* const jt = current_java_thread();
+  assert(jt != NULL, "invariant");
+  assert(jt->thread_state() == _thread_in_vm, "invariant");
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(jt));
+  ThreadToNativeFromVM transition(jt);
+  if (create_jvmti_env(jt) != JNI_OK) {
+    assert(jfr_jvmti_env == NULL, "invariant");
+    return false;
+  }
+  assert(jfr_jvmti_env != NULL, "invariant");
+  if (register_capabilities(jt) != JVMTI_ERROR_NONE) {
+    return false;
+  }
+  if (register_callbacks(jt) != JVMTI_ERROR_NONE) {
+    return false;
+  }
+  if (update_class_file_load_hook_event(JVMTI_ENABLE) != JVMTI_ERROR_NONE) {
+    return false;
+  }
+  return true;
+}
+
+bool JfrJvmtiAgent::create() {
+  assert(jfr_jvmti_env == NULL, "invariant");
+  agent = new JfrJvmtiAgent();
+  if (agent == NULL) {
+    return false;
+  }
+  if (!initialize()) {
+    delete agent;
+    agent = NULL;
+    return false;
+  }
+  return true;
+}
+
+void JfrJvmtiAgent::destroy() {
+  if (agent != NULL) {
+    delete agent;
+    agent = NULL;
+  }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/instrumentation/jfrJvmtiAgent.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_INSTRUMENTATION_JFRJVMTIAGENT_HPP
+#define SHARE_VM_JFR_INSTRUMENTATION_JFRJVMTIAGENT_HPP
+
+#include "jfr/utilities/jfrAllocation.hpp"
+
+class JfrJvmtiAgent : public JfrCHeapObj {
+  friend class JfrRecorder;
+ private:
+  JfrJvmtiAgent();
+  ~JfrJvmtiAgent();
+  static bool create();
+  static void destroy();
+ public:
+  static void retransform_classes(JNIEnv* env, jobjectArray classes, TRAPS);
+};
+
+#endif // SHARE_VM_JFR_INSTRUMENTATION_JFRJVMTIAGENT_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/jfr.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/jfr.hpp"
+#include "jfr/leakprofiler/leakProfiler.hpp"
+#include "jfr/periodic/sampling/jfrThreadSampler.hpp"
+#include "jfr/recorder/jfrRecorder.hpp"
+#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
+#include "jfr/recorder/repository/jfrEmergencyDump.hpp"
+#include "jfr/recorder/service/jfrOptionSet.hpp"
+#include "jfr/support/jfrThreadLocal.hpp"
+#include "runtime/java.hpp"
+
+bool Jfr::is_enabled() {
+  return JfrRecorder::is_enabled();
+}
+
+bool Jfr::is_disabled() {
+  return JfrRecorder::is_disabled();
+}
+
+bool Jfr::is_recording() {
+  return JfrRecorder::is_recording();
+}
+
+void Jfr::on_vm_init() {
+  if (!JfrRecorder::on_vm_init()) {
+    vm_exit_during_initialization("Failure when starting JFR on_vm_init");
+  }
+}
+
+void Jfr::on_vm_start() {
+  if (!JfrRecorder::on_vm_start()) {
+    vm_exit_during_initialization("Failure when starting JFR on_vm_start");
+  }
+}
+
+void Jfr::on_unloading_classes() {
+  if (JfrRecorder::is_created()) {
+    JfrCheckpointManager::write_type_set_for_unloaded_classes();
+  }
+}
+
+void Jfr::on_thread_exit(JavaThread* thread) {
+  JfrThreadLocal::on_exit(thread);
+}
+
+void Jfr::on_thread_destruct(Thread* thread) {
+  if (JfrRecorder::is_created()) {
+    JfrThreadLocal::on_destruct(thread);
+  }
+}
+
+void Jfr::on_vm_shutdown(bool exception_handler) {
+  JfrRecorder::set_is_shutting_down();
+  if (JfrRecorder::is_recording()) {
+    JfrEmergencyDump::on_vm_shutdown(exception_handler);
+  }
+}
+
+void Jfr::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
+  LeakProfiler::oops_do(is_alive, f);
+}
+
+void Jfr::weak_oops_do(OopClosure* f) {
+  AlwaysTrueClosure always_true;
+  LeakProfiler::oops_do(&always_true, f);
+}
+
+bool Jfr::on_flight_recorder_option(const JavaVMOption** option, char* delimiter) {
+  return JfrOptionSet::parse_flight_recorder_option(option, delimiter);
+}
+
+bool Jfr::on_start_flight_recording_option(const JavaVMOption** option, char* delimiter) {
+  return JfrOptionSet::parse_start_flight_recording_option(option, delimiter);
+}
+
+Thread* Jfr::sampler_thread() {
+  return JfrThreadSampling::sampler_thread();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/jfr.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_JFR_HPP
+#define SHARE_VM_JFR_JFR_HPP
+
+#include "jni.h"
+#include "memory/allocation.hpp"
+
+class BoolObjectClosure;
+class JavaThread;
+class OopClosure;
+class Thread;
+
+extern "C" void JNICALL jfr_register_natives(JNIEnv*, jclass);
+
+//
+// The VM interface to Flight Recorder.
+//
+class Jfr : AllStatic {
+ public:
+  static bool is_enabled();
+  static bool is_disabled();
+  static bool is_recording();
+  static void on_vm_init();
+  static void on_vm_start();
+  static void on_unloading_classes();
+  static void on_thread_exit(JavaThread* thread);
+  static void on_thread_destruct(Thread* thread);
+  static void on_vm_shutdown(bool exception_handler = false);
+  static bool on_flight_recorder_option(const JavaVMOption** option, char* delimiter);
+  static bool on_start_flight_recording_option(const JavaVMOption** option, char* delimiter);
+  static void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f);
+  static void weak_oops_do(OopClosure* f);
+  static Thread* sampler_thread();
+};
+
+#endif // SHARE_VM_JFR_JFR_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/jfrEvents.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_JFREVENTS_HPP
+#define SHARE_VM_JFR_JFREVENTS_HPP
+/*
+ * Declare your event in jfr/metadata/metadata.xml.
+ *
+ * Include this header to access the machine generated event class.
+ */
+#include "jfrfiles/jfrEventClasses.hpp"
+#include "jfrfiles/jfrEventIds.hpp"
+
+#endif // SHARE_VM_JFR_JFREVENTS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/jni/jfrGetAllEventClasses.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/javaClasses.hpp"
+#include "classfile/symbolTable.hpp"
+#include "jfr/jni/jfrGetAllEventClasses.hpp"
+#include "jfr/jni/jfrJavaSupport.hpp"
+#include "jfr/support/jfrEventClass.hpp"
+#include "oops/instanceKlass.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/thread.inline.hpp"
+#include "utilities/growableArray.hpp"
+#include "utilities/stack.inline.hpp"
+
+// incremented during class unloading (safepoint) for each unloaded event class
+static jlong unloaded_event_classes = 0;
+
+jlong JfrEventClasses::unloaded_event_classes_count() {
+  return unloaded_event_classes;
+}
+
+void JfrEventClasses::increment_unloaded_event_class() {
+  // incremented during class unloading (safepoint) for each unloaded event class
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  ++unloaded_event_classes;
+}
+
+static jobject empty_java_util_arraylist = NULL;
+
+static oop new_java_util_arraylist(TRAPS) {
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
+  JavaValue result(T_OBJECT);
+  JfrJavaArguments args(&result, "java/util/ArrayList", "<init>", "()V", CHECK_NULL);
+  JfrJavaSupport::new_object(&args, CHECK_NULL);
+  return (oop)result.get_jobject();
+}
+
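+// Lazily creates a shared, empty java.util.ArrayList kept in a global JNI handle;
+// it is handed out whenever there are no event classes to report.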
+static bool initialize(TRAPS) {
+  static bool initialized = false;
+  if (!initialized) {
+    unloaded_event_classes = 0;
+    assert(NULL == empty_java_util_arraylist, "invariant");
+    const oop array_list = new_java_util_arraylist(CHECK_false);
+    empty_java_util_arraylist = JfrJavaSupport::global_jni_handle(array_list, THREAD);
+    initialized = empty_java_util_arraylist != NULL;
+  }
+  return initialized;
+}
+
+/*
+ * Abstract klasses are filtered out unconditionally.
+ * If a klass is not yet initialized, i.e. has yet to run its <clinit>,
+ * it is also filtered out so we don't accidentally
+ * trigger initialization.
+ */
+static bool is_whitelisted(const Klass* k) {
+  assert(k != NULL, "invariant");
+  return !(k->is_abstract() || k->should_be_initialized());
+}
+
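+// Walks the subclass hierarchy below the jdk.jfr.Event klass via the subklass/next_sibling links,
+// collecting whitelisted klasses. Compile_lock is held to keep the hierarchy stable during traversal.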
+static void fill_klasses(GrowableArray<const void*>& event_subklasses, const Klass* event_klass, Thread* thread) {
+  assert(event_subklasses.length() == 0, "invariant");
+  assert(event_klass != NULL, "invariant");
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(thread));
+
+  Stack<const Klass*, mtTracing> mark_stack;
+  MutexLocker ml(Compile_lock, thread);
+  mark_stack.push(event_klass->subklass());
+
+  while (!mark_stack.is_empty()) {
+    const Klass* const current = mark_stack.pop();
+    assert(current != NULL, "null element in stack!");
+
+    if (is_whitelisted(current)) {
+      event_subklasses.append(current);
+    }
+
+    // subclass (depth)
+    const Klass* next_klass = current->subklass();
+    if (next_klass != NULL) {
+      mark_stack.push(next_klass);
+    }
+
+    // siblings (breadth)
+    next_klass = current->next_sibling();
+    if (next_klass != NULL) {
+      mark_stack.push(next_klass);
+    }
+  }
+  assert(mark_stack.is_empty(), "invariant");
+}
+
+static void transform_klasses_to_local_jni_handles(GrowableArray<const void*>& event_subklasses, Thread* thread) {
+  assert(event_subklasses.is_nonempty(), "invariant");
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(thread));
+
+  for (int i = 0; i < event_subklasses.length(); ++i) {
+    const InstanceKlass* k = static_cast<const InstanceKlass*>(event_subklasses.at(i));
+    assert(is_whitelisted(k), "invariant");
+    event_subklasses.at_put(i, JfrJavaSupport::local_jni_handle(k->java_mirror(), thread));
+  }
+}
+
+static const int initial_size_growable_array = 64;
+
+jobject JfrEventClasses::get_all_event_classes(TRAPS) {
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
+  initialize(THREAD);
+  assert(empty_java_util_arraylist != NULL, "should have been setup already!");
+  static const char jdk_jfr_event_name[] = "jdk/jfr/Event";
+  unsigned int unused_hash = 0;
+  Symbol* const event_klass_name = SymbolTable::lookup_only(jdk_jfr_event_name, sizeof jdk_jfr_event_name - 1, unused_hash);
+
+  if (NULL == event_klass_name) {
+    // not loaded yet
+    return empty_java_util_arraylist;
+  }
+
+  const Klass* const klass = SystemDictionary::resolve_or_null(event_klass_name, THREAD);
+  assert(klass != NULL, "invariant");
+  assert(JdkJfrEvent::is(klass), "invariant");
+
+  if (klass->subklass() == NULL) {
+    return empty_java_util_arraylist;
+  }
+
+  ResourceMark rm(THREAD);
+  GrowableArray<const void*> event_subklasses(THREAD, initial_size_growable_array);
+  fill_klasses(event_subklasses, klass, THREAD);
+
+  if (event_subklasses.is_empty()) {
+    return empty_java_util_arraylist;
+  }
+
+  transform_klasses_to_local_jni_handles(event_subklasses, THREAD);
+
+  Handle h_array_list(THREAD, new_java_util_arraylist(THREAD));
+  assert(h_array_list.not_null(), "invariant");
+
+  static const char add_method_name[] = "add";
+  static const char add_method_signature[] = "(Ljava/lang/Object;)Z";
+  const Klass* const array_list_klass = JfrJavaSupport::klass(empty_java_util_arraylist);
+  assert(array_list_klass != NULL, "invariant");
+
+  const Symbol* const add_method_sym = SymbolTable::lookup(add_method_name, sizeof add_method_name - 1, THREAD);
+  assert(add_method_sym != NULL, "invariant");
+
+  const Symbol* const add_method_sig_sym = SymbolTable::lookup(add_method_signature, sizeof add_method_signature - 1, THREAD);
+  assert(add_method_sig_sym != NULL, "invariant");
+
+  JavaValue result(T_BOOLEAN);
+  for (int i = 0; i < event_subklasses.length(); ++i) {
+    const jclass clazz = (const jclass)event_subklasses.at(i);
+    assert(JdkJfrEvent::is_subklass(clazz), "invariant");
+    JfrJavaArguments args(&result, array_list_klass, add_method_sym, add_method_sig_sym);
+    args.set_receiver(h_array_list());
+    args.push_jobject(clazz);
+    JfrJavaSupport::call_virtual(&args, THREAD);
+    if (HAS_PENDING_EXCEPTION || JNI_FALSE == result.get_jboolean()) {
+      return empty_java_util_arraylist;
+    }
+  }
+  return JfrJavaSupport::local_jni_handle(h_array_list(), THREAD);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/jni/jfrGetAllEventClasses.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#ifndef SHARE_VM_JFR_JNI_JFRGETALLEVENTCLASSES_HPP
+#define SHARE_VM_JFR_JNI_JFRGETALLEVENTCLASSES_HPP
+
+#include "jni.h"
+#include "memory/allocation.hpp"
+#include "utilities/exceptions.hpp"
+
+//
+// Responsible for the delivery of currently loaded jdk.jfr.Event subklasses to Java.
+//
+class JfrEventClasses : AllStatic {
+ public:
+  static void increment_unloaded_event_class();
+  static jlong unloaded_event_classes_count();
+  static jobject get_all_event_classes(TRAPS);
+};
+
+#endif // SHARE_VM_JFR_JNI_JFRGETALLEVENTCLASSES_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/jni/jfrJavaCall.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,380 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/symbolTable.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "jfr/jni/jfrJavaCall.hpp"
+#include "jfr/jni/jfrJavaSupport.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/javaCalls.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+#ifdef ASSERT
+static bool is_large_value(const JavaValue& value) {
+  return value.get_type() == T_LONG || value.get_type() == T_DOUBLE;
+}
+#endif // ASSERT
+
+static Symbol* resolve(const char* str, TRAPS) {
+  assert(str != NULL, "invariant");
+  return SymbolTable::lookup(str, (int)strlen(str), THREAD);
+}
+
+static Klass* resolve(Symbol* k_sym, TRAPS) {
+  assert(k_sym != NULL, "invariant");
+  return SystemDictionary::resolve_or_fail(k_sym, true, THREAD);
+}
+
+JfrJavaArguments::Parameters::Parameters() : _storage_index(0), _java_stack_slots(0) {
+  JavaValue value(T_VOID);
+  push(value);
+}
+
+void JfrJavaArguments::Parameters::push(const JavaValue& value) {
+  assert(_storage != NULL, "invariant");
+  assert(!is_large_value(value), "invariant");
+  assert(_storage_index < SIZE, "invariant");
+  _storage[_storage_index++] = value;
+  _java_stack_slots++;
+}
+
+void JfrJavaArguments::Parameters::push_large(const JavaValue& value) {
+  assert(_storage != NULL, "invariant");
+  assert(is_large_value(value), "invariant");
+  assert(_storage_index < SIZE, "invariant");
+  _storage[_storage_index++] = value;
+  _java_stack_slots += 2;
+}
+
+void JfrJavaArguments::Parameters::set_receiver(const oop receiver) {
+  assert(_storage != NULL, "invariant");
+  assert(receiver != NULL, "invariant");
+  JavaValue value(T_OBJECT);
+  value.set_jobject((jobject)receiver);
+  _storage[0] = value;
+}
+
+void JfrJavaArguments::Parameters::set_receiver(Handle receiver) {
+  set_receiver(receiver());
+}
+
+oop JfrJavaArguments::Parameters::receiver() const {
+  assert(has_receiver(), "invariant");
+  assert(_storage[0].get_type() == T_OBJECT, "invariant");
+  return (oop)_storage[0].get_jobject();
+}
+
+bool JfrJavaArguments::Parameters::has_receiver() const {
+  assert(_storage != NULL, "invariant");
+  assert(_storage_index >= 1, "invariant");
+  assert(_java_stack_slots >= 1, "invariant");
+  return _storage[0].get_type() == T_OBJECT;
+}
+
+void JfrJavaArguments::Parameters::push_oop(const oop obj) {
+  JavaValue value(T_OBJECT);
+  value.set_jobject((jobject)obj);
+  push(value);
+}
+
+void JfrJavaArguments::Parameters::push_oop(Handle h_obj) {
+  push_oop(h_obj());
+}
+
+void JfrJavaArguments::Parameters::push_jobject(jobject h) {
+  JavaValue value(T_ADDRESS);
+  value.set_jobject(h);
+  push(value);
+}
+
+void JfrJavaArguments::Parameters::push_jint(jint i) {
+  JavaValue value(T_INT);
+  value.set_jint(i);
+  push(value);
+}
+
+void JfrJavaArguments::Parameters::push_jfloat(jfloat f) {
+  JavaValue value(T_FLOAT);
+  value.set_jfloat(f);
+  push(value);
+}
+
+void JfrJavaArguments::Parameters::push_jdouble(jdouble d) {
+  JavaValue value(T_DOUBLE);
+  value.set_jdouble(d);
+  push_large(value);
+}
+
+void JfrJavaArguments::Parameters::push_jlong(jlong l) {
+  JavaValue value(T_LONG);
+  value.set_jlong(l);
+  push_large(value);
+}
+
+// including receiver (even if there is none)
+inline int JfrJavaArguments::Parameters::length() const {
+  assert(_storage_index >= 1, "invariant");
+  return _storage_index;
+}
+
+inline int JfrJavaArguments::Parameters::java_stack_slots() const {
+  return _java_stack_slots;
+}
+
+const JavaValue& JfrJavaArguments::Parameters::values(int idx) const {
+  assert(idx >= 0, "invariant");
+  assert(idx < SIZE, "invariant");
+  return _storage[idx];
+}
+
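+// Translates the stored JavaValues into JavaCallArguments; T_ADDRESS entries carry jobject
+// handles and are resolved through JNIHandles before being pushed as oops.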
+void JfrJavaArguments::Parameters::copy(JavaCallArguments& args, TRAPS) const {
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
+  if (has_receiver()) {
+    args.set_receiver(Handle(THREAD, receiver()));
+  }
+  for (int i = 1; i < length(); ++i) {
+    switch(values(i).get_type()) {
+      case T_BOOLEAN:
+      case T_CHAR:
+      case T_SHORT:
+      case T_INT:
+        args.push_int(values(i).get_jint());
+        break;
+      case T_LONG:
+        args.push_long(values(i).get_jlong());
+        break;
+      case T_FLOAT:
+        args.push_float(values(i).get_jfloat());
+        break;
+      case T_DOUBLE:
+        args.push_double(values(i).get_jdouble());
+        break;
+      case T_OBJECT:
+        args.push_oop(Handle(THREAD, (oop)values(i).get_jobject()));
+        break;
+      case T_ADDRESS:
+        args.push_oop(Handle(THREAD, JNIHandles::resolve(values(i).get_jobject())));
+        break;
+      default:
+        ShouldNotReachHere();
+    }
+  }
+}
+
+JfrJavaArguments::JfrJavaArguments(JavaValue* result) : _result(result), _klass(NULL), _name(NULL), _signature(NULL), _array_length(0) {
+  assert(result != NULL, "invariant");
+}
+
+JfrJavaArguments::JfrJavaArguments(JavaValue* result, const char* klass_name, const char* name, const char* signature, TRAPS) :
+  _result(result),
+  _klass(NULL),
+  _name(NULL),
+  _signature(NULL),
+  _array_length(0) {
+  assert(result != NULL, "invariant");
+  if (klass_name != NULL) {
+    set_klass(klass_name, CHECK);
+  }
+  if (name != NULL) {
+    set_name(name, CHECK);
+  }
+  if (signature != NULL) {
+    set_signature(signature, THREAD);
+  }
+}
+
+JfrJavaArguments::JfrJavaArguments(JavaValue* result, const Klass* klass, const Symbol* name, const Symbol* signature) : _result(result),
+  _klass(NULL),
+  _name(NULL),
+  _signature(NULL),
+  _array_length(0) {
+  assert(result != NULL, "invariant");
+  if (klass != NULL) {
+    set_klass(klass);
+  }
+  if (name != NULL) {
+    set_name(name);
+  }
+  if (signature != NULL) {
+    set_signature(signature);
+  }
+}
+
+Klass* JfrJavaArguments::klass() const {
+  assert(_klass != NULL, "invariant");
+  return const_cast<Klass*>(_klass);
+}
+
+void JfrJavaArguments::set_klass(const char* klass_name, TRAPS) {
+  assert(klass_name != NULL, "invariant");
+  Symbol* const k_sym = resolve(klass_name, CHECK);
+  assert(k_sym != NULL, "invariant");
+  const Klass* const klass = resolve(k_sym, CHECK);
+  set_klass(klass);
+}
+
+void JfrJavaArguments::set_klass(const Klass* klass) {
+  assert(klass != NULL, "invariant");
+  _klass = klass;
+}
+
+Symbol* JfrJavaArguments::name() const {
+  assert(_name != NULL, "invariant");
+  return const_cast<Symbol*>(_name);
+}
+
+void JfrJavaArguments::set_name(const char* name, TRAPS) {
+  assert(name != NULL, "invariant");
+  const Symbol* const sym = resolve(name, CHECK);
+  set_name(sym);
+}
+
+void JfrJavaArguments::set_name(const Symbol* name) {
+  assert(name != NULL, "invariant");
+  _name = name;
+}
+
+Symbol* JfrJavaArguments::signature() const {
+  assert(_signature != NULL, "invariant");
+  return const_cast<Symbol*>(_signature);
+}
+
+void JfrJavaArguments::set_signature(const char* signature, TRAPS) {
+  assert(signature != NULL, "invariant");
+  const Symbol* const sym = resolve(signature, CHECK);
+  set_signature(sym);
+}
+
+void JfrJavaArguments::set_signature(const Symbol* signature) {
+  assert(signature != NULL, "invariant");
+  _signature = signature;
+}
+
+int JfrJavaArguments::array_length() const {
+  return _array_length;
+}
+
+void JfrJavaArguments::set_array_length(int length) {
+  assert(length >= 0, "invariant");
+  _array_length = length;
+}
+
+JavaValue* JfrJavaArguments::result() const {
+  assert(_result != NULL, "invariant");
+  return const_cast<JavaValue*>(_result);
+}
+
+int JfrJavaArguments::length() const {
+  return _params.length();
+}
+
+bool JfrJavaArguments::has_receiver() const {
+  return _params.has_receiver();
+}
+
+oop JfrJavaArguments::receiver() const {
+  return _params.receiver();
+}
+
+void JfrJavaArguments::set_receiver(const oop receiver) {
+  _params.set_receiver(receiver);
+}
+
+void JfrJavaArguments::set_receiver(Handle receiver) {
+  _params.set_receiver(receiver);
+}
+
+void JfrJavaArguments::push_oop(const oop obj) {
+  _params.push_oop(obj);
+}
+
+void JfrJavaArguments::push_oop(Handle h_obj) {
+  _params.push_oop(h_obj);
+}
+
+void JfrJavaArguments::push_jobject(jobject h) {
+  _params.push_jobject(h);
+}
+
+void JfrJavaArguments::push_int(jint i) {
+  _params.push_jint(i);
+}
+
+void JfrJavaArguments::push_float(jfloat f) {
+  _params.push_jfloat(f);
+}
+
+void JfrJavaArguments::push_double(jdouble d) {
+  _params.push_jdouble(d);
+}
+
+void JfrJavaArguments::push_long(jlong l) {
+  _params.push_jlong(l);
+}
+
+const JavaValue& JfrJavaArguments::param(int idx) const {
+  return _params.values(idx);
+}
+
+int JfrJavaArguments::java_call_arg_slots() const {
+  return _params.java_stack_slots();
+}
+
+void JfrJavaArguments::copy(JavaCallArguments& args, TRAPS) {
+  _params.copy(args, THREAD);
+}
+
+void JfrJavaCall::call_static(JfrJavaArguments* args, TRAPS) {
+  assert(args != NULL, "invariant");
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
+  ResourceMark rm(THREAD);
+  HandleMark hm(THREAD);
+  JavaCallArguments jcas(args->java_call_arg_slots());
+  args->copy(jcas, CHECK);
+  JavaCalls::call_static(args->result(), args->klass(), args->name(), args->signature(), &jcas, THREAD);
+}
+
+void JfrJavaCall::call_special(JfrJavaArguments* args, TRAPS) {
+  assert(args != NULL, "invariant");
+  assert(args->has_receiver(), "invariant");
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
+  ResourceMark rm(THREAD);
+  HandleMark hm(THREAD);
+  JavaCallArguments jcas(args->java_call_arg_slots());
+  args->copy(jcas, CHECK);
+  JavaCalls::call_special(args->result(), args->klass(), args->name(), args->signature(), &jcas, THREAD);
+}
+
+void JfrJavaCall::call_virtual(JfrJavaArguments* args, TRAPS) {
+  assert(args != NULL, "invariant");
+  assert(args->has_receiver(), "invariant");
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
+  ResourceMark rm(THREAD);
+  HandleMark hm(THREAD);
+  JavaCallArguments jcas(args->java_call_arg_slots());
+  args->copy(jcas, CHECK);
+  JavaCalls::call_virtual(args->result(), args->klass(), args->name(), args->signature(), &jcas, THREAD);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/jni/jfrJavaCall.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_JNI_JFRJAVACALL_HPP
+#define SHARE_VM_JFR_JNI_JFRJAVACALL_HPP
+
+#include "jni.h"
+#include "jfr/utilities/jfrAllocation.hpp"
+#include "utilities/exceptions.hpp"
+
+class JavaCallArguments;
+class JavaThread;
+class JavaValue;
+class Klass;
+class Symbol;
+
+class JfrJavaArguments : public StackObj {
+  friend class JfrJavaCall;
+ public:
+  JfrJavaArguments(JavaValue* result);
+  JfrJavaArguments(JavaValue* result, const char* klass_name, const char* name, const char* signature, TRAPS);
+  JfrJavaArguments(JavaValue* result, const Klass* klass, const Symbol* name, const Symbol* signature);
+
+  Klass* klass() const;
+  void set_klass(const char* klass_name, TRAPS);
+  void set_klass(const Klass* klass);
+
+  Symbol* name() const;
+  void set_name(const char* name, TRAPS);
+  void set_name(const Symbol* name);
+
+  Symbol* signature() const;
+  void set_signature(const char* signature, TRAPS);
+  void set_signature(const Symbol* signature);
+
+  int array_length() const;
+  void set_array_length(int length);
+
+  JavaValue* result() const;
+
+  bool has_receiver() const;
+  void set_receiver(const oop receiver);
+  void set_receiver(Handle receiver);
+  oop receiver() const;
+
+  // parameters
+  void push_oop(const oop obj);
+  void push_oop(Handle h_obj);
+  void push_jobject(jobject h);
+  void push_int(jint i);
+  void push_double(jdouble d);
+  void push_long(jlong l);
+  void push_float(jfloat f);
+
+  int length() const;
+  const JavaValue& param(int idx) const;
+
+ private:
+  class Parameters {
+    friend class JfrJavaArguments;
+   private:
+    enum { SIZE = 16};
+    JavaValue _storage[SIZE];
+    int _storage_index;
+    int _java_stack_slots;
+
+    Parameters();
+    Parameters(const Parameters&); // no impl
+    Parameters& operator=(const Parameters&); // no impl
+
+    void push(const JavaValue& value);
+    void push_large(const JavaValue& value);
+
+    void push_oop(const oop obj);
+    void push_oop(Handle h_obj);
+    void push_jobject(jobject h);
+    void push_jint(jint i);
+    void push_jdouble(jdouble d);
+    void push_jlong(jlong l);
+    void push_jfloat(jfloat f);
+
+    bool has_receiver() const;
+    void set_receiver(const oop receiver);
+    void set_receiver(Handle receiver);
+    oop receiver() const;
+
+    int length() const;
+    int java_stack_slots() const;
+
+    void copy(JavaCallArguments& args, TRAPS) const;
+    const JavaValue& values(int idx) const;
+  };
+
+  Parameters _params;
+  const JavaValue* const _result;
+  const Klass* _klass;
+  const Symbol* _name;
+  const Symbol* _signature;
+  int _array_length;
+
+  int java_call_arg_slots() const;
+  void copy(JavaCallArguments& args, TRAPS);
+};
+
+class JfrJavaCall : public AllStatic {
+  friend class JfrJavaSupport;
+ private:
+  static void call_static(JfrJavaArguments* args, TRAPS);
+  static void call_special(JfrJavaArguments* args, TRAPS);
+  static void call_virtual(JfrJavaArguments* args, TRAPS);
+};
+
+#endif // SHARE_VM_JFR_JNI_JFRJAVACALL_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/jni/jfrJavaSupport.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,583 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jni.h"
+#include "classfile/javaClasses.hpp"
+#include "classfile/symbolTable.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "classfile/vmSymbols.hpp"
+#include "jfr/jni/jfrJavaCall.hpp"
+#include "jfr/jni/jfrJavaSupport.hpp"
+#include "jfr/support/jfrThreadId.hpp"
+#include "memory/resourceArea.hpp"
+#include "oops/instanceOop.hpp"
+#include "oops/oop.inline.hpp"
+#include "oops/objArrayKlass.hpp"
+#include "oops/objArrayOop.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/fieldDescriptor.hpp"
+#include "runtime/java.hpp"
+#include "runtime/jniHandles.hpp"
+#include "runtime/synchronizer.hpp"
+#include "runtime/thread.inline.hpp"
+//#include "runtime/threadSMR.hpp"
+
+#ifdef ASSERT
+void JfrJavaSupport::check_java_thread_in_vm(Thread* t) {
+  assert(t != NULL, "invariant");
+  assert(t->is_Java_thread(), "invariant");
+  assert(((JavaThread*)t)->thread_state() == _thread_in_vm, "invariant");
+}
+
+void JfrJavaSupport::check_java_thread_in_native(Thread* t) {
+  assert(t != NULL, "invariant");
+  assert(t->is_Java_thread(), "invariant");
+  assert(((JavaThread*)t)->thread_state() == _thread_in_native, "invariant");
+}
+#endif
+
+/*
+ *  Handles and references
+ */
+jobject JfrJavaSupport::local_jni_handle(const oop obj, Thread* t) {
+  DEBUG_ONLY(check_java_thread_in_vm(t));
+  return t->active_handles()->allocate_handle(obj);
+}
+
+jobject JfrJavaSupport::local_jni_handle(const jobject handle, Thread* t) {
+  DEBUG_ONLY(check_java_thread_in_vm(t));
+  const oop obj = JNIHandles::resolve(handle);
+  return obj == NULL ? NULL : local_jni_handle(obj, t);
+}
+
+void JfrJavaSupport::destroy_local_jni_handle(jobject handle) {
+  JNIHandles::destroy_local(handle);
+}
+
+jobject JfrJavaSupport::global_jni_handle(const oop obj, Thread* t) {
+  DEBUG_ONLY(check_java_thread_in_vm(t));
+  HandleMark hm(t);
+  return JNIHandles::make_global(Handle(t, obj));
+}
+
+jobject JfrJavaSupport::global_jni_handle(const jobject handle, Thread* t) {
+  const oop obj = JNIHandles::resolve(handle);
+  return obj == NULL ? NULL : global_jni_handle(obj, t);
+}
+
+void JfrJavaSupport::destroy_global_jni_handle(const jobject handle) {
+  JNIHandles::destroy_global(handle);
+}
+
+oop JfrJavaSupport::resolve_non_null(jobject obj) {
+  return JNIHandles::resolve_non_null(obj);
+}
+
+/*
+ *  Method invocation
+ */
+void JfrJavaSupport::call_static(JfrJavaArguments* args, TRAPS) {
+  JfrJavaCall::call_static(args, THREAD);
+}
+
+void JfrJavaSupport::call_special(JfrJavaArguments* args, TRAPS) {
+  JfrJavaCall::call_special(args, THREAD);
+}
+
+void JfrJavaSupport::call_virtual(JfrJavaArguments* args, TRAPS) {
+  JfrJavaCall::call_virtual(args, THREAD);
+}
+
+void JfrJavaSupport::notify_all(jobject object, TRAPS) {
+  assert(object != NULL, "invariant");
+  DEBUG_ONLY(check_java_thread_in_vm(THREAD));
+  HandleMark hm(THREAD);
+  Handle h_obj(THREAD, resolve_non_null(object));
+  assert(h_obj.not_null(), "invariant");
+  ObjectSynchronizer::jni_enter(h_obj, THREAD);
+  ObjectSynchronizer::notifyall(h_obj, THREAD);
+  ObjectSynchronizer::jni_exit(h_obj(), THREAD);
+  DEBUG_ONLY(check_java_thread_in_vm(THREAD));
+}
+
+/*
+ *  Object construction
+ */
+static void object_construction(JfrJavaArguments* args, JavaValue* result, InstanceKlass* klass, TRAPS) {
+  assert(args != NULL, "invariant");
+  assert(result != NULL, "invariant");
+  assert(klass != NULL, "invariant");
+  assert(klass->is_initialized(), "invariant");
+
+  HandleMark hm(THREAD);
+  instanceOop obj = klass->allocate_instance(CHECK);
+  instanceHandle h_obj(THREAD, obj);
+  assert(h_obj.not_null(), "invariant");
+  args->set_receiver(h_obj);
+  result->set_type(T_VOID); // constructor result type
+  JfrJavaSupport::call_special(args, CHECK);
+  result->set_type(T_OBJECT); // set back to original result type
+  result->set_jobject((jobject)h_obj());
+}
+
+static void array_construction(JfrJavaArguments* args, JavaValue* result, InstanceKlass* klass, int array_length, TRAPS) {
+  assert(args != NULL, "invariant");
+  assert(result != NULL, "invariant");
+  assert(klass != NULL, "invariant");
+  assert(klass->is_initialized(), "invariant");
+
+  Klass* const ak = klass->array_klass(THREAD);
+  ObjArrayKlass::cast(ak)->initialize(THREAD);
+  HandleMark hm(THREAD);
+  objArrayOop arr = ObjArrayKlass::cast(ak)->allocate(array_length, CHECK);
+  result->set_jobject((jobject)arr);
+}
+
+static void create_object(JfrJavaArguments* args, JavaValue* result, TRAPS) {
+  assert(args != NULL, "invariant");
+  assert(result != NULL, "invariant");
+  assert(result->get_type() == T_OBJECT, "invariant");
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
+
+  InstanceKlass* const klass = static_cast<InstanceKlass*>(args->klass());
+  klass->initialize(CHECK);
+
+  const int array_length = args->array_length();
+
+  if (array_length > 0) {
+    array_construction(args, result, klass, array_length, CHECK);
+  } else {
+    object_construction(args, result, klass, THREAD);
+  }
+}
+
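+// Wrap a naked oop result in a JNI handle (local or global) so the caller can
+// keep a reference to it across safepoints.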
+static void handle_result(JavaValue* result, bool global_ref, Thread* t) {
+  assert(result != NULL, "invariant");
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(t));
+  const oop result_oop = (const oop)result->get_jobject();
+  if (result_oop == NULL) {
+    return;
+  }
+  result->set_jobject(global_ref ?
+                      JfrJavaSupport::global_jni_handle(result_oop, t) :
+                      JfrJavaSupport::local_jni_handle(result_oop, t));
+}
+
+void JfrJavaSupport::new_object(JfrJavaArguments* args, TRAPS) {
+  assert(args != NULL, "invariant");
+  DEBUG_ONLY(check_java_thread_in_vm(THREAD));
+  create_object(args, args->result(), THREAD);
+}
+
+void JfrJavaSupport::new_object_local_ref(JfrJavaArguments* args, TRAPS) {
+  assert(args != NULL, "invariant");
+  DEBUG_ONLY(check_java_thread_in_vm(THREAD));
+  JavaValue* const result = args->result();
+  assert(result != NULL, "invariant");
+  create_object(args, result, CHECK);
+  handle_result(result, false, THREAD);
+}
+
+void JfrJavaSupport::new_object_global_ref(JfrJavaArguments* args, TRAPS) {
+  assert(args != NULL, "invariant");
+  DEBUG_ONLY(check_java_thread_in_vm(THREAD));
+  JavaValue* const result = args->result();
+  assert(result != NULL, "invariant");
+  create_object(args, result, CHECK);
+  handle_result(result, true, THREAD);
+}
+
+jstring JfrJavaSupport::new_string(const char* c_str, TRAPS) {
+  assert(c_str != NULL, "invariant");
+  DEBUG_ONLY(check_java_thread_in_vm(THREAD));
+  const oop result = java_lang_String::create_oop_from_str(c_str, THREAD);
+  return (jstring)local_jni_handle(result, THREAD);
+}
+
+jobjectArray JfrJavaSupport::new_string_array(int length, TRAPS) {
+  DEBUG_ONLY(check_java_thread_in_vm(THREAD));
+  JavaValue result(T_OBJECT);
+  JfrJavaArguments args(&result, "java/lang/String", "<init>", "()V", CHECK_NULL);
+  args.set_array_length(length);
+  new_object_local_ref(&args, THREAD);
+  return (jobjectArray)args.result()->get_jobject();
+}
+
+jobject JfrJavaSupport::new_java_lang_Boolean(bool value, TRAPS) {
+  DEBUG_ONLY(check_java_thread_in_vm(THREAD));
+  JavaValue result(T_OBJECT);
+  JfrJavaArguments args(&result, "java/lang/Boolean", "<init>", "(Z)V", CHECK_NULL);
+  args.push_int(value ? (jint)JNI_TRUE : (jint)JNI_FALSE);
+  new_object_local_ref(&args, THREAD);
+  return args.result()->get_jobject();
+}
+
+jobject JfrJavaSupport::new_java_lang_Integer(jint value, TRAPS) {
+  DEBUG_ONLY(check_java_thread_in_vm(THREAD));
+  JavaValue result(T_OBJECT);
+  JfrJavaArguments args(&result, "java/lang/Integer", "<init>", "(I)V", CHECK_NULL);
+  args.push_int(value);
+  new_object_local_ref(&args, THREAD);
+  return args.result()->get_jobject();
+}
+
+jobject JfrJavaSupport::new_java_lang_Long(jlong value, TRAPS) {
+  DEBUG_ONLY(check_java_thread_in_vm(THREAD));
+  JavaValue result(T_OBJECT);
+  JfrJavaArguments args(&result, "java/lang/Long", "<init>", "(J)V", CHECK_NULL);
+  args.push_long(value);
+  new_object_local_ref(&args, THREAD);
+  return args.result()->get_jobject();
+}
+
+void JfrJavaSupport::set_array_element(jobjectArray arr, jobject element, int index, Thread* t) {
+  assert(arr != NULL, "invariant");
+  DEBUG_ONLY(check_java_thread_in_vm(t));
+  HandleMark hm(t);
+  objArrayHandle a(t, (objArrayOop)resolve_non_null(arr));
+  a->obj_at_put(index, resolve_non_null(element));
+}
+
+/*
+ *  Field access
+ */
+static void write_int_field(const Handle& h_oop, fieldDescriptor* fd, jint value) {
+  assert(h_oop.not_null(), "invariant");
+  assert(fd != NULL, "invariant");
+  h_oop->int_field_put(fd->offset(), value);
+}
+
+static void write_float_field(const Handle& h_oop, fieldDescriptor* fd, jfloat value) {
+  assert(h_oop.not_null(), "invariant");
+  assert(fd != NULL, "invariant");
+  h_oop->float_field_put(fd->offset(), value);
+}
+
+static void write_double_field(const Handle& h_oop, fieldDescriptor* fd, jdouble value) {
+  assert(h_oop.not_null(), "invariant");
+  assert(fd != NULL, "invariant");
+  h_oop->double_field_put(fd->offset(), value);
+}
+
+static void write_long_field(const Handle& h_oop, fieldDescriptor* fd, jlong value) {
+  assert(h_oop.not_null(), "invariant");
+  assert(fd != NULL, "invariant");
+  h_oop->long_field_put(fd->offset(), value);
+}
+
+static void write_oop_field(const Handle& h_oop, fieldDescriptor* fd, const oop value) {
+  assert(h_oop.not_null(), "invariant");
+  assert(fd != NULL, "invariant");
+  h_oop->obj_field_put(fd->offset(), value);
+}
+
+static void write_specialized_field(JfrJavaArguments* args, const Handle& h_oop, fieldDescriptor* fd, bool static_field) {
+  assert(args != NULL, "invariant");
+  assert(h_oop.not_null(), "invariant");
+  assert(fd != NULL, "invariant");
+  assert(fd->offset() > 0, "invariant");
+  assert(args->length() >= 1, "invariant");
+
+  // attempt must set a real value
+  assert(args->param(1).get_type() != T_VOID, "invariant");
+
+  switch(fd->field_type()) {
+    case T_BOOLEAN:
+    case T_CHAR:
+    case T_SHORT:
+    case T_INT:
+      write_int_field(h_oop, fd, args->param(1).get_jint());
+      break;
+    case T_FLOAT:
+      write_float_field(h_oop, fd, args->param(1).get_jfloat());
+      break;
+    case T_DOUBLE:
+      write_double_field(h_oop, fd, args->param(1).get_jdouble());
+      break;
+    case T_LONG:
+      write_long_field(h_oop, fd, args->param(1).get_jlong());
+      break;
+    case T_OBJECT:
+      write_oop_field(h_oop, fd, (oop)args->param(1).get_jobject());
+      break;
+    case T_ADDRESS:
+      write_oop_field(h_oop, fd, JfrJavaSupport::resolve_non_null(args->param(1).get_jobject()));
+      break;
+    default:
+      ShouldNotReachHere();
+  }
+}
+
+static void read_specialized_field(JavaValue* result, const Handle& h_oop, fieldDescriptor* fd) {
+  assert(result != NULL, "invariant");
+  assert(h_oop.not_null(), "invariant");
+  assert(fd != NULL, "invariant");
+  assert(fd->offset() > 0, "invariant");
+
+  switch(fd->field_type()) {
+    case T_BOOLEAN:
+    case T_CHAR:
+    case T_SHORT:
+    case T_INT:
+      result->set_jint(h_oop->int_field(fd->offset()));
+      break;
+    case T_FLOAT:
+      result->set_jfloat(h_oop->float_field(fd->offset()));
+      break;
+    case T_DOUBLE:
+      result->set_jdouble(h_oop->double_field(fd->offset()));
+      break;
+    case T_LONG:
+      result->set_jlong(h_oop->long_field(fd->offset()));
+      break;
+    case T_OBJECT:
+      result->set_jobject((jobject)h_oop->obj_field(fd->offset()));
+      break;
+    default:
+      ShouldNotReachHere();
+  }
+}
+
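+// Static fields, and lookups that allow a super-class match, search the whole
+// class hierarchy; otherwise only a field declared locally in the klass matches.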
+static bool find_field(InstanceKlass* ik,
+                       Symbol* name_symbol,
+                       Symbol* signature_symbol,
+                       fieldDescriptor* fd,
+                       bool is_static = false,
+                       bool allow_super = false) {
+  if (allow_super || is_static) {
+    return ik->find_field(name_symbol, signature_symbol, is_static, fd) != NULL;
+  }
+  return ik->find_local_field(name_symbol, signature_symbol, fd);
+}
+
+static void lookup_field(JfrJavaArguments* args, InstanceKlass* klass, fieldDescriptor* fd, bool static_field) {
+  assert(args != NULL, "invariant");
+  assert(klass != NULL, "invariant");
+  assert(klass->is_initialized(), "invariant");
+  assert(fd != NULL, "invariant");
+  find_field(klass, args->name(), args->signature(), fd, static_field, true);
+}
+
+static void read_field(JfrJavaArguments* args, JavaValue* result, TRAPS) {
+  assert(args != NULL, "invariant");
+  assert(result != NULL, "invariant");
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
+
+  InstanceKlass* const klass = static_cast<InstanceKlass*>(args->klass());
+  klass->initialize(CHECK);
+  const bool static_field = !args->has_receiver();
+  fieldDescriptor fd;
+  lookup_field(args, klass, &fd, static_field);
+  assert(fd.offset() > 0, "invariant");
+
+  HandleMark hm(THREAD);
+  Handle h_oop(static_field ? Handle(THREAD, klass->java_mirror()) : Handle(THREAD, args->receiver()));
+  read_specialized_field(result, h_oop, &fd);
+}
+
+static void write_field(JfrJavaArguments* args, JavaValue* result, TRAPS) {
+  assert(args != NULL, "invariant");
+  assert(result != NULL, "invariant");
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
+
+  InstanceKlass* const klass = static_cast<InstanceKlass*>(args->klass());
+  klass->initialize(CHECK);
+
+  const bool static_field = !args->has_receiver();
+  fieldDescriptor fd;
+  lookup_field(args, klass, &fd, static_field);
+  assert(fd.offset() > 0, "invariant");
+
+  HandleMark hm(THREAD);
+  Handle h_oop(static_field ? Handle(THREAD, klass->java_mirror()) : Handle(THREAD, args->receiver()));
+  write_specialized_field(args, h_oop, &fd, static_field);
+}
+
+void JfrJavaSupport::set_field(JfrJavaArguments* args, TRAPS) {
+  assert(args != NULL, "invariant");
+  write_field(args, args->result(), THREAD);
+}
+
+void JfrJavaSupport::get_field(JfrJavaArguments* args, TRAPS) {
+  assert(args != NULL, "invariant");
+  read_field(args, args->result(), THREAD);
+}
+
+void JfrJavaSupport::get_field_local_ref(JfrJavaArguments* args, TRAPS) {
+  assert(args != NULL, "invariant");
+  DEBUG_ONLY(check_java_thread_in_vm(THREAD));
+
+  JavaValue* const result = args->result();
+  assert(result != NULL, "invariant");
+  assert(result->get_type() == T_OBJECT, "invariant");
+
+  read_field(args, result, CHECK);
+  const oop obj = (const oop)result->get_jobject();
+
+  if (obj != NULL) {
+    result->set_jobject(local_jni_handle(obj, THREAD));
+  }
+}
+
+void JfrJavaSupport::get_field_global_ref(JfrJavaArguments* args, TRAPS) {
+  assert(args != NULL, "invariant");
+  DEBUG_ONLY(check_java_thread_in_vm(THREAD));
+
+  JavaValue* const result = args->result();
+  assert(result != NULL, "invariant");
+  assert(result->get_type() == T_OBJECT, "invariant");
+  read_field(args, result, CHECK);
+  const oop obj = (const oop)result->get_jobject();
+  if (obj != NULL) {
+    result->set_jobject(global_jni_handle(obj, THREAD));
+  }
+}
+
+/*
+ *  Misc
+ */
+Klass* JfrJavaSupport::klass(const jobject handle) {
+  const oop obj = resolve_non_null(handle);
+  assert(obj != NULL, "invariant");
+  return obj->klass();
+}
+
+// caller needs ResourceMark
+const char* JfrJavaSupport::c_str(jstring string, Thread* t) {
+  DEBUG_ONLY(check_java_thread_in_vm(t));
+  if (string == NULL) {
+    return NULL;
+  }
+  const char* temp = NULL;
+  const oop java_string = resolve_non_null(string);
+  if (java_lang_String::value(java_string) != NULL) {
+    const size_t length = java_lang_String::utf8_length(java_string);
+    temp = NEW_RESOURCE_ARRAY_IN_THREAD(t, const char, (length + 1));
+    if (temp == NULL) {
+       JfrJavaSupport::throw_out_of_memory_error("Unable to allocate thread local native memory", t);
+       return NULL;
+    }
+    assert(temp != NULL, "invariant");
+    java_lang_String::as_utf8_string(java_string, const_cast<char*>(temp), (int) length + 1);
+  }
+  return temp;
+}
+
+/*
+ *  Exceptions and errors
+ */
+static void create_and_throw(Symbol* name, const char* message, TRAPS) {
+  assert(name != NULL, "invariant");
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
+  assert(!HAS_PENDING_EXCEPTION, "invariant");
+  THROW_MSG(name, message);
+}
+
+void JfrJavaSupport::throw_illegal_state_exception(const char* message, TRAPS) {
+  create_and_throw(vmSymbols::java_lang_IllegalStateException(), message, THREAD);
+}
+
+void JfrJavaSupport::throw_internal_error(const char* message, TRAPS) {
+  create_and_throw(vmSymbols::java_lang_InternalError(), message, THREAD);
+}
+
+void JfrJavaSupport::throw_illegal_argument_exception(const char* message, TRAPS) {
+  create_and_throw(vmSymbols::java_lang_IllegalArgumentException(), message, THREAD);
+}
+
+void JfrJavaSupport::throw_out_of_memory_error(const char* message, TRAPS) {
+  create_and_throw(vmSymbols::java_lang_OutOfMemoryError(), message, THREAD);
+}
+
+void JfrJavaSupport::throw_class_format_error(const char* message, TRAPS) {
+  create_and_throw(vmSymbols::java_lang_ClassFormatError(), message, THREAD);
+}
+
+void JfrJavaSupport::abort(jstring errorMsg, Thread* t) {
+  DEBUG_ONLY(check_java_thread_in_vm(t));
+
+  ResourceMark rm(t);
+  const char* const error_msg = c_str(errorMsg, t);
+  if (error_msg != NULL) {
+    if (true) tty->print_cr("%s",error_msg);
+  }
+  if (true) tty->print_cr("%s", "An irrecoverable error in Jfr. Shutting down VM...");
+  vm_abort();
+}
+
+JfrJavaSupport::CAUSE JfrJavaSupport::_cause = JfrJavaSupport::VM_ERROR;
+void JfrJavaSupport::set_cause(jthrowable throwable, Thread* t) {
+  DEBUG_ONLY(check_java_thread_in_vm(t));
+
+  HandleMark hm(t);
+  Handle ex(t, JNIHandles::resolve_external_guard(throwable));
+
+  if (ex.is_null()) {
+    return;
+  }
+
+  if (ex->is_a(SystemDictionary::OutOfMemoryError_klass())) {
+    _cause = OUT_OF_MEMORY;
+    return;
+  }
+  if (ex->is_a(SystemDictionary::StackOverflowError_klass())) {
+    _cause = STACK_OVERFLOW;
+    return;
+  }
+  if (ex->is_a(SystemDictionary::Error_klass())) {
+    _cause = VM_ERROR;
+    return;
+  }
+  if (ex->is_a(SystemDictionary::RuntimeException_klass())) {
+    _cause = RUNTIME_EXCEPTION;
+    return;
+  }
+  if (ex->is_a(SystemDictionary::Exception_klass())) {
+    _cause = UNKNOWN;
+    return;
+  }
+}
+
+void JfrJavaSupport::uncaught_exception(jthrowable throwable, Thread* t) {
+  DEBUG_ONLY(check_java_thread_in_vm(t));
+  assert(throwable != NULL, "invariant");
+  set_cause(throwable, t);
+}
+
+JfrJavaSupport::CAUSE JfrJavaSupport::cause() {
+  return _cause;
+}
+
+// XXX
+//const char* const JDK_JFR_MODULE_NAME = "jdk.jfr";
+//const char* const JDK_JFR_PACKAGE_NAME = "jdk/jfr";
+
+jlong JfrJavaSupport::jfr_thread_id(jobject target_thread) {
+//  ThreadsListHandle tlh;
+  // XXX is it correct and safe?
+  JavaThread* native_thread = java_lang_Thread::thread(JNIHandles::resolve_non_null(target_thread));
+//  (void)tlh.cv_internal_thread_to_JavaThread(target_thread, &native_thread, NULL);
+  return native_thread != NULL ? JFR_THREAD_ID(native_thread) : 0;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/jni/jfrJavaSupport.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_JNI_JFRJAVASUPPORT_HPP
+#define SHARE_VM_JFR_JNI_JFRJAVASUPPORT_HPP
+
+#include "jfr/jni/jfrJavaCall.hpp"
+#include "utilities/exceptions.hpp"
+
+class Klass;
+class JavaThread;
+class outputStream;
+
+class JfrJavaSupport : public AllStatic {
+ public:
+  static jobject local_jni_handle(const oop obj, Thread* t);
+  static jobject local_jni_handle(const jobject handle, Thread* t);
+  static void destroy_local_jni_handle(const jobject handle);
+
+  static jobject global_jni_handle(const oop obj, Thread* t);
+  static jobject global_jni_handle(const jobject handle, Thread* t);
+  static void destroy_global_jni_handle(const jobject handle);
+
+  static oop resolve_non_null(jobject obj);
+  static void notify_all(jobject obj, TRAPS);
+  static void set_array_element(jobjectArray arr, jobject element, int index, Thread* t);
+
+  // naked oop result
+  static void call_static(JfrJavaArguments* args, TRAPS);
+  static void call_special(JfrJavaArguments* args, TRAPS);
+  static void call_virtual(JfrJavaArguments* args, TRAPS);
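+  //
+  // Typical call pattern (an illustrative sketch; assumes a JavaThread in
+  // _thread_in_vm and a TRAPS context, 'value' is a placeholder jlong):
+  //
+  //   JavaValue result(T_OBJECT);
+  //   JfrJavaArguments args(&result, "java/lang/Long", "valueOf", "(J)Ljava/lang/Long;", CHECK);
+  //   args.push_long(value);
+  //   JfrJavaSupport::call_static(&args, CHECK);
+  //   const oop boxed = (const oop)result.get_jobject(); // naked oop; wrap in a JNI handle to retain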
+
+  static void set_field(JfrJavaArguments* args, TRAPS);
+  static void get_field(JfrJavaArguments* args, TRAPS);
+  static void new_object(JfrJavaArguments* args, TRAPS);
+
+  // global jni handle result
+  static void new_object_global_ref(JfrJavaArguments* args, TRAPS);
+  static void get_field_global_ref(JfrJavaArguments* args, TRAPS);
+
+  // local jni handle result
+  static void new_object_local_ref(JfrJavaArguments* args, TRAPS);
+  static void get_field_local_ref(JfrJavaArguments* args, TRAPS);
+
+  static jstring new_string(const char* c_str, TRAPS);
+  static jobjectArray new_string_array(int length, TRAPS);
+
+  static jobject new_java_lang_Boolean(bool value, TRAPS);
+  static jobject new_java_lang_Integer(jint value, TRAPS);
+  static jobject new_java_lang_Long(jlong value, TRAPS);
+
+  // misc
+  static Klass* klass(const jobject handle);
+  // caller needs ResourceMark
+  static const char* c_str(jstring string, Thread* jt);
+
+  // exceptions
+  static void throw_illegal_state_exception(const char* message, TRAPS);
+  static void throw_illegal_argument_exception(const char* message, TRAPS);
+  static void throw_internal_error(const char* message, TRAPS);
+  static void throw_out_of_memory_error(const char* message, TRAPS);
+  static void throw_class_format_error(const char* message, TRAPS);
+
+  static jlong jfr_thread_id(jobject target_thread);
+
+  // critical
+  static void abort(jstring errorMsg, TRAPS);
+  static void uncaught_exception(jthrowable throwable, Thread* t);
+
+  // asserts
+  DEBUG_ONLY(static void check_java_thread_in_vm(Thread* t);)
+  DEBUG_ONLY(static void check_java_thread_in_native(Thread* t);)
+
+  enum CAUSE {
+    VM_ERROR,
+    OUT_OF_MEMORY,
+    STACK_OVERFLOW,
+    RUNTIME_EXCEPTION,
+    UNKNOWN,
+    NOF_CAUSES
+  };
+
+  static CAUSE cause();
+
+ private:
+  static CAUSE _cause;
+  static void set_cause(jthrowable throwable, Thread* t);
+};
+
+#endif // SHARE_VM_JFR_JNI_JFRJAVASUPPORT_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/jni/jfrJniMethod.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,309 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jni.h"
+#include "jvm.h"
+#include "jfr/jfr.hpp"
+#include "jfr/jfrEvents.hpp"
+#include "jfr/periodic/sampling/jfrThreadSampler.hpp"
+#include "jfr/recorder/jfrEventSetting.hpp"
+#include "jfr/recorder/jfrRecorder.hpp"
+#include "jfr/recorder/checkpoint/jfrMetadataEvent.hpp"
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
+#include "jfr/recorder/repository/jfrRepository.hpp"
+#include "jfr/recorder/repository/jfrChunkSizeNotifier.hpp"
+#include "jfr/recorder/repository/jfrChunkWriter.hpp"
+#include "jfr/recorder/service/jfrOptionSet.hpp"
+#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
+#include "jfr/recorder/stringpool/jfrStringPool.hpp"
+#include "jfr/jni/jfrGetAllEventClasses.hpp"
+#include "jfr/jni/jfrJavaSupport.hpp"
+#include "jfr/jni/jfrJniMethodRegistration.hpp"
+#include "jfr/instrumentation/jfrEventClassTransformer.hpp"
+#include "jfr/instrumentation/jfrJvmtiAgent.hpp"
+#include "jfr/leakprofiler/leakProfiler.hpp"
+#include "jfr/utilities/jfrJavaLog.hpp"
+#include "jfr/utilities/jfrTimeConverter.hpp"
+#include "jfr/utilities/jfrTime.hpp"
+#include "jfr/writers/jfrJavaEventWriter.hpp"
+#include "jfrfiles/jfrPeriodic.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/interfaceSupport.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/os.hpp"
+#include "runtime/thread.hpp"
+#include "utilities/debug.hpp"
+
+#define NO_TRANSITION(result_type, header) extern "C" { result_type JNICALL header {
+#define NO_TRANSITION_END } }
+
+/*
+ * NO_TRANSITION entries
+ *
+ * Thread remains _thread_in_native
+ */
+
+NO_TRANSITION(void, jfr_register_natives(JNIEnv* env, jclass jvmclass))
+  JfrJniMethodRegistration register_native_methods(env);
+NO_TRANSITION_END
+
+NO_TRANSITION(jboolean, jfr_is_enabled())
+  return Jfr::is_enabled() ? JNI_TRUE : JNI_FALSE;
+NO_TRANSITION_END
+
+NO_TRANSITION(jboolean, jfr_is_disabled())
+  return Jfr::is_disabled() ? JNI_TRUE : JNI_FALSE;
+NO_TRANSITION_END
+
+NO_TRANSITION(jboolean, jfr_is_started())
+  return JfrRecorder::is_created() ? JNI_TRUE : JNI_FALSE;
+NO_TRANSITION_END
+
+NO_TRANSITION(jstring, jfr_get_pid(JNIEnv* env, jobject jvm))
+  char pid_buf[32] = { 0 };
+  jio_snprintf(pid_buf, sizeof(pid_buf), "%d", os::current_process_id());
+  jstring pid_string = env->NewStringUTF(pid_buf);
+  return pid_string; // exception pending if NULL
+NO_TRANSITION_END
+
+NO_TRANSITION(jlong, jfr_elapsed_frequency(JNIEnv* env, jobject jvm))
+  return JfrTime::frequency();
+NO_TRANSITION_END
+
+NO_TRANSITION(jlong, jfr_elapsed_counter(JNIEnv* env, jobject jvm))
+  return JfrTicks::now();
+NO_TRANSITION_END
+
+NO_TRANSITION(void, jfr_retransform_classes(JNIEnv* env, jobject jvm, jobjectArray classes))
+  JfrJvmtiAgent::retransform_classes(env, classes, JavaThread::thread_from_jni_environment(env));
+NO_TRANSITION_END
+
+NO_TRANSITION(void, jfr_set_enabled(JNIEnv* env, jobject jvm, jlong event_type_id, jboolean enabled))
+  JfrEventSetting::set_enabled(event_type_id, JNI_TRUE == enabled);
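+  // The OldObjectSample event also drives the leak profiler; starting or
+  // stopping it is done after a transition to _thread_in_vm.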
+  if (EventOldObjectSample::eventId == event_type_id) {
+    ThreadInVMfromNative transition(JavaThread::thread_from_jni_environment(env));
+    if (JNI_TRUE == enabled) {
+      LeakProfiler::start(JfrOptionSet::old_object_queue_size());
+    } else {
+      LeakProfiler::stop();
+    }
+  }
+NO_TRANSITION_END
+
+NO_TRANSITION(void, jfr_set_file_notification(JNIEnv* env, jobject jvm, jlong threshold))
+  JfrChunkSizeNotifier::set_chunk_size_threshold((size_t)threshold);
+NO_TRANSITION_END
+
+NO_TRANSITION(void, jfr_set_sample_threads(JNIEnv* env, jobject jvm, jboolean sampleThreads))
+  JfrOptionSet::set_sample_threads(sampleThreads);
+NO_TRANSITION_END
+
+NO_TRANSITION(void, jfr_set_stack_depth(JNIEnv* env, jobject jvm, jint depth))
+  JfrOptionSet::set_stackdepth((jlong)depth);
+NO_TRANSITION_END
+
+NO_TRANSITION(void, jfr_set_stacktrace_enabled(JNIEnv* env, jobject jvm, jlong event_type_id, jboolean enabled))
+  JfrEventSetting::set_stacktrace(event_type_id, JNI_TRUE == enabled);
+NO_TRANSITION_END
+
+NO_TRANSITION(void, jfr_set_global_buffer_count(JNIEnv* env, jobject jvm, jlong count))
+  JfrOptionSet::set_num_global_buffers(count);
+NO_TRANSITION_END
+
+NO_TRANSITION(void, jfr_set_global_buffer_size(JNIEnv* env, jobject jvm, jlong size))
+  JfrOptionSet::set_global_buffer_size(size);
+NO_TRANSITION_END
+
+NO_TRANSITION(void, jfr_set_thread_buffer_size(JNIEnv* env, jobject jvm, jlong size))
+  JfrOptionSet::set_thread_buffer_size(size);
+NO_TRANSITION_END
+
+NO_TRANSITION(void, jfr_set_memory_size(JNIEnv* env, jobject jvm, jlong size))
+  JfrOptionSet::set_memory_size(size);
+NO_TRANSITION_END
+
+NO_TRANSITION(jboolean, jfr_set_threshold(JNIEnv* env, jobject jvm, jlong event_type_id, jlong thresholdTicks))
+  return JfrEventSetting::set_threshold(event_type_id, thresholdTicks) ? JNI_TRUE : JNI_FALSE;
+NO_TRANSITION_END
+
+NO_TRANSITION(jboolean, jfr_allow_event_retransforms(JNIEnv* env, jobject jvm))
+  return JfrOptionSet::allow_event_retransforms() ? JNI_TRUE : JNI_FALSE;
+NO_TRANSITION_END
+
+NO_TRANSITION(jboolean, jfr_is_available(JNIEnv* env, jclass jvm))
+  return !Jfr::is_disabled() ? JNI_TRUE : JNI_FALSE;
+NO_TRANSITION_END
+
+NO_TRANSITION(jlong, jfr_get_epoch_address(JNIEnv* env, jobject jvm))
+  return JfrTraceIdEpoch::epoch_address();
+NO_TRANSITION_END
+
+NO_TRANSITION(jlong, jfr_get_unloaded_event_classes_count(JNIEnv* env, jobject jvm))
+  return JfrEventClasses::unloaded_event_classes_count();
+NO_TRANSITION_END
+
+NO_TRANSITION(jdouble, jfr_time_conv_factor(JNIEnv* env, jobject jvm))
+  return (jdouble)JfrTimeConverter::nano_to_counter_multiplier();
+NO_TRANSITION_END
+
+NO_TRANSITION(jboolean, jfr_set_cutoff(JNIEnv* env, jobject jvm, jlong event_type_id, jlong cutoff_ticks))
+  return JfrEventSetting::set_cutoff(event_type_id, cutoff_ticks) ? JNI_TRUE : JNI_FALSE;
+NO_TRANSITION_END
+
+
+/*
+ * JVM_ENTRY_NO_ENV entries
+ *
+ * Transitions:
+ *   Entry: _thread_in_native -> _thread_in_vm
+ *   Exit:  _thread_in_vm -> _thread_in_native
+ *
+ * Current JavaThread available as "thread" variable
+ */
+
+JVM_ENTRY_NO_ENV(jboolean, jfr_create_jfr(JNIEnv* env, jobject jvm, jboolean simulate_failure))
+  if (JfrRecorder::is_created()) {
+    return JNI_TRUE;
+  }
+  if (!JfrRecorder::create(simulate_failure == JNI_TRUE)) {
+    JfrJavaSupport::throw_illegal_state_exception("Unable to start Jfr", thread);
+    return JNI_FALSE;
+  }
+  return JNI_TRUE;
+JVM_END
+
+JVM_ENTRY_NO_ENV(jboolean, jfr_destroy_jfr(JNIEnv* env, jobject jvm))
+  JfrRecorder::destroy();
+  return JNI_TRUE;
+JVM_END
+
+JVM_ENTRY_NO_ENV(void, jfr_begin_recording(JNIEnv* env, jobject jvm))
+  if (JfrRecorder::is_recording()) {
+    return;
+  }
+  JfrRecorder::start_recording();
+JVM_END
+
+JVM_ENTRY_NO_ENV(void, jfr_end_recording(JNIEnv* env, jobject jvm))
+  if (!JfrRecorder::is_recording()) {
+    return;
+  }
+  JfrRecorder::stop_recording();
+JVM_END
+
+
+JVM_ENTRY_NO_ENV(jboolean, jfr_emit_event(JNIEnv* env, jobject jvm, jlong eventTypeId, jlong timeStamp, jlong when))
+  JfrPeriodicEventSet::requestEvent((JfrEventId)eventTypeId);
+  return thread->has_pending_exception() ? JNI_FALSE : JNI_TRUE;
+JVM_END
+
+JVM_ENTRY_NO_ENV(jobject, jfr_get_all_event_classes(JNIEnv* env, jobject jvm))
+  return JfrEventClasses::get_all_event_classes(thread);
+JVM_END
+
+JVM_ENTRY_NO_ENV(jlong, jfr_class_id(JNIEnv* env, jclass jvm, jclass jc))
+  return JfrTraceId::use(jc);
+JVM_END
+
+JVM_ENTRY_NO_ENV(jlong, jfr_stacktrace_id(JNIEnv* env, jobject jvm, jint skip))
+  return JfrStackTraceRepository::record(thread, skip);
+JVM_END
+
+JVM_ENTRY_NO_ENV(void, jfr_log(JNIEnv* env, jobject jvm, jint tag_set, jint level, jstring message))
+  JfrJavaLog::log(tag_set, level, message, thread);
+JVM_END
+
+JVM_ENTRY_NO_ENV(void, jfr_subscribe_log_level(JNIEnv* env, jobject jvm, jobject log_tag, jint id))
+  JfrJavaLog::subscribe_log_level(log_tag, id, thread);
+JVM_END
+
+JVM_ENTRY_NO_ENV(void, jfr_set_output(JNIEnv* env, jobject jvm, jstring path))
+  JfrRepository::set_chunk_path(path, thread);
+JVM_END
+
+JVM_ENTRY_NO_ENV(void, jfr_set_method_sampling_interval(JNIEnv* env, jobject jvm, jlong type, jlong intervalMillis))
+  if (intervalMillis < 0) {
+    intervalMillis = 0;
+  }
+  JfrEventId typed_event_id = (JfrEventId)type;
+  assert(EventExecutionSample::eventId == typed_event_id || EventNativeMethodSample::eventId == typed_event_id, "invariant");
+  if (intervalMillis > 0) {
+    JfrEventSetting::set_enabled(typed_event_id, true); // ensure sampling event is enabled
+  }
+  if (EventExecutionSample::eventId == type) {
+    JfrThreadSampling::set_java_sample_interval(intervalMillis);
+  } else {
+    JfrThreadSampling::set_native_sample_interval(intervalMillis);
+  }
+JVM_END
+
+JVM_ENTRY_NO_ENV(void, jfr_store_metadata_descriptor(JNIEnv* env, jobject jvm, jbyteArray descriptor))
+  JfrMetadataEvent::update(descriptor);
+JVM_END
+
+// trace thread id for a thread object
+JVM_ENTRY_NO_ENV(jlong, jfr_id_for_thread(JNIEnv* env, jobject jvm, jobject t))
+  return JfrJavaSupport::jfr_thread_id(t);
+JVM_END
+
+JVM_ENTRY_NO_ENV(jobject, jfr_get_event_writer(JNIEnv* env, jclass cls))
+  return JfrJavaEventWriter::event_writer(thread);
+JVM_END
+
+JVM_ENTRY_NO_ENV(jobject, jfr_new_event_writer(JNIEnv* env, jclass cls))
+  return JfrJavaEventWriter::new_event_writer(thread);
+JVM_END
+
+JVM_ENTRY_NO_ENV(jboolean, jfr_event_writer_flush(JNIEnv* env, jclass cls, jobject writer, jint used_size, jint requested_size))
+  return JfrJavaEventWriter::flush(writer, used_size, requested_size, thread);
+JVM_END
+
+JVM_ENTRY_NO_ENV(void, jfr_set_repository_location(JNIEnv* env, jobject repo, jstring location))
+  return JfrRepository::set_path(location, thread);
+JVM_END
+
+JVM_ENTRY_NO_ENV(void, jfr_uncaught_exception(JNIEnv* env, jobject jvm, jobject t, jthrowable throwable))
+  JfrJavaSupport::uncaught_exception(throwable, thread);
+JVM_END
+
+JVM_ENTRY_NO_ENV(void, jfr_abort(JNIEnv* env, jobject jvm, jstring errorMsg))
+  JfrJavaSupport::abort(errorMsg, thread);
+JVM_END
+
+JVM_ENTRY_NO_ENV(jlong, jfr_type_id(JNIEnv* env, jobject jvm, jclass jc))
+  return JfrTraceId::get(jc);
+JVM_END
+
+JVM_ENTRY_NO_ENV(jboolean, jfr_add_string_constant(JNIEnv* env, jclass jvm, jboolean epoch, jlong id, jstring string))
+  return JfrStringPool::add(epoch == JNI_TRUE, id, string, thread) ? JNI_TRUE : JNI_FALSE;
+JVM_END
+
+JVM_ENTRY_NO_ENV(void, jfr_set_force_instrumentation(JNIEnv* env, jobject jvm, jboolean force_instrumentation))
+  JfrEventClassTransformer::set_force_instrumentation(force_instrumentation == JNI_TRUE);
+JVM_END
+
+JVM_ENTRY_NO_ENV(void, jfr_emit_old_object_samples(JNIEnv* env, jobject jvm, jlong cutoff_ticks, jboolean emit_all))
+  LeakProfiler::emit_events(cutoff_ticks, emit_all == JNI_TRUE);
+JVM_END
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/jni/jfrJniMethod.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_JNI_JFRJNIMETHOD_HPP
+#define SHARE_VM_JFR_JNI_JFRJNIMETHOD_HPP
+
+#include "jni.h"
+
+/*
+ * Native methods for jdk.jfr.internal.JVM
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+jboolean JNICALL jfr_is_enabled();
+
+jboolean JNICALL jfr_is_disabled();
+
+jboolean JNICALL jfr_is_started();
+
+jlong JNICALL jfr_elapsed_counter(JNIEnv* env, jobject jvm);
+
+jboolean JNICALL jfr_create_jfr(JNIEnv* env, jobject jvm, jboolean simulate_failure);
+
+jboolean JNICALL jfr_destroy_jfr(JNIEnv* env, jobject jvm);
+
+void JNICALL jfr_begin_recording(JNIEnv* env, jobject jvm);
+
+void JNICALL jfr_end_recording(JNIEnv* env, jobject jvm);
+
+jboolean JNICALL jfr_emit_event(JNIEnv* env, jobject jvm, jlong eventTypeId, jlong timeStamp, jlong when);
+
+jobject JNICALL jfr_get_all_event_classes(JNIEnv* env, jobject jvm);
+
+jlong JNICALL jfr_class_id(JNIEnv* env, jclass jvm, jclass jc);
+
+jstring JNICALL jfr_get_pid(JNIEnv* env, jobject jvm);
+
+jlong JNICALL jfr_stacktrace_id(JNIEnv* env, jobject jvm, jint skip);
+
+jlong JNICALL jfr_elapsed_frequency(JNIEnv* env, jobject jvm);
+
+void JNICALL jfr_subscribe_log_level(JNIEnv* env, jobject jvm, jobject log_tag, jint id);
+
+void JNICALL jfr_log(JNIEnv* env, jobject jvm, jint tag_set, jint level, jstring message);
+
+void JNICALL jfr_retransform_classes(JNIEnv* env, jobject jvm, jobjectArray classes);
+
+void JNICALL jfr_set_enabled(JNIEnv* env, jobject jvm, jlong event_type_id, jboolean enabled);
+
+void JNICALL jfr_set_file_notification(JNIEnv* env, jobject jvm, jlong delta);
+
+void JNICALL jfr_set_global_buffer_count(JNIEnv* env, jobject jvm, jlong count);
+
+void JNICALL jfr_set_global_buffer_size(JNIEnv* env, jobject jvm, jlong size);
+
+void JNICALL jfr_set_method_sampling_interval(JNIEnv* env, jobject jvm, jlong type, jlong intervalMillis);
+
+void JNICALL jfr_set_output(JNIEnv* env, jobject jvm, jstring path);
+
+void JNICALL jfr_set_sample_threads(JNIEnv* env, jobject jvm, jboolean sampleThreads);
+
+void JNICALL jfr_set_stack_depth(JNIEnv* env, jobject jvm, jint depth);
+
+void JNICALL jfr_set_stacktrace_enabled(JNIEnv* env, jobject jvm, jlong event_type_id, jboolean enabled);
+
+void JNICALL jfr_set_thread_buffer_size(JNIEnv* env, jobject jvm, jlong size);
+
+void JNICALL jfr_set_memory_size(JNIEnv* env, jobject jvm, jlong size);
+
+jboolean JNICALL jfr_set_threshold(JNIEnv* env, jobject jvm, jlong event_type_id, jlong thresholdTicks);
+
+void JNICALL jfr_store_metadata_descriptor(JNIEnv* env, jobject jvm, jbyteArray descriptor);
+
+jlong JNICALL jfr_id_for_thread(JNIEnv* env, jobject jvm, jobject t);
+
+jboolean JNICALL jfr_allow_event_retransforms(JNIEnv* env, jobject jvm);
+
+jboolean JNICALL jfr_is_available(JNIEnv* env, jclass jvm);
+
+jdouble JNICALL jfr_time_conv_factor(JNIEnv* env, jobject jvm);
+
+jlong JNICALL jfr_type_id(JNIEnv* env, jobject jvm, jclass jc);
+
+void JNICALL jfr_set_repository_location(JNIEnv* env, jobject repo, jstring location);
+
+jobject JNICALL jfr_get_event_writer(JNIEnv* env, jclass cls);
+
+jobject JNICALL jfr_new_event_writer(JNIEnv* env, jclass cls);
+
+jboolean JNICALL jfr_event_writer_flush(JNIEnv* env, jclass cls, jobject writer, jint used_size, jint requested_size);
+
+void JNICALL jfr_abort(JNIEnv* env, jobject jvm, jstring errorMsg);
+
+jlong JNICALL jfr_get_epoch_address(JNIEnv* env, jobject jvm);
+
+jboolean JNICALL jfr_add_string_constant(JNIEnv* env, jclass jvm, jboolean epoch, jlong id, jstring string);
+
+void JNICALL jfr_uncaught_exception(JNIEnv* env, jobject jvm, jobject thread, jthrowable throwable);
+
+void JNICALL jfr_set_force_instrumentation(JNIEnv* env, jobject jvm, jboolean force);
+
+jlong JNICALL jfr_get_unloaded_event_classes_count(JNIEnv* env, jobject jvm);
+
+jboolean JNICALL jfr_set_cutoff(JNIEnv* env, jobject jvm, jlong event_type_id, jlong cutoff_ticks);
+
+void JNICALL jfr_emit_old_object_samples(JNIEnv* env, jobject jvm, jlong cutoff_ticks, jboolean emit_all);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // SHARE_VM_JFR_JNI_JFRJNIMETHOD_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/jni/jfrJniMethodRegistration.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/jni/jfrJniMethod.hpp"
+#include "jfr/jni/jfrJniMethodRegistration.hpp"
+#include "jfr/utilities/jfrJavaLog.hpp"
+#include "runtime/interfaceSupport.hpp"
+#include "runtime/thread.hpp"
+#include "utilities/exceptions.hpp"
+
+JfrJniMethodRegistration::JfrJniMethodRegistration(JNIEnv* env) {
+  assert(env != NULL, "invariant");
+  jclass jfr_clz = env->FindClass("jdk/jfr/internal/JVM");
+  if (jfr_clz != NULL) {
+    JNINativeMethod method[] = {
+      (char*)"beginRecording", (char*)"()V", (void*)jfr_begin_recording,
+      (char*)"endRecording", (char*)"()V", (void*)jfr_end_recording,
+      (char*)"counterTime", (char*)"()J", (void*)jfr_elapsed_counter,
+      (char*)"createJFR", (char*)"(Z)Z", (void*)jfr_create_jfr,
+      (char*)"destroyJFR", (char*)"()Z", (void*)jfr_destroy_jfr,
+      (char*)"emitEvent", (char*)"(JJJ)Z", (void*)jfr_emit_event,
+      (char*)"getAllEventClasses", (char*)"()Ljava/util/List;", (void*)jfr_get_all_event_classes,
+      (char*)"getClassIdNonIntrinsic", (char*)"(Ljava/lang/Class;)J", (void*)jfr_class_id,
+      (char*)"getPid", (char*)"()Ljava/lang/String;", (void*)jfr_get_pid,
+      (char*)"getStackTraceId", (char*)"(I)J", (void*)jfr_stacktrace_id,
+      (char*)"getThreadId", (char*)"(Ljava/lang/Thread;)J", (void*)jfr_id_for_thread,
+      (char*)"getTicksFrequency", (char*)"()J", (void*)jfr_elapsed_frequency,
+      (char*)"subscribeLogLevel", (char*)"(Ljdk/jfr/internal/LogTag;I)V", (void*)jfr_subscribe_log_level,
+      (char*)"log", (char*)"(IILjava/lang/String;)V", (void*)jfr_log,
+      (char*)"retransformClasses", (char*)"([Ljava/lang/Class;)V", (void*)jfr_retransform_classes,
+      (char*)"setEnabled", (char*)"(JZ)V", (void*)jfr_set_enabled,
+      (char*)"setFileNotification", (char*)"(J)V", (void*)jfr_set_file_notification,
+      (char*)"setGlobalBufferCount", (char*)"(J)V", (void*)jfr_set_global_buffer_count,
+      (char*)"setGlobalBufferSize", (char*)"(J)V", (void*)jfr_set_global_buffer_size,
+      (char*)"setMethodSamplingInterval", (char*)"(JJ)V", (void*)jfr_set_method_sampling_interval,
+      (char*)"setOutput", (char*)"(Ljava/lang/String;)V", (void*)jfr_set_output,
+      (char*)"setSampleThreads", (char*)"(Z)V", (void*)jfr_set_sample_threads,
+      (char*)"setStackDepth", (char*)"(I)V", (void*)jfr_set_stack_depth,
+      (char*)"setStackTraceEnabled", (char*)"(JZ)V", (void*)jfr_set_stacktrace_enabled,
+      (char*)"setThreadBufferSize", (char*)"(J)V", (void*)jfr_set_thread_buffer_size,
+      (char*)"setMemorySize", (char*)"(J)V", (void*)jfr_set_memory_size,
+      (char*)"setThreshold", (char*)"(JJ)Z", (void*)jfr_set_threshold,
+      (char*)"storeMetadataDescriptor", (char*)"([B)V", (void*)jfr_store_metadata_descriptor,
+      (char*)"getAllowedToDoEventRetransforms", (char*)"()Z", (void*)jfr_allow_event_retransforms,
+      (char*)"isAvailable", (char*)"()Z", (void*)jfr_is_available,
+      (char*)"getTimeConversionFactor", (char*)"()D", (void*)jfr_time_conv_factor,
+      (char*)"getTypeId", (char*)"(Ljava/lang/Class;)J", (void*)jfr_type_id,
+      (char*)"getEventWriter", (char*)"()Ljava/lang/Object;", (void*)jfr_get_event_writer,
+      (char*)"newEventWriter", (char*)"()Ljdk/jfr/internal/EventWriter;", (void*)jfr_new_event_writer,
+      (char*)"flush", (char*)"(Ljdk/jfr/internal/EventWriter;II)Z", (void*)jfr_event_writer_flush,
+      (char*)"setRepositoryLocation", (char*)"(Ljava/lang/String;)V", (void*)jfr_set_repository_location,
+      (char*)"abort", (char*)"(Ljava/lang/String;)V", (void*)jfr_abort,
+      (char*)"getEpochAddress", (char*)"()J",(void*)jfr_get_epoch_address,
+      (char*)"addStringConstant", (char*)"(ZJLjava/lang/String;)Z", (void*)jfr_add_string_constant,
+      (char*)"uncaughtException", (char*)"(Ljava/lang/Thread;Ljava/lang/Throwable;)V", (void*)jfr_uncaught_exception,
+      (char*)"setForceInstrumentation", (char*)"(Z)V", (void*)jfr_set_force_instrumentation,
+      (char*)"getUnloadedEventClassCount", (char*)"()J", (void*)jfr_get_unloaded_event_classes_count,
+      (char*)"setCutoff", (char*)"(JJ)Z", (void*)jfr_set_cutoff,
+      (char*)"emitOldObjectSamples", (char*)"(JZ)V", (void*)jfr_emit_old_object_samples
+    };
+
+    const size_t method_array_length = sizeof(method) / sizeof(JNINativeMethod);
+    if (env->RegisterNatives(jfr_clz, method, (jint)method_array_length) != JNI_OK) {
+      JavaThread* jt = JavaThread::thread_from_jni_environment(env);
+      assert(jt != NULL, "invariant");
+      assert(jt->thread_state() == _thread_in_native, "invariant");
+      ThreadInVMfromNative transition(jt);
+      if (true) tty->print_cr("RegisterNatives for JVM class failed!");
+    }
+    env->DeleteLocalRef(jfr_clz);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/jni/jfrJniMethodRegistration.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_JNI_JFRJNIMETHODREGISTRATION_HPP
+#define SHARE_VM_JFR_JNI_JFRJNIMETHODREGISTRATION_HPP
+
+#include "jni.h"
+#include "memory/allocation.hpp"
+
+//
+// RegisterNatives for jdk.jfr.internal.JVM
+//
+class JfrJniMethodRegistration : public StackObj {
+ public:
+  JfrJniMethodRegistration(JNIEnv* env);
+};
+
+#endif // SHARE_VM_JFR_JNI_JFRJNIMETHODREGISTRATION_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/jni/jfrUpcalls.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/javaClasses.hpp"
+#include "classfile/symbolTable.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "jfr/jni/jfrJavaSupport.hpp"
+#include "jfr/jni/jfrUpcalls.hpp"
+#include "jfr/support/jfrEventClass.hpp"
+#include "memory/oopFactory.hpp"
+#include "oops/oop.inline.hpp"
+#include "oops/typeArrayKlass.hpp"
+#include "oops/typeArrayOop.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/os.hpp"
+#include "runtime/thread.inline.hpp"
+#include "utilities/exceptions.hpp"
+
+static Symbol* jvm_upcalls_class_sym = NULL;
+static Symbol* on_retransform_method_sym = NULL;
+static Symbol* on_retransform_signature_sym = NULL;
+static Symbol* bytes_for_eager_instrumentation_sym = NULL;
+static Symbol* bytes_for_eager_instrumentation_sig_sym = NULL;
+
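+// Lazily create the permanent symbols for the JVMUpcalls targets. The final
+// lookup passes THREAD rather than CHECK_false, so 'initialized' only becomes
+// true once every symbol has been created.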
+static bool initialize(TRAPS) {
+  static bool initialized = false;
+  if (!initialized) {
+    DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
+    jvm_upcalls_class_sym = SymbolTable::new_permanent_symbol("jdk/jfr/internal/JVMUpcalls", CHECK_false);
+    on_retransform_method_sym = SymbolTable::new_permanent_symbol("onRetransform", CHECK_false);
+    on_retransform_signature_sym = SymbolTable::new_permanent_symbol("(JZLjava/lang/Class;[B)[B", CHECK_false);
+    bytes_for_eager_instrumentation_sym = SymbolTable::new_permanent_symbol("bytesForEagerInstrumentation", CHECK_false);
+    bytes_for_eager_instrumentation_sig_sym = SymbolTable::new_permanent_symbol("(JZLjava/lang/Class;[B)[B", THREAD);
+    initialized = bytes_for_eager_instrumentation_sig_sym != NULL;
+  }
+  return initialized;
+}
+
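+// Invoke the given static method on jdk.jfr.internal.JVMUpcalls with
+// (trace_id, force_instrumentation, class, byte[]) and return the transformed
+// class bytes as a byte array, or NULL if the upcall raised an exception.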
+static const typeArrayOop invoke(jlong trace_id,
+                                 jboolean force_instrumentation,
+                                 jclass class_being_redefined,
+                                 jint class_data_len,
+                                 const unsigned char* class_data,
+                                 Symbol* method_sym,
+                                 Symbol* signature_sym,
+                                 jint& new_bytes_length,
+                                 TRAPS) {
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
+  const Klass* klass = SystemDictionary::resolve_or_fail(jvm_upcalls_class_sym, true, CHECK_NULL);
+  assert(klass != NULL, "invariant");
+  typeArrayOop old_byte_array = oopFactory::new_byteArray(class_data_len, CHECK_NULL);
+  memcpy(old_byte_array->byte_at_addr(0), class_data, class_data_len);
+  JavaValue result(T_OBJECT);
+  JfrJavaArguments args(&result, klass, method_sym, signature_sym);
+  args.push_long(trace_id);
+  args.push_int(force_instrumentation);
+  args.push_jobject(class_being_redefined);
+  args.push_oop(old_byte_array);
+  JfrJavaSupport::call_static(&args, THREAD);
+  if (HAS_PENDING_EXCEPTION) {
+    if (true) tty->print_cr("JfrUpcall failed");
+    return NULL;
+  }
+  // The result should be a [B
+  const oop res = (oop)result.get_jobject();
+  assert(res != NULL, "invariant");
+  assert(res->is_typeArray(), "invariant");
+  assert(TypeArrayKlass::cast(res->klass())->element_type() == T_BYTE, "invariant");
+  const typeArrayOop new_byte_array = typeArrayOop(res);
+  new_bytes_length = (jint)new_byte_array->length();
+  return new_byte_array;
+}
+
+static const size_t ERROR_MSG_BUFFER_SIZE = 256;
+static void log_error_and_throw_oom(jint new_bytes_length, TRAPS) {
+  char error_buffer[ERROR_MSG_BUFFER_SIZE];
+  jio_snprintf(error_buffer, ERROR_MSG_BUFFER_SIZE,
+    "Thread local allocation (native) for " SIZE_FORMAT " bytes failed in JfrUpcalls", (size_t)new_bytes_length);
+  if (true) tty->print_cr("%s", error_buffer);
+  JfrJavaSupport::throw_out_of_memory_error(error_buffer, CHECK);
+}
+
+void JfrUpcalls::on_retransform(jlong trace_id,
+                                jclass class_being_redefined,
+                                jint class_data_len,
+                                const unsigned char* class_data,
+                                jint* new_class_data_len,
+                                unsigned char** new_class_data,
+                                TRAPS) {
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
+  assert(class_being_redefined != NULL, "invariant");
+  assert(class_data != NULL, "invariant");
+  assert(new_class_data_len != NULL, "invariant");
+  assert(new_class_data != NULL, "invariant");
+  if (!JdkJfrEvent::is_visible(class_being_redefined)) {
+    return;
+  }
+  jint new_bytes_length = 0;
+  initialize(THREAD);
+  const typeArrayOop new_byte_array = invoke(trace_id,
+                                             false,
+                                             class_being_redefined,
+                                             class_data_len,
+                                             class_data,
+                                             on_retransform_method_sym,
+                                             on_retransform_signature_sym,
+                                             new_bytes_length,
+                                             CHECK);
+  assert(new_byte_array != NULL, "invariant");
+  assert(new_bytes_length > 0, "invariant");
+  // memory space must be malloced as mtInternal
+  // as it will be deallocated by JVMTI routines
+  unsigned char* const new_bytes = (unsigned char* const)os::malloc(new_bytes_length, mtInternal);
+  if (new_bytes == NULL) {
+    log_error_and_throw_oom(new_bytes_length, THREAD); // unwinds
+  }
+  assert(new_bytes != NULL, "invariant");
+  memcpy(new_bytes, new_byte_array->byte_at_addr(0), (size_t)new_bytes_length);
+  *new_class_data_len = new_bytes_length;
+  *new_class_data = new_bytes;
+}
+
+void JfrUpcalls::new_bytes_eager_instrumentation(jlong trace_id,
+                                                 jboolean force_instrumentation,
+                                                 jclass super,
+                                                 jint class_data_len,
+                                                 const unsigned char* class_data,
+                                                 jint* new_class_data_len,
+                                                 unsigned char** new_class_data,
+                                                 TRAPS) {
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
+  assert(super != NULL, "invariant");
+  assert(class_data != NULL, "invariant");
+  assert(new_class_data_len != NULL, "invariant");
+  assert(new_class_data != NULL, "invariant");
+  jint new_bytes_length = 0;
+  initialize(THREAD);
+  const typeArrayOop new_byte_array = invoke(trace_id,
+                                             force_instrumentation,
+                                             super,
+                                             class_data_len,
+                                             class_data,
+                                             bytes_for_eager_instrumentation_sym,
+                                             bytes_for_eager_instrumentation_sig_sym,
+                                             new_bytes_length,
+                                             CHECK);
+  assert(new_byte_array != NULL, "invariant");
+  assert(new_bytes_length > 0, "invariant");
+  unsigned char* const new_bytes = NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(THREAD, unsigned char, new_bytes_length);
+  if (new_bytes == NULL) {
+    log_error_and_throw_oom(new_bytes_length, THREAD); // this unwinds
+  }
+  assert(new_bytes != NULL, "invariant");
+  memcpy(new_bytes, new_byte_array->byte_at_addr(0), (size_t)new_bytes_length);
+  *new_class_data_len = new_bytes_length;
+  *new_class_data = new_bytes;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/jni/jfrUpcalls.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_JNI_JFRUPCALLS_HPP
+#define SHARE_VM_JFR_JNI_JFRUPCALLS_HPP
+
+#include "jni.h"
+#include "jfr/utilities/jfrAllocation.hpp"
+#include "utilities/exceptions.hpp"
+
+class JavaThread;
+
+//
+// Upcalls to Java for instrumentation purposes.
+// Targets are located in jdk.jfr.internal.JVMUpcalls.
+//
+class JfrUpcalls : AllStatic {
+ public:
+  static void new_bytes_eager_instrumentation(jlong trace_id,
+                                              jboolean force_instrumentation,
+                                              jclass super,
+                                              jint class_data_len,
+                                              const unsigned char* class_data,
+                                              jint* new_class_data_len,
+                                              unsigned char** new_class_data,
+                                              TRAPS);
+
+  static void on_retransform(jlong trace_id,
+                             jclass class_being_redefined,
+                             jint class_data_len,
+                             const unsigned char* class_data,
+                             jint* new_class_data_len,
+                             unsigned char** new_class_data,
+                             TRAPS);
+};
+
+#endif // SHARE_VM_JFR_JNI_JFRUPCALLS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/chains/bfsClosure.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+#include "jfr/leakprofiler/chains/bitset.hpp"
+#include "jfr/leakprofiler/chains/bfsClosure.hpp"
+#include "jfr/leakprofiler/chains/dfsClosure.hpp"
+#include "jfr/leakprofiler/chains/edge.hpp"
+#include "jfr/leakprofiler/chains/edgeStore.hpp"
+#include "jfr/leakprofiler/chains/edgeQueue.hpp"
+#include "jfr/leakprofiler/utilities/granularTimer.hpp"
+#include "jfr/leakprofiler/utilities/unifiedOop.hpp"
+#include "memory/iterator.inline.hpp"
+#include "memory/resourceArea.hpp"
+#include "oops/oop.inline.hpp"
+#include "utilities/align.hpp"
+
+BFSClosure::BFSClosure(EdgeQueue* edge_queue, EdgeStore* edge_store, BitSet* mark_bits) :
+  _edge_queue(edge_queue),
+  _edge_store(edge_store),
+  _mark_bits(mark_bits),
+  _current_parent(NULL),
+  _current_frontier_level(0),
+  _next_frontier_idx(0),
+  _prev_frontier_idx(0),
+  _dfs_fallback_idx(0),
+  _use_dfs(false) {
+}
+
+static void log_frontier_level_summary(size_t level,
+                                       size_t high_idx,
+                                       size_t low_idx,
+                                       size_t edge_size) {
+  const size_t nof_edges_in_frontier = high_idx - low_idx;
+  if (LogJFR && Verbose) {
+    tty->print_cr("BFS front: " SIZE_FORMAT " edges: " SIZE_FORMAT " size: " SIZE_FORMAT " [KB]",
+                  level, nof_edges_in_frontier, (nof_edges_in_frontier * edge_size) / K);
+  }
+}
+
+void BFSClosure::log_completed_frontier() const {
+  log_frontier_level_summary(_current_frontier_level,
+                             _next_frontier_idx,
+                             _prev_frontier_idx,
+                             _edge_queue->sizeof_edge());
+}
+
+void BFSClosure::log_dfs_fallback() const {
+  const size_t edge_size = _edge_queue->sizeof_edge();
+  // first complete summary for frontier in progress
+  log_frontier_level_summary(_current_frontier_level,
+                             _next_frontier_idx,
+                             _prev_frontier_idx,
+                             edge_size);
+
+  // and then also complete the last frontier
+  log_frontier_level_summary(_current_frontier_level + 1,
+                             _edge_queue->bottom(),
+                             _next_frontier_idx,
+                             edge_size);
+
+  // additional information about DFS fallover
+  if (LogJFR && Verbose) {
+    tty->print_cr("BFS front: " SIZE_FORMAT " filled edge queue at edge: " SIZE_FORMAT,
+                  _current_frontier_level, _dfs_fallback_idx);
+  }
+
+  const size_t nof_dfs_completed_edges = _edge_queue->bottom() - _dfs_fallback_idx;
+  if (LogJFR && Verbose) {
+    tty->print_cr("DFS to complete " SIZE_FORMAT " edges size: " SIZE_FORMAT " [KB]",
+                  nof_dfs_completed_edges, (nof_dfs_completed_edges * edge_size) / K);
+  }
+}
+
+void BFSClosure::process() {
+
+  process_root_set();
+  process_queue();
+}
+
+void BFSClosure::process_root_set() {
+  for (size_t idx = _edge_queue->bottom(); idx < _edge_queue->top(); ++idx) {
+    const Edge* edge = _edge_queue->element_at(idx);
+    assert(edge->parent() == NULL, "invariant");
+    process(edge->reference(), edge->pointee());
+  }
+}
+
+void BFSClosure::process(const oop* reference, const oop pointee) {
+  closure_impl(reference, pointee);
+}
+
+void BFSClosure::closure_impl(const oop* reference, const oop pointee) {
+  assert(reference != NULL, "invariant");
+  assert(UnifiedOop::dereference(reference) == pointee, "invariant");
+
+  if (GranularTimer::is_finished()) {
+    return;
+  }
+
+  if (_use_dfs) {
+    assert(_current_parent != NULL, "invariant");
+    DFSClosure::find_leaks_from_edge(_edge_store, _mark_bits, _current_parent);
+    return;
+  }
+
+  if (!_mark_bits->is_marked(pointee)) {
+    _mark_bits->mark_obj(pointee);
+    // is the pointee a sample object?
+    if (NULL == pointee->mark()) {
+      add_chain(reference, pointee);
+    }
+
+    // if we are processing the initial root set, don't add to the queue
+    if (_current_parent != NULL) {
+      assert(_current_parent->distance_to_root() == _current_frontier_level, "invariant");
+      _edge_queue->add(_current_parent, reference);
+    }
+
+    if (_edge_queue->is_full()) {
+      dfs_fallback();
+    }
+  }
+}
+
+void BFSClosure::add_chain(const oop* reference, const oop pointee) {
+  assert(pointee != NULL, "invariant");
+  assert(NULL == pointee->mark(), "invariant");
+
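+  // chain length: the new leaf edge plus, when a parent exists, the parent edge and all of its ancestors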
+  const size_t length = _current_parent == NULL ? 1 : _current_parent->distance_to_root() + 2;
+  ResourceMark rm;
+  Edge* const chain = NEW_RESOURCE_ARRAY(Edge, length);
+  size_t idx = 0;
+  chain[idx++] = Edge(NULL, reference);
+  // aggregate from breadth-first search
+  const Edge* current = _current_parent;
+  while (current != NULL) {
+    chain[idx++] = Edge(NULL, current->reference());
+    current = current->parent();
+  }
+  assert(length == idx, "invariant");
+  _edge_store->add_chain(chain, length);
+}
+
+void BFSClosure::dfs_fallback() {
+  assert(_edge_queue->is_full(), "invariant");
+  _use_dfs = true;
+  _dfs_fallback_idx = _edge_queue->bottom();
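+  // drain the queue; each remaining edge with a live pointee seeds a depth-first search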
+  while (!_edge_queue->is_empty()) {
+    const Edge* edge = _edge_queue->remove();
+    if (edge->pointee() != NULL) {
+      DFSClosure::find_leaks_from_edge(_edge_store, _mark_bits, edge);
+    }
+  }
+}
+
+void BFSClosure::process_queue() {
+  assert(_current_frontier_level == 0, "invariant");
+  assert(_next_frontier_idx == 0, "invariant");
+  assert(_prev_frontier_idx == 0, "invariant");
+
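+  // the root set edges already in the queue constitute frontier level 0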
+  _next_frontier_idx = _edge_queue->top();
+  while (!is_complete()) {
+    iterate(_edge_queue->remove()); // edge_queue.remove() increments bottom
+  }
+}
+
+void BFSClosure::step_frontier() const {
+  log_completed_frontier();
+  ++_current_frontier_level;
+  _prev_frontier_idx = _next_frontier_idx;
+  _next_frontier_idx = _edge_queue->top();
+}
+
+bool BFSClosure::is_complete() const {
+  if (_edge_queue->bottom() < _next_frontier_idx) {
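+    // the current frontier still has unprocessed edges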
+    return false;
+  }
+  if (_edge_queue->bottom() > _next_frontier_idx) {
+    // fallback onto DFS as part of processing the frontier
+    assert(_dfs_fallback_idx >= _prev_frontier_idx, "invariant");
+    assert(_dfs_fallback_idx < _next_frontier_idx, "invariant");
+    log_dfs_fallback();
+    return true;
+  }
+  assert(_edge_queue->bottom() == _next_frontier_idx, "invariant");
+  if (_edge_queue->is_empty()) {
+    return true;
+  }
+  step_frontier();
+  return false;
+}
+
+void BFSClosure::iterate(const Edge* parent) {
+  assert(parent != NULL, "invariant");
+  const oop pointee = parent->pointee();
+  assert(pointee != NULL, "invariant");
+  _current_parent = parent;
+  pointee->oop_iterate(this);
+}
+
+void BFSClosure::do_oop(oop* ref) {
+  assert(ref != NULL, "invariant");
+  assert(is_aligned(ref, HeapWordSize), "invariant");
+  const oop pointee = *ref;
+  if (pointee != NULL) {
+    closure_impl(ref, pointee);
+  }
+}
+
+void BFSClosure::do_oop(narrowOop* ref) {
+  assert(ref != NULL, "invariant");
+  assert(is_aligned(ref, sizeof(narrowOop)), "invariant");
+  const oop pointee = oopDesc::load_decode_heap_oop(ref);
+  if (pointee != NULL) {
+    closure_impl(UnifiedOop::encode(ref), pointee);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/chains/bfsClosure.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_LEAKPROFILER_CHAINS_BFSCLOSURE_HPP
+#define SHARE_VM_JFR_LEAKPROFILER_CHAINS_BFSCLOSURE_HPP
+
+#include "memory/iterator.hpp"
+#include "oops/oop.hpp"
+
+class BitSet;
+class Edge;
+class EdgeStore;
+class EdgeQueue;
+
+// Class responsible for iterating the heap breadth-first
+class BFSClosure : public ExtendedOopClosure { // XXX BasicOopIterateClosure
+ private:
+  EdgeQueue* _edge_queue;
+  EdgeStore* _edge_store;
+  BitSet* _mark_bits;
+  const Edge* _current_parent;
+  mutable size_t _current_frontier_level;
+  mutable size_t _next_frontier_idx;
+  mutable size_t _prev_frontier_idx;
+  size_t _dfs_fallback_idx;
+  bool _use_dfs;
+
+  void log_completed_frontier() const;
+  void log_dfs_fallback() const;
+
+  bool is_complete() const;
+  void step_frontier() const;
+
+  void closure_impl(const oop* reference, const oop pointee);
+  void add_chain(const oop* reference, const oop pointee);
+  void dfs_fallback();
+
+  void iterate(const Edge* parent);
+  void process(const oop* reference, const oop pointee);
+
+  void process_root_set();
+  void process_queue();
+
+ public:
+  BFSClosure(EdgeQueue* edge_queue, EdgeStore* edge_store, BitSet* mark_bits);
+  void process();
+
+  virtual void do_oop(oop* ref);
+  virtual void do_oop(narrowOop* ref);
+};
+
+#endif // SHARE_VM_JFR_LEAKPROFILER_CHAINS_BFSCLOSURE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/chains/bitset.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+#include "jfr/leakprofiler/chains/bitset.hpp"
+#include "jfr/recorder/storage/jfrVirtualMemory.hpp"
+#include "memory/memRegion.hpp"
+
+BitSet::BitSet(const MemRegion& covered_region) :
+  _vmm(NULL),
+  _region_start(covered_region.start()),
+  _region_size(covered_region.word_size()) {
+}
+
+BitSet::~BitSet() {
+  delete _vmm;
+}
+
+bool BitSet::initialize() {
+  assert(_vmm == NULL, "invariant");
+  _vmm = new JfrVirtualMemory();
+  if (_vmm == NULL) {
+    return false;
+  }
+
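+  // one bit per minimally aligned object position in the covered region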
+  const BitMap::idx_t bits = _region_size >> LogMinObjAlignment;
+  const size_t words = bits / BitsPerWord;
+  const size_t raw_bytes = words * sizeof(BitMap::idx_t);
+
+  // the virtual memory invocation will reserve and commit the entire space
+  BitMap::bm_word_t* map = (BitMap::bm_word_t*)_vmm->initialize(raw_bytes, raw_bytes);
+  if (map == NULL) {
+    return false;
+  }
+  _bits = BitMap(map, bits);
+  return true;
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/chains/bitset.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_LEAKPROFILER_CHAINS_BITSET_HPP
+#define SHARE_VM_JFR_LEAKPROFILER_CHAINS_BITSET_HPP
+
+#include "memory/allocation.hpp"
+#include "oops/oopsHierarchy.hpp"
+#include "utilities/bitMap.inline.hpp"
+
+class JfrVirtualMemory;
+class MemRegion;
+
+class BitSet : public CHeapObj<mtTracing> {
+ private:
+  JfrVirtualMemory* _vmm;
+  const HeapWord* const _region_start;
+  BitMap _bits;
+  const size_t _region_size;
+
+ public:
+  BitSet(const MemRegion& covered_region);
+  ~BitSet();
+
+  bool initialize();
+
+  BitMap::idx_t mark_obj(const HeapWord* addr) {
+    const BitMap::idx_t bit = addr_to_bit(addr);
+    _bits.par_set_bit(bit);
+    return bit;
+  }
+
+  BitMap::idx_t mark_obj(oop obj) {
+    return mark_obj((HeapWord*)obj);
+  }
+
+  bool is_marked(const HeapWord* addr) const {
+    return is_marked(addr_to_bit(addr));
+  }
+
+  bool is_marked(oop obj) const {
+    return is_marked((HeapWord*)obj);
+  }
+
+  BitMap::idx_t size() const {
+    return _bits.size();
+  }
+
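+  // word offset from the region start, scaled down by the minimum object alignment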
+  BitMap::idx_t addr_to_bit(const HeapWord* addr) const {
+    return pointer_delta(addr, _region_start) >> LogMinObjAlignment;
+  }
+
+  bool is_marked(const BitMap::idx_t bit) const {
+    return _bits.at(bit);
+  }
+};
+
+#endif  // SHARE_VM_JFR_LEAKPROFILER_CHAINS_BITSET_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/chains/dfsClosure.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/leakprofiler/chains/dfsClosure.hpp"
+#include "jfr/leakprofiler/chains/edge.hpp"
+#include "jfr/leakprofiler/chains/edgeStore.hpp"
+#include "jfr/leakprofiler/utilities/granularTimer.hpp"
+#include "jfr/leakprofiler/chains/bitset.hpp"
+#include "jfr/leakprofiler/utilities/unifiedOop.hpp"
+#include "jfr/leakprofiler/utilities/rootType.hpp"
+#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
+#include "memory/iterator.inline.hpp"
+#include "memory/resourceArea.hpp"
+#include "oops/oop.inline.hpp"
+#include "utilities/align.hpp"
+
+// max dfs depth should not exceed size of stack
+static const size_t max_dfs_depth = 5000;
+
+EdgeStore* DFSClosure::_edge_store = NULL;
+BitSet* DFSClosure::_mark_bits = NULL;
+const Edge* DFSClosure::_start_edge = NULL;
+size_t DFSClosure::_max_depth = max_dfs_depth;
+bool DFSClosure::_ignore_root_set = false;
+
+DFSClosure::DFSClosure() :
+  _parent(NULL),
+  _reference(NULL),
+  _depth(0) {
+}
+
+DFSClosure::DFSClosure(DFSClosure* parent, size_t depth) :
+  _parent(parent),
+  _reference(NULL),
+  _depth(depth) {
+}
+
+void DFSClosure::find_leaks_from_edge(EdgeStore* edge_store,
+                                      BitSet* mark_bits,
+                                      const Edge* start_edge) {
+  assert(edge_store != NULL, "invariant");
+  assert(mark_bits != NULL," invariant");
+  assert(start_edge != NULL, "invariant");
+
+  _edge_store = edge_store;
+  _mark_bits = mark_bits;
+  _start_edge = start_edge;
+  _ignore_root_set = false;
+  assert(_max_depth == max_dfs_depth, "invariant");
+
+  // Depth-first search, starting from a BFS edge
+  DFSClosure dfs;
+  start_edge->pointee()->oop_iterate(&dfs);
+}
+
+void DFSClosure::find_leaks_from_root_set(EdgeStore* edge_store,
+                                          BitSet* mark_bits) {
+  assert(edge_store != NULL, "invariant");
+  assert(mark_bits != NULL, "invariant");
+
+  _edge_store = edge_store;
+  _mark_bits = mark_bits;
+  _start_edge = NULL;
+
+  // Mark root set, to avoid going sideways
+  _max_depth = 1;
+  _ignore_root_set = false;
+  DFSClosure dfs1;
+  RootSetClosure::process_roots(&dfs1);
+
+  // Depth-first search
+  _max_depth = max_dfs_depth;
+  _ignore_root_set = true;
+  assert(_start_edge == NULL, "invariant");
+  DFSClosure dfs2;
+  RootSetClosure::process_roots(&dfs2);
+}
+
+void DFSClosure::closure_impl(const oop* reference, const oop pointee) {
+  assert(pointee != NULL, "invariant");
+  assert(reference != NULL, "invariant");
+
+  if (GranularTimer::is_finished()) {
+    return;
+  }
+  if (_depth == 0 && _ignore_root_set) {
+    // Root set is already marked, but we want
+    // to continue, so skip is_marked check.
+    assert(_mark_bits->is_marked(pointee), "invariant");
+  } else {
+    if (_mark_bits->is_marked(pointee)) {
+      return;
+    }
+  }
+
+  _reference = reference;
+  _mark_bits->mark_obj(pointee);
+  assert(_mark_bits->is_marked(pointee), "invariant");
+
+  // is the pointee a sample object?
+  if (NULL == pointee->mark()) {
+    add_chain();
+  }
+
+  assert(_max_depth >= 1, "invariant");
+  if (_depth < _max_depth - 1) {
+    DFSClosure next_level(this, _depth + 1);
+    pointee->oop_iterate(&next_level);
+  }
+}
+
+void DFSClosure::add_chain() {
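+  // the depth-first path contributes _depth + 1 edges; when the search was seeded
+  // from a BFS edge, that edge's chain back to the root is appended as well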
+  const size_t length = _start_edge == NULL ? _depth + 1 :
+                        _start_edge->distance_to_root() + 1 + _depth + 1;
+
+  ResourceMark rm;
+  Edge* const chain = NEW_RESOURCE_ARRAY(Edge, length);
+  size_t idx = 0;
+
+  // aggregate from depth-first search
+  const DFSClosure* c = this;
+  while (c != NULL) {
+    chain[idx++] = Edge(NULL, c->reference());
+    c = c->parent();
+  }
+
+  assert(idx == _depth + 1, "invariant");
+
+  // aggregate from breadth-first search
+  const Edge* current = _start_edge;
+  while (current != NULL) {
+    chain[idx++] = Edge(NULL, current->reference());
+    current = current->parent();
+  }
+  assert(idx == length, "invariant");
+  _edge_store->add_chain(chain, length);
+}
+
+void DFSClosure::do_oop(oop* ref) {
+  assert(ref != NULL, "invariant");
+  assert(is_aligned(ref, HeapWordSize), "invariant");
+  const oop pointee = *ref;
+  if (pointee != NULL) {
+    closure_impl(ref, pointee);
+  }
+}
+
+void DFSClosure::do_oop(narrowOop* ref) {
+  assert(ref != NULL, "invariant");
+  assert(is_aligned(ref, sizeof(narrowOop)), "invariant");
+  const oop pointee = oopDesc::load_decode_heap_oop(ref);
+  if (pointee != NULL) {
+    closure_impl(UnifiedOop::encode(ref), pointee);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/chains/dfsClosure.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_LEAKPROFILER_CHAINS_DFSCLOSURE_HPP
+#define SHARE_VM_JFR_LEAKPROFILER_CHAINS_DFSCLOSURE_HPP
+
+#include "memory/iterator.hpp"
+#include "oops/oop.hpp"
+
+class BitSet;
+class Edge;
+class EdgeStore;
+class EdgeQueue;
+
+// Class responsible for iterating the heap depth-first
+class DFSClosure: public ExtendedOopClosure { // XXX BasicOopIterateClosure
+ private:
+  static EdgeStore* _edge_store;
+  static BitSet*    _mark_bits;
+  static const Edge* _start_edge;
+  static size_t _max_depth;
+  static bool _ignore_root_set;
+  DFSClosure* _parent;
+  const oop* _reference;
+  size_t _depth;
+
+  void add_chain();
+  void closure_impl(const oop* reference, const oop pointee);
+
+  DFSClosure* parent() const { return _parent; }
+  const oop* reference() const { return _reference; }
+
+  DFSClosure(DFSClosure* parent, size_t depth);
+  DFSClosure();
+
+ public:
+  static void find_leaks_from_edge(EdgeStore* edge_store, BitSet* mark_bits, const Edge* start_edge);
+  static void find_leaks_from_root_set(EdgeStore* edge_store, BitSet* mark_bits);
+
+  virtual void do_oop(oop* ref);
+  virtual void do_oop(narrowOop* ref);
+};
+
+#endif // SHARE_VM_JFR_LEAKPROFILER_CHAINS_DFSCLOSURE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/chains/edge.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+#include "classfile/javaClasses.hpp"
+#include "jfr/leakprofiler/chains/edge.hpp"
+#include "jfr/leakprofiler/utilities/unifiedOop.hpp"
+
+Edge::Edge() : _parent(NULL), _reference(NULL) {}
+
+Edge::Edge(const Edge* parent, const oop* reference) : _parent(parent),
+                                                       _reference(reference) {}
+
+const oop Edge::pointee() const {
+  return UnifiedOop::dereference(_reference);
+}
+
+const oop Edge::reference_owner() const {
+  return is_root() ? (oop)NULL : UnifiedOop::dereference(_parent->reference());
+}
+
+static const Klass* resolve_klass(const oop obj) {
+  assert(obj != NULL, "invariant");
+  return java_lang_Class::is_instance(obj) ?
+    java_lang_Class::as_Klass(obj) : obj->klass();
+}
+
+const Klass* Edge::pointee_klass() const {
+  return resolve_klass(pointee());
+}
+
+const Klass* Edge::reference_owner_klass() const {
+  const oop ref_owner = reference_owner();
+  return ref_owner != NULL ? resolve_klass(ref_owner) : NULL;
+}
+
+size_t Edge::distance_to_root() const {
+  size_t depth = 0;
+  const Edge* current = _parent;
+  while (current != NULL) {
+    depth++;
+    current = current->parent();
+  }
+  return depth;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/chains/edge.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_LEAKPROFILER_CHAINS_EDGE_HPP
+#define SHARE_VM_JFR_LEAKPROFILER_CHAINS_EDGE_HPP
+
+#include "memory/allocation.hpp"
+#include "oops/oopsHierarchy.hpp"
+
+class Edge {
+ private:
+  const Edge* _parent;
+  const oop* _reference;
+ public:
+  Edge();
+  Edge(const Edge* parent, const oop* reference);
+
+  const oop* reference() const {
+    return _reference;
+  }
+  const Edge* parent() const {
+    return _parent;
+  }
+  bool is_root() const {
+    return _parent == NULL;
+  }
+  const oop pointee() const;
+  const Klass* pointee_klass() const;
+  const oop reference_owner() const;
+  const Klass* reference_owner_klass() const;
+  size_t distance_to_root() const;
+
+  void* operator new (size_t sz, void* here) {
+    return here;
+  }
+};
+
+#endif // SHARE_VM_JFR_LEAKPROFILER_CHAINS_EDGE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/chains/edgeQueue.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/leakprofiler/chains/edgeQueue.hpp"
+#include "jfr/recorder/storage/jfrVirtualMemory.hpp"
+
+EdgeQueue::EdgeQueue(size_t reservation_size_bytes, size_t commit_block_size_bytes) :
+  _vmm(NULL),
+  _reservation_size_bytes(reservation_size_bytes),
+  _commit_block_size_bytes(commit_block_size_bytes),
+  _top_index(0),
+  _bottom_index(0) {
+}
+
+bool EdgeQueue::initialize() {
+  assert(_reservation_size_bytes >= _commit_block_size_bytes, "invariant");
+  assert(_vmm == NULL, "invariant");
+  _vmm = new JfrVirtualMemory();
+  return _vmm != NULL && _vmm->initialize(_reservation_size_bytes, _commit_block_size_bytes, sizeof(Edge));
+}
+
+EdgeQueue::~EdgeQueue() {
+  delete _vmm;
+}
+
+void EdgeQueue::add(const Edge* parent, const oop* ref) {
+  assert(ref != NULL, "Null objects not allowed in EdgeQueue");
+  assert(!is_full(), "EdgeQueue is full. Check is_full before adding another Edge");
+  assert(!_vmm->is_full(), "invariant");
+  void* const allocation = _vmm->new_datum();
+  assert(allocation != NULL, "invariant");
+  new (allocation)Edge(parent, ref);
+  _top_index++;
+  assert(_vmm->count() == _top_index, "invariant");
+}
+
+size_t EdgeQueue::top() const {
+  return _top_index;
+}
+
+size_t EdgeQueue::bottom() const {
+  return EdgeQueue::_bottom_index;
+}
+
+bool EdgeQueue::is_empty() const {
+  return _top_index == _bottom_index;
+}
+
+bool EdgeQueue::is_full() const {
+  return _vmm->is_full();
+}
+
+const Edge* EdgeQueue::remove() const {
+  assert(!is_empty(), "EdgeQueue is empty. Check if empty before removing Edge");
+  assert(!_vmm->is_empty(), "invariant");
+  return (const Edge*)_vmm->get(_bottom_index++);
+}
+
+const Edge* EdgeQueue::element_at(size_t index) const {
+  assert(index >= _bottom_index, "invariant");
+  assert(index <_top_index, "invariant");
+  return (Edge*)_vmm->get(index);
+}
+
+size_t EdgeQueue::reserved_size() const {
+  assert(_vmm != NULL, "invariant");
+  return _vmm->reserved_size();
+}
+
+size_t EdgeQueue::live_set() const {
+  assert(_vmm != NULL, "invariant");
+  return _vmm->live_set();
+}
+
+size_t EdgeQueue::sizeof_edge() const {
+  assert(_vmm != NULL, "invariant");
+  return _vmm->aligned_datum_size_bytes();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/chains/edgeQueue.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_LEAKPROFILER_CHAINS_EDGEQUEUE_HPP
+#define SHARE_VM_JFR_LEAKPROFILER_CHAINS_EDGEQUEUE_HPP
+
+#include "memory/allocation.hpp"
+#include "jfr/leakprofiler/chains/edge.hpp"
+
+class JfrVirtualMemory;
+
+class EdgeQueue : public CHeapObj<mtTracing> {
+ private:
+  JfrVirtualMemory* _vmm;
+  const size_t _reservation_size_bytes;
+  const size_t _commit_block_size_bytes;
+  mutable size_t _top_index;
+  mutable size_t _bottom_index;
+ public:
+  EdgeQueue(size_t reservation_size_bytes, size_t commit_block_size_bytes);
+  ~EdgeQueue();
+
+  bool initialize();
+
+  void add(const Edge* parent, const oop* ref);
+  const Edge* remove() const;
+  const Edge* element_at(size_t index) const;
+
+  size_t top() const;
+  size_t bottom() const;
+  bool is_empty() const;
+  bool is_full() const;
+
+  size_t reserved_size() const;
+  size_t live_set() const;
+  size_t sizeof_edge() const; // includes alignment padding
+};
+
+#endif // SHARE_VM_JFR_LEAKPROFILER_CHAINS_EDGEQUEUE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/chains/edgeStore.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/leakprofiler/chains/edgeStore.hpp"
+#include "jfr/leakprofiler/chains/edgeUtils.hpp"
+#include "oops/oop.inline.hpp"
+
+RoutableEdge::RoutableEdge() : Edge() {}
+RoutableEdge::RoutableEdge(const Edge* parent, const oop* reference) : Edge(parent, reference),
+                                                                       _skip_edge(NULL),
+                                                                       _skip_length(0),
+                                                                       _processed(false) {}
+
+RoutableEdge::RoutableEdge(const Edge& edge) : Edge(edge),
+                                               _skip_edge(NULL),
+                                               _skip_length(0),
+                                               _processed(false) {}
+
+RoutableEdge::RoutableEdge(const RoutableEdge& edge) : Edge(edge),
+                                                      _skip_edge(edge._skip_edge),
+                                                      _skip_length(edge._skip_length),
+                                                      _processed(edge._processed) {}
+
+void RoutableEdge::operator=(const RoutableEdge& edge) {
+  Edge::operator=(edge);
+  _skip_edge = edge._skip_edge;
+  _skip_length = edge._skip_length;
+  _processed = edge._processed;
+}
+
+size_t RoutableEdge::logical_distance_to_root() const {
+  size_t depth = 0;
+  const RoutableEdge* current = logical_parent();
+  while (current != NULL) {
+    depth++;
+    current = current->logical_parent();
+  }
+  return depth;
+}
+
+traceid EdgeStore::_edge_id_counter = 0;
+
+EdgeStore::EdgeStore() : _edges(NULL) {
+  _edges = new EdgeHashTable(this);
+}
+
+EdgeStore::~EdgeStore() {
+  assert(_edges != NULL, "invariant");
+  delete _edges;
+  _edges = NULL;
+}
+
+const Edge* EdgeStore::get_edge(const Edge* edge) const {
+  assert(edge != NULL, "invariant");
+  EdgeEntry* const entry = _edges->lookup_only(*edge, (uintptr_t)edge->reference());
+  return entry != NULL ? entry->literal_addr() : NULL;
+}
+
+const Edge* EdgeStore::put(const Edge* edge) {
+  assert(edge != NULL, "invariant");
+  const RoutableEdge e = *edge;
+  assert(NULL == _edges->lookup_only(e, (uintptr_t)e.reference()), "invariant");
+  EdgeEntry& entry = _edges->put(e, (uintptr_t)e.reference());
+  return entry.literal_addr();
+}
+
+traceid EdgeStore::get_id(const Edge* edge) const {
+  assert(edge != NULL, "invariant");
+  EdgeEntry* const entry = _edges->lookup_only(*edge, (uintptr_t)edge->reference());
+  assert(entry != NULL, "invariant");
+  return entry->id();
+}
+
+traceid EdgeStore::get_root_id(const Edge* edge) const {
+  assert(edge != NULL, "invariant");
+  const Edge* root = EdgeUtils::root(*edge);
+  assert(root != NULL, "invariant");
+  return get_id(root);
+}
+
+void EdgeStore::add_chain(const Edge* chain, size_t length) {
+  assert(chain != NULL, "invariant");
+  assert(length > 0, "invariant");
+
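+  // chain[0] is the edge nearest the sample object; chain[length - 1] is nearest a root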
+  size_t bottom_index = length - 1;
+  const size_t top_index = 0;
+
+  const Edge* stored_parent_edge = NULL;
+
+  // determine level of shared ancestry
+  for (; bottom_index > top_index; --bottom_index) {
+    const Edge* stored_edge = get_edge(&chain[bottom_index]);
+    if (stored_edge != NULL) {
+      stored_parent_edge = stored_edge;
+      continue;
+    }
+    break;
+  }
+
+  // insertion of new Edges
+  for (int i = (int)bottom_index; i >= (int)top_index; --i) {
+    Edge edge(stored_parent_edge, chain[i].reference());
+    stored_parent_edge = put(&edge);
+  }
+
+  const oop sample_object = stored_parent_edge->pointee();
+  assert(sample_object != NULL, "invariant");
+  assert(NULL == sample_object->mark(), "invariant");
+
+  // Install the "top" edge of the chain into the sample object mark oop.
+  // This associates the sample object with its navigable reference chain.
+  sample_object->set_mark(markOop(stored_parent_edge));
+}
+
+bool EdgeStore::is_empty() const {
+  return !_edges->has_entries();
+}
+
+size_t EdgeStore::number_of_entries() const {
+  return _edges->cardinality();
+}
+
+void EdgeStore::assign_id(EdgeEntry* entry) {
+  assert(entry != NULL, "invariant");
+  assert(entry->id() == 0, "invariant");
+  entry->set_id(++_edge_id_counter);
+}
+
+bool EdgeStore::equals(const Edge& query, uintptr_t hash, const EdgeEntry* entry) {
+  assert(entry != NULL, "invariant");
+  assert(entry->hash() == hash, "invariant");
+  return true;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/chains/edgeStore.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_LEAKPROFILER_CHAINS_EDGESTORE_HPP
+#define SHARE_VM_LEAKPROFILER_CHAINS_EDGESTORE_HPP
+
+#include "jfr/utilities/jfrHashtable.hpp"
+#include "jfr/leakprofiler/chains/edge.hpp"
+#include "memory/allocation.hpp"
+
+typedef u8 traceid;
+
+class RoutableEdge : public Edge {
+ private:
+  mutable const RoutableEdge* _skip_edge;
+  mutable size_t _skip_length;
+  mutable bool _processed;
+
+ public:
+  RoutableEdge();
+  RoutableEdge(const Edge* parent, const oop* reference);
+  RoutableEdge(const Edge& edge);
+  RoutableEdge(const RoutableEdge& edge);
+  void operator=(const RoutableEdge& edge);
+
+  const RoutableEdge* skip_edge() const { return _skip_edge; }
+  size_t skip_length() const { return _skip_length; }
+
+  bool is_skip_edge() const { return _skip_edge != NULL; }
+  bool processed() const { return _processed; }
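+  // a sentinel is the designated skip target at the root-side end of a collapsed chain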
+  bool is_sentinel() const {
+    return _skip_edge == NULL && _skip_length == 1;
+  }
+
+  void set_skip_edge(const RoutableEdge* edge) const {
+    assert(!is_skip_edge(), "invariant");
+    assert(edge != this, "invariant");
+    _skip_edge = edge;
+  }
+
+  void set_skip_length(size_t length) const {
+    _skip_length = length;
+  }
+
+  void set_processed() const {
+    assert(!_processed, "invariant");
+    _processed = true;
+  }
+
+  // true navigation according to physical tree representation
+  const RoutableEdge* physical_parent() const {
+    return static_cast<const RoutableEdge*>(parent());
+  }
+
+  // logical navigation taking skip levels into account
+  const RoutableEdge* logical_parent() const {
+    return is_skip_edge() ? skip_edge() : physical_parent();
+  }
+
+  size_t logical_distance_to_root() const;
+};
+
+class EdgeStore : public CHeapObj<mtTracing> {
+  typedef HashTableHost<RoutableEdge, traceid, Entry, EdgeStore> EdgeHashTable;
+  typedef EdgeHashTable::HashEntry EdgeEntry;
+  template <typename,
+            typename,
+            template<typename, typename> class,
+            typename,
+            size_t>
+  friend class HashTableHost;
+ private:
+  static traceid _edge_id_counter;
+  EdgeHashTable* _edges;
+
+  // Hash table callbacks
+  void assign_id(EdgeEntry* entry);
+  bool equals(const Edge& query, uintptr_t hash, const EdgeEntry* entry);
+
+  const Edge* get_edge(const Edge* edge) const;
+  const Edge* put(const Edge* edge);
+
+ public:
+  EdgeStore();
+  ~EdgeStore();
+
+  void add_chain(const Edge* chain, size_t length);
+  bool is_empty() const;
+  size_t number_of_entries() const;
+
+  traceid get_id(const Edge* edge) const;
+  traceid get_root_id(const Edge* edge) const;
+
+  template <typename T>
+  void iterate_edges(T& functor) const { _edges->iterate_value<T>(functor); }
+};
+
+#endif // SHARE_VM_LEAKPROFILER_CHAINS_EDGESTORE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/chains/edgeUtils.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,312 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/javaClasses.hpp"
+#include "jfr/leakprofiler/chains/edge.hpp"
+#include "jfr/leakprofiler/chains/edgeStore.hpp"
+#include "jfr/leakprofiler/chains/edgeUtils.hpp"
+#include "jfr/leakprofiler/utilities/unifiedOop.hpp"
+#include "oops/fieldStreams.hpp"
+#include "oops/instanceKlass.hpp"
+#include "oops/objArrayOop.hpp"
+#include "oops/oopsHierarchy.hpp"
+#include "runtime/handles.inline.hpp"
+
+bool EdgeUtils::is_leak_edge(const Edge& edge) {
+  return (const Edge*)edge.pointee()->mark() == &edge;
+}
+
+bool EdgeUtils::is_root(const Edge& edge) {
+  return edge.is_root();
+}
+
+static int field_offset(const Edge& edge) {
+  assert(!edge.is_root(), "invariant");
+  const oop ref_owner = edge.reference_owner();
+  assert(ref_owner != NULL, "invariant");
+  const oop* reference = UnifiedOop::decode(edge.reference());
+  assert(reference != NULL, "invariant");
+  assert(!UnifiedOop::is_narrow(reference), "invariant");
+  assert(!ref_owner->is_array(), "invariant");
+  assert(ref_owner->is_instance(), "invariant");
+  const int offset = (int)pointer_delta(reference, ref_owner, sizeof(char));
+  assert(offset < (ref_owner->size() * HeapWordSize), "invariant");
+  return offset;
+}
+
+static const InstanceKlass* field_type(const Edge& edge) {
+  assert(!edge.is_root() || !EdgeUtils::is_array_element(edge), "invariant");
+  return (const InstanceKlass*)edge.reference_owner_klass();
+}
+
+const Symbol* EdgeUtils::field_name_symbol(const Edge& edge) {
+  assert(!edge.is_root(), "invariant");
+  assert(!is_array_element(edge), "invariant");
+  const int offset = field_offset(edge);
+  const InstanceKlass* ik = field_type(edge);
+  while (ik != NULL) {
+    JavaFieldStream jfs(ik);
+    while (!jfs.done()) {
+      if (offset == jfs.offset()) {
+        return jfs.name();
+      }
+      jfs.next();
+    }
+    ik = (InstanceKlass*)ik->super();
+  }
+  return NULL;
+}
+
+jshort EdgeUtils::field_modifiers(const Edge& edge) {
+  const int offset = field_offset(edge);
+  const InstanceKlass* ik = field_type(edge);
+
+  while (ik != NULL) {
+    JavaFieldStream jfs(ik);
+    while (!jfs.done()) {
+      if (offset == jfs.offset()) {
+        return jfs.access_flags().as_short();
+      }
+      jfs.next();
+    }
+    ik = (InstanceKlass*)ik->super();
+  }
+  return 0;
+}
+
+bool EdgeUtils::is_array_element(const Edge& edge) {
+  assert(!edge.is_root(), "invariant");
+  const oop ref_owner = edge.reference_owner();
+  assert(ref_owner != NULL, "invariant");
+  return ref_owner->is_objArray();
+}
+
+static int array_offset(const Edge& edge) {
+  assert(!edge.is_root(), "invariant");
+  const oop ref_owner = edge.reference_owner();
+  assert(ref_owner != NULL, "invariant");
+  const oop* reference = UnifiedOop::decode(edge.reference());
+  assert(reference != NULL, "invariant");
+  assert(!UnifiedOop::is_narrow(reference), "invariant");
+  assert(ref_owner->is_array(), "invariant");
+  const objArrayOop ref_owner_array = static_cast<const objArrayOop>(ref_owner);
+  const int offset = (int)pointer_delta(reference, ref_owner_array->base(), heapOopSize);
+  assert(offset >= 0 && offset < ref_owner_array->length(), "invariant");
+  return offset;
+}
+
+int EdgeUtils::array_index(const Edge& edge) {
+  return is_array_element(edge) ? array_offset(edge) : 0;
+}
+
+int EdgeUtils::array_size(const Edge& edge) {
+  if (is_array_element(edge)) {
+    const oop ref_owner = edge.reference_owner();
+    assert(ref_owner != NULL, "invariant");
+    assert(ref_owner->is_objArray(), "invariant");
+    return ((objArrayOop)(ref_owner))->length();
+  }
+  return 0;
+}
+
+const Edge* EdgeUtils::root(const Edge& edge) {
+  const Edge* current = &edge;
+  const Edge* parent = current->parent();
+  while (parent != NULL) {
+    current = parent;
+    parent = current->parent();
+  }
+  return current;
+}
+
+// The number of references associated with the leak node;
+// can be viewed as the leak node "context".
+// Used to provide leak context for a "capped/skipped" reference chain.
+static const size_t leak_context = 100;
+
+// The number of references associated with the root node;
+// can be viewed as the root node "context".
+// Used to provide root context for a "capped/skipped" reference chain.
+static const size_t root_context = 100;
+
+// A limit on the reference chain depth to be serialized.
+static const size_t max_ref_chain_depth = leak_context + root_context;
+
+const RoutableEdge* skip_to(const RoutableEdge& edge, size_t skip_length) {
+  const RoutableEdge* current = &edge;
+  const RoutableEdge* parent = current->physical_parent();
+  size_t seek = 0;
+  while (parent != NULL && seek != skip_length) {
+    seek++;
+    current = parent;
+    parent = parent->physical_parent();
+  }
+  return current;
+}
+
+#ifdef ASSERT
+static void validate_skip_target(const RoutableEdge* skip_target) {
+  assert(skip_target != NULL, "invariant");
+  assert(skip_target->distance_to_root() + 1 == root_context, "invariant");
+  assert(skip_target->is_sentinel(), "invariant");
+}
+
+static void validate_new_skip_edge(const RoutableEdge* new_skip_edge, const RoutableEdge* last_skip_edge, size_t adjustment) {
+  assert(new_skip_edge != NULL, "invariant");
+  assert(new_skip_edge->is_skip_edge(), "invariant");
+  if (last_skip_edge != NULL) {
+    const RoutableEdge* const target = skip_to(*new_skip_edge->logical_parent(), adjustment);
+    validate_skip_target(target->logical_parent());
+    return;
+  }
+  assert(last_skip_edge == NULL, "invariant");
+  // only one level of logical indirection
+  validate_skip_target(new_skip_edge->logical_parent());
+}
+#endif // ASSERT
+
+static void install_logical_route(const RoutableEdge* new_skip_edge, size_t skip_target_distance) {
+  assert(new_skip_edge != NULL, "invariant");
+  assert(!new_skip_edge->is_skip_edge(), "invariant");
+  assert(!new_skip_edge->processed(), "invariant");
+  const RoutableEdge* const skip_target = skip_to(*new_skip_edge, skip_target_distance);
+  assert(skip_target != NULL, "invariant");
+  new_skip_edge->set_skip_edge(skip_target);
+  new_skip_edge->set_skip_length(skip_target_distance);
+  assert(new_skip_edge->is_skip_edge(), "invariant");
+  assert(new_skip_edge->logical_parent() == skip_target, "invariant");
+}
+
+static const RoutableEdge* find_last_skip_edge(const RoutableEdge& edge, size_t& distance) {
+  assert(distance == 0, "invariant");
+  const RoutableEdge* current = &edge;
+  while (current != NULL) {
+    if (current->is_skip_edge() && current->skip_edge()->is_sentinel()) {
+      return current;
+    }
+    current = current->physical_parent();
+    ++distance;
+  }
+  return current;
+}
+
+static void collapse_overlapping_chain(const RoutableEdge& edge,
+                                       const RoutableEdge* first_processed_edge,
+                                       size_t first_processed_distance) {
+  assert(first_processed_edge != NULL, "invariant");
+  // first_processed_edge is already processed / written
+  assert(first_processed_edge->processed(), "invariant");
+  assert(first_processed_distance + 1 <= leak_context, "invariant");
+
+  // from this first processed edge, attempt to fetch the last skip edge
+  size_t last_skip_edge_distance = 0;
+  const RoutableEdge* const last_skip_edge = find_last_skip_edge(*first_processed_edge, last_skip_edge_distance);
+  const size_t distance_discovered = first_processed_distance + last_skip_edge_distance + 1;
+
+  if (distance_discovered <= leak_context || (last_skip_edge == NULL && distance_discovered <= max_ref_chain_depth)) {
+    // complete chain can be accommodated without modification
+    return;
+  }
+
+  // backtrack one edge from existing processed edge
+  const RoutableEdge* const new_skip_edge = skip_to(edge, first_processed_distance - 1);
+  assert(new_skip_edge != NULL, "invariant");
+  assert(!new_skip_edge->processed(), "invariant");
+  assert(new_skip_edge->parent() == first_processed_edge, "invariant");
+
+  size_t adjustment = 0;
+  if (last_skip_edge != NULL) {
+    assert(leak_context - 1 > first_processed_distance - 1, "invariant");
+    adjustment = leak_context - first_processed_distance - 1;
+    assert(last_skip_edge_distance + 1 > adjustment, "invariant");
+    install_logical_route(new_skip_edge, last_skip_edge_distance + 1 - adjustment);
+  } else {
+    install_logical_route(new_skip_edge, last_skip_edge_distance + 1 - root_context);
+    new_skip_edge->logical_parent()->set_skip_length(1); // sentinel
+  }
+
+  DEBUG_ONLY(validate_new_skip_edge(new_skip_edge, last_skip_edge, adjustment);)
+}
+
+static void collapse_non_overlapping_chain(const RoutableEdge& edge,
+                                           const RoutableEdge* first_processed_edge,
+                                           size_t first_processed_distance) {
+  assert(first_processed_edge != NULL, "invariant");
+  assert(!first_processed_edge->processed(), "invariant");
+  // this implies that the first "processed" edge is the leak-context-relative "leaf"
+  assert(first_processed_distance + 1 == leak_context, "invariant");
+
+  const size_t distance_to_root = edge.distance_to_root();
+  if (distance_to_root + 1 <= max_ref_chain_depth) {
+    // complete chain can be accommodated without constructing a skip edge
+    return;
+  }
+
+  install_logical_route(first_processed_edge, distance_to_root + 1 - first_processed_distance - root_context);
+  first_processed_edge->logical_parent()->set_skip_length(1); // sentinel
+
+  DEBUG_ONLY(validate_new_skip_edge(first_processed_edge, NULL, 0);)
+}
+
+static const RoutableEdge* processed_edge(const RoutableEdge& edge, size_t& distance) {
+  assert(distance == 0, "invariant");
+  const RoutableEdge* current = &edge;
+  while (current != NULL && distance < leak_context - 1) {
+    if (current->processed()) {
+      return current;
+    }
+    current = current->physical_parent();
+    ++distance;
+  }
+  assert(distance <= leak_context - 1, "invariant");
+  return current;
+}
+
+/*
+ * Some vocabulary:
+ * -----------
+ * "Context" is an interval in the chain, it is associcated with an edge and it signifies a number of connected edges.
+ * "Processed / written" means an edge that has already been serialized.
+ * "Skip edge" is an edge that contains additional information for logical routing purposes.
+ * "Skip target" is an edge used as a destination for a skip edge
+ */
+void EdgeUtils::collapse_chain(const RoutableEdge& edge) {
+  assert(is_leak_edge(edge), "invariant");
+
+  // attempt to locate an already processed edge inside current leak context (if any)
+  size_t first_processed_distance = 0;
+  const RoutableEdge* const first_processed_edge = processed_edge(edge, first_processed_distance);
+  if (first_processed_edge == NULL) {
+    return;
+  }
+
+  if (first_processed_edge->processed()) {
+    collapse_overlapping_chain(edge, first_processed_edge, first_processed_distance);
+  } else {
+    collapse_non_overlapping_chain(edge, first_processed_edge, first_processed_distance);
+  }
+
+  assert(edge.logical_distance_to_root() + 1 <= max_ref_chain_depth, "invariant");
+}
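
The skip-edge machinery above is easier to see on a toy model. The sketch below is illustrative only and not part of this changeset; it uses a hypothetical SimpleEdge type in place of the real RoutableEdge API to show why, once a logical shortcut is installed, walking logical parents visits only a bounded number of edges even when the physical chain is much longer.

// Illustrative sketch only -- hypothetical types, not the RoutableEdge API.
#include <cstddef>

struct SimpleEdge {
  const SimpleEdge* _physical_parent;  // next edge toward the root
  const SimpleEdge* _skip;             // optional logical shortcut ("skip edge")

  const SimpleEdge* logical_parent() const {
    return _skip != NULL ? _skip : _physical_parent;
  }
};

// Number of edges visited when following logical parents; with a skip edge
// installed this stays within the configured bound (cf. max_ref_chain_depth)
// even for arbitrarily deep physical chains.
static size_t logical_depth(const SimpleEdge* leaf) {
  size_t depth = 0;
  for (const SimpleEdge* e = leaf; e != NULL; e = e->logical_parent()) {
    ++depth;
  }
  return depth;
}
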
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/chains/edgeUtils.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_LEAKPROFILER_CHAINS_EDGEUTILS_HPP
+#define SHARE_VM_LEAKPROFILER_CHAINS_EDGEUTILS_HPP
+
+#include "memory/allocation.hpp"
+
+class Edge;
+class RoutableEdge;
+class Symbol;
+
+class EdgeUtils : public AllStatic {
+ public:
+  static bool is_leak_edge(const Edge& edge);
+
+  static const Edge* root(const Edge& edge);
+  static bool is_root(const Edge& edge);
+
+  static bool is_array_element(const Edge& edge);
+  static int array_index(const Edge& edge);
+  static int array_size(const Edge& edge);
+
+  static const Symbol* field_name_symbol(const Edge& edge);
+  static jshort field_modifiers(const Edge& edge);
+
+  static void collapse_chain(const RoutableEdge& edge);
+};
+
+#endif // SHARE_VM_LEAKPROFILER_CHAINS_EDGEUTILS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/chains/objectSampleMarker.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_LEAKPROFILER_CHAINS_OBJECTSAMPLEMARKER_HPP
+#define SHARE_VM_JFR_LEAKPROFILER_CHAINS_OBJECTSAMPLEMARKER_HPP
+
+#include "memory/allocation.hpp"
+#include "oops/markOop.hpp"
+#include "utilities/growableArray.hpp"
+//
+// This class will save the original mark oop of an object sample object.
+// It will then install an "identifier" mark oop to be used for
+// identification purposes in the search for reference chains.
+// The destructor will restore each modified oop with its original mark oop.
+//
+class ObjectSampleMarker : public StackObj {
+ private:
+  class ObjectSampleMarkOop : public ResourceObj {
+    friend class ObjectSampleMarker;
+   private:
+    oop _obj;
+    markOop _mark_oop;
+    ObjectSampleMarkOop(const oop obj,
+                        const markOop mark_oop) : _obj(obj),
+                                                  _mark_oop(mark_oop) {}
+   public:
+    ObjectSampleMarkOop() : _obj(NULL), _mark_oop(NULL) {}
+  };
+
+  GrowableArray<ObjectSampleMarkOop>* _store;
+
+ public:
+  ObjectSampleMarker() :
+       _store(new GrowableArray<ObjectSampleMarkOop>(16)) {}
+  ~ObjectSampleMarker() {
+    assert(_store != NULL, "invariant");
+    // restore the saved, original markOop for sample objects
+    while (_store->is_nonempty()) {
+      ObjectSampleMarkOop sample_oop = _store->pop();
+      sample_oop._obj->set_mark(sample_oop._mark_oop);
+      assert(sample_oop._obj->mark() == sample_oop._mark_oop, "invariant");
+    }
+  }
+
+  void mark(oop obj) {
+    assert(obj != NULL, "invariant");
+    // save the original markOop
+    _store->push(ObjectSampleMarkOop(obj, obj->mark()));
+    // now we will "poison" the mark word of the sample object
+    // to the intermediate monitor INFLATING state.
+    // This is an "impossible" state during a safepoint,
+    // hence we will use it to quickly identify sample objects
+    // during the reachability search from gc roots.
+    assert(NULL == markOopDesc::INFLATING(), "invariant");
+    obj->set_mark(markOopDesc::INFLATING());
+    assert(NULL == obj->mark(), "invariant");
+  }
+};
+
+#endif // SHARE_VM_JFR_LEAKPROFILER_CHAINS_OBJECTSAMPLEMARKER_HPP
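
A self-contained model of the save/poison/restore pattern that ObjectSampleMarker implements above (illustrative sketch only; FakeObject and POISON are plain C++ stand-ins for oop/markOop and markOopDesc::INFLATING(), not HotSpot types):

// Illustrative model only -- stand-in types, not part of this changeset.
#include <cstddef>
#include <utility>
#include <vector>

struct FakeObject { void* mark; };   // stand-in for an object header's mark word
static void* const POISON = NULL;    // stand-in for markOopDesc::INFLATING()

class ScopedMarker {
 private:
  std::vector<std::pair<FakeObject*, void*> > _saved;
 public:
  void mark(FakeObject* obj) {
    _saved.push_back(std::make_pair(obj, obj->mark));  // save the original mark word
    obj->mark = POISON;                                 // install the "impossible" state
  }
  ~ScopedMarker() {
    // restore every modified object to its original mark word, as ~ObjectSampleMarker() does
    for (size_t i = _saved.size(); i > 0; --i) {
      _saved[i - 1].first->mark = _saved[i - 1].second;
    }
  }
};
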
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/chains/rootSetClosure.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/classLoaderData.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "jfr/leakprofiler/chains/edgeQueue.hpp"
+#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
+#include "jfr/leakprofiler/utilities/saveRestore.hpp"
+#include "jfr/leakprofiler/utilities/unifiedOop.hpp"
+#include "memory/universe.hpp"
+#include "prims/jvmtiExport.hpp"
+#include "runtime/jniHandles.hpp"
+#include "runtime/synchronizer.hpp"
+#include "runtime/thread.hpp"
+#include "services/management.hpp"
+#include "utilities/align.hpp"
+
+RootSetClosure::RootSetClosure(EdgeQueue* edge_queue) :
+  _edge_queue(edge_queue) {
+}
+
+void RootSetClosure::do_oop(oop* ref) {
+  assert(ref != NULL, "invariant");
+  // We discard unaligned root references because
+  // our reference tagging scheme will use
+  // the lowest bit in a represented reference
+  // to indicate the reference is narrow.
+  // It is mainly roots delivered via nmethod::oops_do()
+  // that come in unaligned. It should be ok to duck these
+  // since they are supposedly weak.
+  if (!is_aligned(ref, HeapWordSize)) {
+    return;
+  }
+
+  assert(is_aligned(ref, HeapWordSize), "invariant");
+  const oop pointee = *ref;
+  if (pointee != NULL) {
+    closure_impl(ref, pointee);
+  }
+}
+
+void RootSetClosure::do_oop(narrowOop* ref) {
+  assert(ref != NULL, "invariant");
+  assert(is_aligned(ref, sizeof(narrowOop)), "invariant");
+  const oop pointee = oopDesc::load_decode_heap_oop(ref);
+  if (pointee != NULL) {
+    closure_impl(UnifiedOop::encode(ref), pointee);
+  }
+}
+
+void RootSetClosure::closure_impl(const oop* reference, const oop pointee) {
+  if (!_edge_queue->is_full())  {
+    _edge_queue->add(NULL, reference);
+  }
+}
+
+void RootSetClosure::add_to_queue(EdgeQueue* edge_queue) {
+  RootSetClosure rs(edge_queue);
+  process_roots(&rs);
+}
+
+class RootSetClosureMarkScope : public MarkingCodeBlobClosure::MarkScope {
+};
+
+void RootSetClosure::process_roots(OopClosure* closure) {
+  SaveRestoreCLDClaimBits save_restore_cld_claim_bits;
+  RootSetClosureMarkScope mark_scope;
+
+  CLDToOopClosure cldt_closure(closure);
+  ClassLoaderDataGraph::always_strong_cld_do(&cldt_closure);
+  CodeBlobToOopClosure blobs(closure, false);
+  Threads::oops_do(closure, NULL, &blobs); // XXX set CLDClosure to NULL
+  ObjectSynchronizer::oops_do(closure);
+  Universe::oops_do(closure);
+  JNIHandles::oops_do(closure);
+  JvmtiExport::oops_do(closure);
+  SystemDictionary::oops_do(closure);
+  Management::oops_do(closure);
+  StringTable::oops_do(closure);
+}
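
The low-bit tagging that motivates discarding unaligned roots in do_oop(oop*) above can be sketched as follows. This is an approximation for illustration only; the actual encoding lives in UnifiedOop and may differ in detail.

// Illustrative approximation of the reference-tagging idea, not the UnifiedOop code.
#include <cstdint>

// Tag a narrowOop* by setting the lowest address bit; an aligned oop* always has
// that bit clear, so both kinds of references can share one pointer-sized slot.
// An unaligned oop* would be ambiguous, which is why such roots are skipped.
static const void* tag_narrow(const void* narrow_ref) {
  return reinterpret_cast<const void*>(reinterpret_cast<uintptr_t>(narrow_ref) | 1);
}

static bool is_tagged_narrow(const void* ref) {
  return (reinterpret_cast<uintptr_t>(ref) & 1) != 0;
}

static const void* untag(const void* ref) {
  return reinterpret_cast<const void*>(reinterpret_cast<uintptr_t>(ref) & ~static_cast<uintptr_t>(1));
}
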
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/chains/rootSetClosure.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_LEAKPROFILER_CHAINS_ROOTSETCLOSURE_HPP
+#define SHARE_VM_JFR_LEAKPROFILER_CHAINS_ROOTSETCLOSURE_HPP
+
+#include "memory/iterator.hpp"
+#include "oops/oop.hpp"
+
+class EdgeQueue;
+
+class RootSetClosure: public ExtendedOopClosure { // BasicOopIterateClosure
+ private:
+  RootSetClosure(EdgeQueue* edge_queue);
+  EdgeQueue* _edge_queue;
+  void closure_impl(const oop* reference, const oop pointee);
+ public:
+  static void add_to_queue(EdgeQueue* edge_queue);
+  static void process_roots(OopClosure* closure);
+
+  virtual void do_oop(oop* reference);
+  virtual void do_oop(narrowOop* reference);
+};
+
+#endif // SHARE_VM_JFR_LEAKPROFILER_CHAINS_ROOTSETCLOSURE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,320 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/jfrEvents.hpp"
+#include "jfr/recorder/jfrRecorder.hpp"
+#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
+#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
+#include "jfr/leakprofiler/chains/edgeStore.hpp"
+#include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
+#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
+#include "jfr/leakprofiler/checkpoint/objectSampleWriter.hpp"
+#include "jfr/leakprofiler/leakProfiler.hpp"
+#include "jfr/leakprofiler/sampling/objectSample.hpp"
+#include "jfr/leakprofiler/sampling/objectSampler.hpp"
+#include "jfr/leakprofiler/utilities/rootType.hpp"
+#include "jfr/metadata/jfrSerializer.hpp"
+#include "runtime/interfaceSupport.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/thread.inline.hpp"
+
+template <typename SampleProcessor>
+static void do_samples(ObjectSample* sample, const ObjectSample* const end, SampleProcessor& processor) {
+  assert(sample != NULL, "invariant");
+  while (sample != end) {
+    processor.sample_do(sample);
+    sample = sample->next();
+  }
+}
+
+class RootSystemType : public JfrSerializer {
+ public:
+  void serialize(JfrCheckpointWriter& writer) {
+    const u4 nof_root_systems = OldObjectRoot::_number_of_systems;
+    writer.write_count(nof_root_systems);
+    for (u4 i = 0; i < nof_root_systems; ++i) {
+      writer.write_key(i);
+      writer.write(OldObjectRoot::system_description((OldObjectRoot::System)i));
+    }
+  }
+};
+
+class RootType : public JfrSerializer {
+ public:
+  void serialize(JfrCheckpointWriter& writer) {
+    const u4 nof_root_types = OldObjectRoot::_number_of_types;
+    writer.write_count(nof_root_types);
+    for (u4 i = 0; i < nof_root_types; ++i) {
+      writer.write_key(i);
+      writer.write(OldObjectRoot::type_description((OldObjectRoot::Type)i));
+    }
+  }
+};
+
+class CheckpointInstall {
+ private:
+  const JfrCheckpointBlobHandle& _cp;
+ public:
+  CheckpointInstall(const JfrCheckpointBlobHandle& cp) : _cp(cp) {}
+  void sample_do(ObjectSample* sample) {
+    assert(sample != NULL, "invariant");
+    if (!sample->is_dead()) {
+      sample->set_klass_checkpoint(_cp);
+    }
+  }
+};
+
+class CheckpointWrite {
+ private:
+  JfrCheckpointWriter& _writer;
+  const jlong _last_sweep;
+ public:
+  CheckpointWrite(JfrCheckpointWriter& writer, jlong last_sweep) : _writer(writer), _last_sweep(last_sweep) {}
+  void sample_do(ObjectSample* sample) {
+    assert(sample != NULL, "invariant");
+    if (sample->is_alive_and_older_than(_last_sweep)) {
+      if (sample->has_thread_checkpoint()) {
+        const JfrCheckpointBlobHandle& thread_cp = sample->thread_checkpoint();
+        thread_cp->exclusive_write(_writer);
+      }
+      if (sample->has_klass_checkpoint()) {
+        const JfrCheckpointBlobHandle& klass_cp = sample->klass_checkpoint();
+        klass_cp->exclusive_write(_writer);
+      }
+    }
+  }
+};
+
+class CheckpointStateReset {
+ private:
+  const jlong _last_sweep;
+ public:
+  CheckpointStateReset(jlong last_sweep) : _last_sweep(last_sweep) {}
+  void sample_do(ObjectSample* sample) {
+    assert(sample != NULL, "invariant");
+    if (sample->is_alive_and_older_than(_last_sweep)) {
+      if (sample->has_thread_checkpoint()) {
+        const JfrCheckpointBlobHandle& thread_cp = sample->thread_checkpoint();
+        thread_cp->reset_write_state();
+      }
+      if (sample->has_klass_checkpoint()) {
+        const JfrCheckpointBlobHandle& klass_cp = sample->klass_checkpoint();
+        klass_cp->reset_write_state();
+      }
+    }
+  }
+};
+
+class StackTraceWrite {
+ private:
+  JfrStackTraceRepository& _stack_trace_repo;
+  JfrCheckpointWriter& _writer;
+  int _count;
+ public:
+  StackTraceWrite(JfrStackTraceRepository& stack_trace_repo, JfrCheckpointWriter& writer) :
+    _stack_trace_repo(stack_trace_repo), _writer(writer), _count(0) {
+    JfrStacktrace_lock->lock();
+  }
+  ~StackTraceWrite() {
+    assert(JfrStacktrace_lock->owned_by_self(), "invariant");
+    JfrStacktrace_lock->unlock();
+  }
+
+  void sample_do(ObjectSample* sample) {
+    assert(sample != NULL, "invariant");
+    if (!sample->is_dead()) {
+      if (sample->has_stack_trace()) {
+        JfrTraceId::use(sample->klass(), true);
+        _stack_trace_repo.write(_writer, sample->stack_trace_id(), sample->stack_trace_hash());
+        ++_count;
+      }
+    }
+  }
+
+  int count() const {
+    return _count;
+  }
+};
+
+class SampleMark {
+ private:
+  ObjectSampleMarker& _marker;
+  jlong _last_sweep;
+  int _count;
+ public:
+  SampleMark(ObjectSampleMarker& marker, jlong last_sweep) : _marker(marker),
+                                                             _last_sweep(last_sweep),
+                                                             _count(0) {}
+  void sample_do(ObjectSample* sample) {
+    assert(sample != NULL, "invariant");
+    if (sample->is_alive_and_older_than(_last_sweep)) {
+      _marker.mark(sample->object());
+      ++_count;
+    }
+  }
+
+  int count() const {
+    return _count;
+  }
+};
+
+void ObjectSampleCheckpoint::install(JfrCheckpointWriter& writer, bool class_unload, bool resume) {
+  assert(class_unload ? SafepointSynchronize::is_at_safepoint() : LeakProfiler::is_suspended(), "invariant");
+
+  if (!writer.has_data()) {
+    if (!class_unload) {
+      LeakProfiler::resume();
+    }
+    assert(LeakProfiler::is_running(), "invariant");
+    return;
+  }
+
+  assert(writer.has_data(), "invariant");
+  const JfrCheckpointBlobHandle h_cp = writer.checkpoint_blob();
+
+  const ObjectSampler* const object_sampler = LeakProfiler::object_sampler();
+  assert(object_sampler != NULL, "invariant");
+
+  ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
+  const ObjectSample* const last_resolved = object_sampler->last_resolved();
+  CheckpointInstall install(h_cp);
+
+  if (class_unload) {
+    if (last != NULL) {
+      // all samples need the class unload information
+      do_samples(last, NULL, install);
+    }
+    assert(LeakProfiler::is_running(), "invariant");
+    return;
+  }
+
+  // only new samples since last resolved checkpoint
+  if (last != last_resolved) {
+    do_samples(last, last_resolved, install);
+    if (resume) {
+      const_cast<ObjectSampler*>(object_sampler)->set_last_resolved(last);
+    }
+  }
+  assert(LeakProfiler::is_suspended(), "invariant");
+  if (resume) {
+    LeakProfiler::resume();
+    assert(LeakProfiler::is_running(), "invariant");
+  }
+}
+
+void ObjectSampleCheckpoint::write(const EdgeStore* edge_store, bool emit_all, Thread* thread) {
+  assert(edge_store != NULL, "invariant");
+  assert(thread != NULL, "invariant");
+  static bool types_registered = false;
+  if (!types_registered) {
+    JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTSYSTEM, false, true, new RootSystemType());
+    JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTTYPE, false, true, new RootType());
+    types_registered = true;
+  }
+  const ObjectSampler* const object_sampler = LeakProfiler::object_sampler();
+  assert(object_sampler != NULL, "invariant");
+  const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value();
+  ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
+  {
+    JfrCheckpointWriter writer(false, false, thread);
+    CheckpointWrite checkpoint_write(writer, last_sweep);
+    do_samples(last, NULL, checkpoint_write);
+  }
+  CheckpointStateReset state_reset(last_sweep);
+  do_samples(last, NULL, state_reset);
+  if (!edge_store->is_empty()) {
+    // java object and chain representations
+    JfrCheckpointWriter writer(false, true, thread);
+    ObjectSampleWriter osw(writer, edge_store);
+    edge_store->iterate_edges(osw);
+  }
+}
+
+WriteObjectSampleStacktrace::WriteObjectSampleStacktrace(JfrStackTraceRepository& repo) :
+  _stack_trace_repo(repo) {
+}
+
+bool WriteObjectSampleStacktrace::process() {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  if (!LeakProfiler::is_running()) {
+    return true;
+  }
+  // Suspend the LeakProfiler subsystem
+  // to ensure stable samples even
+  // after we return from the safepoint.
+  LeakProfiler::suspend();
+  assert(!LeakProfiler::is_running(), "invariant");
+  assert(LeakProfiler::is_suspended(), "invariant");
+
+  const ObjectSampler* object_sampler = LeakProfiler::object_sampler();
+  assert(object_sampler != NULL, "invariant");
+  assert(LeakProfiler::is_suspended(), "invariant");
+
+  ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
+  const ObjectSample* const last_resolved = object_sampler->last_resolved();
+  if (last == last_resolved) {
+    assert(LeakProfiler::is_suspended(), "invariant");
+    return true;
+  }
+
+  JfrCheckpointWriter writer(false, true, Thread::current());
+  const JfrCheckpointContext ctx = writer.context();
+
+  writer.write_type(TYPE_STACKTRACE);
+  const jlong count_offset = writer.reserve(sizeof(u4));
+
+  int count = 0;
+  {
+    StackTraceWrite stack_trace_write(_stack_trace_repo, writer); // JfrStacktrace_lock
+    do_samples(last, last_resolved, stack_trace_write);
+    count = stack_trace_write.count();
+  }
+  if (count == 0) {
+    writer.set_context(ctx);
+    assert(LeakProfiler::is_suspended(), "invariant");
+    return true;
+  }
+  assert(count > 0, "invariant");
+  writer.write_count((u4)count, count_offset);
+  JfrStackTraceRepository::write_metadata(writer);
+
+  ObjectSampleCheckpoint::install(writer, false, false);
+  assert(LeakProfiler::is_suspended(), "invariant");
+  return true;
+}
+
+int ObjectSampleCheckpoint::mark(ObjectSampleMarker& marker, bool emit_all) {
+  const ObjectSampler* object_sampler = LeakProfiler::object_sampler();
+  assert(object_sampler != NULL, "invariant");
+  ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
+  if (last == NULL) {
+    return 0;
+  }
+  const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value();
+  SampleMark mark(marker, last_sweep);
+  do_samples(last, NULL, mark);
+  return mark.count();
+}
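
The do_samples() helper at the top of this file drives every pass over the sampler's intrusive list, from the newest sample up to (but not including) a previously resolved one. Below is a minimal stand-alone model of that pattern; Node, do_nodes and Counter are hypothetical names used only for illustration.

// Illustrative model of the do_samples()/sample_do() pattern, not part of the patch.
#include <cstddef>

struct Node { Node* next; };

// Walk the list from 'node' up to, but not including, 'end' (NULL means "all"),
// applying a functor that exposes a node_do() member -- the same shape as the
// sample_do() processors above.
template <typename Processor>
static void do_nodes(Node* node, const Node* end, Processor& processor) {
  while (node != end) {
    processor.node_do(node);
    node = node->next;
  }
}

struct Counter {
  int count;
  Counter() : count(0) {}
  void node_do(Node*) { ++count; }
};
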
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLECHECKPOINT_HPP
+#define SHARE_VM_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLECHECKPOINT_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/exceptions.hpp"
+
+class EdgeStore;
+class JfrStackTraceRepository;
+class JfrCheckpointWriter;
+class ObjectSampleMarker;
+
+class ObjectSampleCheckpoint : AllStatic {
+ public:
+  static void install(JfrCheckpointWriter& writer, bool class_unload, bool resume);
+  static void write(const EdgeStore* edge_store, bool emit_all, Thread* thread);
+  static int mark(ObjectSampleMarker& marker, bool emit_all);
+};
+
+class WriteObjectSampleStacktrace : public StackObj {
+ private:
+  JfrStackTraceRepository& _stack_trace_repo;
+ public:
+  WriteObjectSampleStacktrace(JfrStackTraceRepository& repo);
+  bool process();
+};
+
+#endif // SHARE_VM_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLECHECKPOINT_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleDescription.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,216 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/javaClasses.hpp"
+#include "classfile/symbolTable.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "jfr/leakprofiler/checkpoint/objectSampleDescription.hpp"
+#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/thread.hpp"
+#include "utilities/ostream.hpp"
+
+static Symbol* symbol_size = NULL;
+
+ObjectDescriptionBuilder::ObjectDescriptionBuilder() {
+  reset();
+}
+
+void ObjectDescriptionBuilder::write_int(jint value) {
+  char buf[20];
+  jio_snprintf(buf, sizeof(buf), "%d", value);
+  write_text(buf);
+}
+
+void ObjectDescriptionBuilder::write_text(const char* text) {
+  if (_index == sizeof(_buffer) - 2) {
+    return;
+  }
+  while (*text != '\0' && _index < sizeof(_buffer) - 2) {
+    _buffer[_index] = *text;
+    _index++;
+    text++;
+  }
+  assert(_index < sizeof(_buffer) - 1, "index should not exceed buffer size");
+  // add ellipsis if we reached the end
+  if (_index == sizeof(_buffer) - 2) {
+    _buffer[_index-3] = '.';
+    _buffer[_index-2] = '.';
+    _buffer[_index-1] = '.';
+  }
+  // terminate string
+  _buffer[_index] = '\0';
+}
+
+void ObjectDescriptionBuilder::reset() {
+  _index = 0;
+  _buffer[0] = '\0';
+}
+
+void ObjectDescriptionBuilder::print_description(outputStream* out) {
+  out->print("%s", (const char*)_buffer);
+}
+
+const char* ObjectDescriptionBuilder::description() {
+  if (_buffer[0] == '\0') {
+    return NULL;
+  }
+  const size_t len = strlen(_buffer);
+  char* copy = NEW_RESOURCE_ARRAY(char, len + 1);
+  assert(copy != NULL, "invariant");
+  strncpy(copy, _buffer, len + 1);
+  return copy;
+}
+
+ObjectSampleDescription::ObjectSampleDescription(oop object) :
+  _object(object) {
+}
+
+void ObjectSampleDescription::ensure_initialized() {
+  if (symbol_size == NULL) {
+    symbol_size = SymbolTable::new_permanent_symbol("size", Thread::current());
+  }
+}
+
+void ObjectSampleDescription::print_description(outputStream* out) {
+  write_object_to_buffer();
+  _description.print_description(out);
+}
+
+const char* ObjectSampleDescription::description() {
+  write_object_to_buffer();
+  return _description.description();
+}
+
+void ObjectSampleDescription::write_text(const char* text) {
+  _description.write_text(text);
+}
+
+void ObjectSampleDescription::write_int(jint value) {
+  _description.write_int(value);
+}
+
+void ObjectSampleDescription::write_object_to_buffer() {
+  ensure_initialized();
+  _description.reset();
+  write_object_details();
+}
+
+void ObjectSampleDescription::write_object_details() {
+  Klass* klass = _object->klass();
+  Symbol* class_name = klass->name();
+  jint size;
+
+  if (_object->is_a(SystemDictionary::Class_klass())) {
+    write_class_name();
+    return;
+  }
+
+  if (_object->is_a(SystemDictionary::Thread_klass())) {
+    write_thread_name();
+    return;
+  }
+
+  if (_object->is_a(SystemDictionary::ThreadGroup_klass())) {
+    write_thread_group_name();
+    return;
+  }
+
+  if (read_int_size(&size)) {
+    write_size(size);
+    return;
+  }
+}
+
+void ObjectSampleDescription::write_class_name() {
+  assert(_object->is_a(SystemDictionary::Class_klass()), "invariant");
+  Klass* const k = java_lang_Class::as_Klass(_object);
+  if (k == NULL) {
+    // might represent a primitive
+    const Klass* const ak = java_lang_Class::array_klass(_object);
+    // If ak is NULL, this is most likely a mirror associated with a
+    // jvmti redefine/retransform scratch klass. We can't get any additional
+    // information from it.
+    if (ak != NULL) {
+      write_text(type2name(java_lang_Class::primitive_type(_object)));
+    }
+    return;
+  }
+
+  if (k->oop_is_instance()) {
+    const InstanceKlass* ik = InstanceKlass::cast(k);
+    if (ik->is_anonymous()) {
+      return;
+    }
+    assert(!ik->is_anonymous(), "invariant");
+    const Symbol* name = ik->name();
+    if (name != NULL) {
+      write_text("Class Name: ");
+      write_text(name->as_klass_external_name());
+    }
+  }
+}
+
+void ObjectSampleDescription::write_thread_group_name() {
+  assert(_object->is_a(SystemDictionary::ThreadGroup_klass()), "invariant");
+  typeArrayOop tg_name = java_lang_ThreadGroup::name(_object);
+  if (tg_name != NULL) {
+    write_text("Thread Group: ");
+    write_text(UNICODE::as_utf8((jchar*) tg_name->base(T_CHAR), tg_name->length()));
+  }
+}
+
+void ObjectSampleDescription::write_thread_name() {
+  assert(_object->is_a(SystemDictionary::Thread_klass()), "invariant");
+  oop name = java_lang_Thread::name(_object);
+  if (name != NULL) {
+    char* p = java_lang_String::as_utf8_string(name);
+    if (p != NULL) {
+      write_text("Thread Name: ");
+      write_text(p);
+    }
+  }
+}
+
+void ObjectSampleDescription::write_size(jint size) {
+  if (size >= 0) {
+    write_text("Size: ");
+    write_int(size);
+  }
+}
+
+bool ObjectSampleDescription::read_int_size(jint* result_size) {
+  fieldDescriptor fd;
+  Klass* klass = _object->klass();
+  if (klass->oop_is_instance()) {
+    InstanceKlass* ik = InstanceKlass::cast(klass);
+    if (ik->find_field(symbol_size, vmSymbols::int_signature(), false, &fd) != NULL) {
+       jint size = _object->int_field(fd.offset());
+       *result_size = size;
+       return true;
+    }
+  }
+  return false;
+}
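
ObjectDescriptionBuilder::write_text() above implements a small bounded-append-with-ellipsis scheme. The following self-contained model shows the same idea; DESC_BUF_SIZE and append_bounded are hypothetical names, with the buffer size chosen to mirror OBJECT_SAMPLE_DESCRIPTION_BUFFER_SIZE from objectSampleDescription.hpp.

// Illustrative model of the bounded description buffer, not part of the changeset.
#include <stddef.h>
#include <string.h>

enum { DESC_BUF_SIZE = 100 };  // mirrors OBJECT_SAMPLE_DESCRIPTION_BUFFER_SIZE

// Append 'text' into 'buf', never writing past DESC_BUF_SIZE - 2 characters;
// once the buffer fills up, the last three characters become "..." and further
// appends are ignored. 'index' tracks the current length and the buffer stays
// NUL-terminated after every call.
static void append_bounded(char* buf, size_t* index, const char* text) {
  if (*index == DESC_BUF_SIZE - 2) {
    return;  // already full and ellipsized
  }
  while (*text != '\0' && *index < DESC_BUF_SIZE - 2) {
    buf[(*index)++] = *text++;
  }
  if (*index == DESC_BUF_SIZE - 2) {
    memcpy(&buf[*index - 3], "...", 3);  // overwrite the tail with an ellipsis
  }
  buf[*index] = '\0';
}
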
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleDescription.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLEDESCRIPTION_HPP
+#define SHARE_VM_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLEDESCRIPTION_HPP
+
+#define OBJECT_SAMPLE_DESCRIPTION_BUFFER_SIZE 100
+
+#include "memory/allocation.hpp"
+
+class outputStream;
+
+class ObjectDescriptionBuilder : public StackObj {
+private:
+  char _buffer[OBJECT_SAMPLE_DESCRIPTION_BUFFER_SIZE];
+  size_t _index;
+
+public:
+  ObjectDescriptionBuilder();
+
+  void write_text(const char* text);
+  void write_int(jint value);
+  void reset();
+
+  void print_description(outputStream* out);
+  const char* description();
+};
+
+class ObjectSampleDescription : public StackObj {
+private:
+  ObjectDescriptionBuilder _description;
+  oop _object;
+
+  void write_text(const char* text);
+  void write_int(jint value);
+
+  void write_object_details();
+  void write_size(jint size);
+  void write_thread_name();
+  void write_thread_group_name();
+  void write_class_name();
+  void write_object_to_buffer();
+  bool is_class(Symbol* s1, const char* s2);
+  void ensure_initialized();
+  bool read_int_size(jint* result);
+
+public:
+  ObjectSampleDescription(oop object);
+  void print_description(outputStream* out);
+  const char* description();
+};
+
+#endif // SHARE_VM_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLEDESCRIPTION_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,615 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfrfiles/jfrTypes.hpp"
+#include "jfr/leakprofiler/chains/edge.hpp"
+#include "jfr/leakprofiler/chains/edgeStore.hpp"
+#include "jfr/leakprofiler/chains/edgeUtils.hpp"
+#include "jfr/leakprofiler/checkpoint/objectSampleDescription.hpp"
+#include "jfr/leakprofiler/checkpoint/objectSampleWriter.hpp"
+#include "jfr/leakprofiler/checkpoint/rootResolver.hpp"
+#include "jfr/leakprofiler/sampling/objectSampler.hpp"
+#include "jfr/leakprofiler/utilities/rootType.hpp"
+#include "jfr/leakprofiler/utilities/unifiedOop.hpp"
+#include "jfr/recorder/checkpoint/types/jfrTypeSetUtils.hpp"
+#include "jfr/recorder/checkpoint/types/jfrTypeSetWriter.hpp"
+#include "oops/oop.inline.hpp"
+#include "oops/symbol.hpp"
+#include "utilities/growableArray.hpp"
+
+template <typename Data>
+class ObjectSampleAuxInfo : public ResourceObj {
+ public:
+  Data _data;
+  traceid _id;
+  ObjectSampleAuxInfo() : _data(), _id(0) {}
+};
+
+class ObjectSampleArrayData {
+ public:
+  int _array_size;
+  int _array_index;
+  ObjectSampleArrayData() : _array_size(0), _array_index(0) {}
+};
+
+class ObjectSampleFieldInfo : public ResourceObj {
+ public:
+  const Symbol* _field_name_symbol;
+  jshort _field_modifiers;
+  ObjectSampleFieldInfo() : _field_name_symbol(NULL), _field_modifiers(0) {}
+};
+
+class ObjectSampleRootDescriptionData {
+ public:
+  const Edge* _root_edge;
+  const char* _description;
+  OldObjectRoot::System _system;
+  OldObjectRoot::Type _type;
+  ObjectSampleRootDescriptionData() : _root_edge(NULL),
+                                      _description(NULL),
+                                      _system(OldObjectRoot::_system_undetermined),
+                                      _type(OldObjectRoot::_type_undetermined) {}
+};
+
+class OldObjectSampleData {
+ public:
+  oop _object;
+  traceid _reference_id;
+};
+
+class ReferenceData {
+ public:
+  traceid _field_info_id;
+  traceid _array_info_id;
+  traceid _old_object_sample_id;
+  size_t  _skip;
+};
+
+static int initial_storage_size = 16;
+
+template <typename Data>
+class SampleSet : public ResourceObj {
+ private:
+  GrowableArray<Data>* _storage;
+ public:
+  SampleSet() : _storage(NULL) {}
+
+  traceid store(Data data) {
+    assert(data != NULL, "invariant");
+    if (_storage == NULL) {
+      _storage = new GrowableArray<Data>(initial_storage_size);
+    }
+    assert(_storage != NULL, "invariant");
+    assert(_storage->find(data) == -1, "invariant");
+    _storage->append(data);
+    return data->_id;
+  }
+
+  size_t size() const {
+    return _storage != NULL ? (size_t)_storage->length() : 0;
+  }
+
+  template <typename Functor>
+  void iterate(Functor& functor) {
+    if (_storage != NULL) {
+      for (int i = 0; i < _storage->length(); ++i) {
+        functor(_storage->at(i));
+      }
+    }
+  }
+
+  const GrowableArray<Data>& storage() const {
+    return *_storage;
+  }
+};
+
+typedef ObjectSampleAuxInfo<ObjectSampleArrayData> ObjectSampleArrayInfo;
+typedef ObjectSampleAuxInfo<ObjectSampleRootDescriptionData> ObjectSampleRootDescriptionInfo;
+typedef ObjectSampleAuxInfo<OldObjectSampleData> OldObjectSampleInfo;
+typedef ObjectSampleAuxInfo<ReferenceData> ReferenceInfo;
+
+class FieldTable : public ResourceObj {
+  template <typename,
+            typename,
+            template<typename, typename> class,
+            typename,
+            size_t>
+  friend class HashTableHost;
+  typedef HashTableHost<const ObjectSampleFieldInfo*, traceid, Entry, FieldTable, 109> FieldInfoTable;
+ public:
+  typedef FieldInfoTable::HashEntry FieldInfoEntry;
+
+ private:
+  static traceid _field_id_counter;
+  FieldInfoTable* _table;
+
+  void assign_id(FieldInfoEntry* entry) {
+    assert(entry != NULL, "invariant");
+    entry->set_id(++_field_id_counter);
+  }
+
+  bool equals(const ObjectSampleFieldInfo* query, uintptr_t hash, const FieldInfoEntry* entry) {
+    assert(hash == entry->hash(), "invariant");
+    assert(query != NULL, "invariant");
+    const ObjectSampleFieldInfo* stored = entry->literal();
+    assert(stored != NULL, "invariant");
+    assert(((Symbol*)stored->_field_name_symbol)->identity_hash() == ((Symbol*)query->_field_name_symbol)->identity_hash(), "invariant");
+    return stored->_field_modifiers == query->_field_modifiers;
+  }
+
+ public:
+  FieldTable() : _table(new FieldInfoTable(this)) {}
+  ~FieldTable() {
+    assert(_table != NULL, "invariant");
+    delete _table;
+  }
+
+  traceid store(const ObjectSampleFieldInfo* field_info) {
+    assert(field_info != NULL, "invariant");
+    const FieldInfoEntry& entry =_table->lookup_put(field_info,
+                                                    ((Symbol*)field_info->_field_name_symbol)->identity_hash());
+    return entry.id();
+  }
+
+  size_t size() const {
+    return _table->cardinality();
+  }
+
+  template <typename T>
+  void iterate(T& functor) const {
+    _table->iterate_entry<T>(functor);
+  }
+};
+
+traceid FieldTable::_field_id_counter = 0;
+
+typedef SampleSet<const OldObjectSampleInfo*> SampleInfo;
+typedef SampleSet<const ReferenceInfo*> RefInfo;
+typedef SampleSet<const ObjectSampleArrayInfo*> ArrayInfo;
+typedef SampleSet<const ObjectSampleRootDescriptionInfo*> RootDescriptionInfo;
+
+static SampleInfo* sample_infos = NULL;
+static RefInfo* ref_infos = NULL;
+static ArrayInfo* array_infos = NULL;
+static FieldTable* field_infos = NULL;
+static RootDescriptionInfo* root_infos = NULL;
+
+int __write_sample_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* si) {
+  assert(writer != NULL, "invariant");
+  assert(si != NULL, "invariant");
+  const OldObjectSampleInfo* const oosi = (const OldObjectSampleInfo*)si;
+  oop object = oosi->_data._object;
+  assert(object != NULL, "invariant");
+  writer->write(oosi->_id);
+  writer->write((u8)(const HeapWord*)object);
+  writer->write(const_cast<const Klass*>(object->klass()));
+  ObjectSampleDescription od(object);
+  writer->write(od.description());
+  writer->write(oosi->_data._reference_id);
+  return 1;
+}
+
+typedef JfrArtifactWriterImplHost<const OldObjectSampleInfo*, __write_sample_info__> SampleWriterImpl;
+typedef JfrArtifactWriterHost<SampleWriterImpl, TYPE_OLDOBJECT> SampleWriter;
+
+static void write_sample_infos(JfrCheckpointWriter& writer) {
+  if (sample_infos != NULL) {
+    SampleWriter sw(&writer, NULL, false);
+    sample_infos->iterate(sw);
+  }
+}
+
+int __write_reference_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* ri) {
+  assert(writer != NULL, "invariant");
+  assert(ri != NULL, "invariant");
+  const ReferenceInfo* const ref_info = (const ReferenceInfo*)ri;
+  writer->write(ref_info->_id);
+  writer->write(ref_info->_data._array_info_id);
+  writer->write(ref_info->_data._field_info_id);
+  writer->write(ref_info->_data._old_object_sample_id);
+  writer->write<s4>((s4)ref_info->_data._skip);
+  return 1;
+}
+
+typedef JfrArtifactWriterImplHost<const ReferenceInfo*, __write_reference_info__> ReferenceWriterImpl;
+typedef JfrArtifactWriterHost<ReferenceWriterImpl, TYPE_REFERENCE> ReferenceWriter;
+
+static void write_reference_infos(JfrCheckpointWriter& writer) {
+  if (ref_infos != NULL) {
+    ReferenceWriter rw(&writer, NULL, false);
+    ref_infos->iterate(rw);
+  }
+}
+
+int __write_array_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* ai) {
+  assert(writer != NULL, "invariant");
+  assert(ai != NULL, "invariant");
+  const ObjectSampleArrayInfo* const osai = (const ObjectSampleArrayInfo*)ai;
+  writer->write(osai->_id);
+  writer->write(osai->_data._array_size);
+  writer->write(osai->_data._array_index);
+  return 1;
+}
+
+static traceid get_array_info_id(const Edge& edge, traceid id) {
+  if (edge.is_root() || !EdgeUtils::is_array_element(edge)) {
+    return 0;
+  }
+  if (array_infos == NULL) {
+    array_infos = new ArrayInfo();
+  }
+  assert(array_infos != NULL, "invariant");
+
+  ObjectSampleArrayInfo* const osai = new ObjectSampleArrayInfo();
+  assert(osai != NULL, "invariant");
+  osai->_id = id;
+  osai->_data._array_size = EdgeUtils::array_size(edge);
+  osai->_data._array_index = EdgeUtils::array_index(edge);
+  return array_infos->store(osai);
+}
+
+typedef JfrArtifactWriterImplHost<const ObjectSampleArrayInfo*, __write_array_info__> ArrayWriterImpl;
+typedef JfrArtifactWriterHost<ArrayWriterImpl, TYPE_OLDOBJECTARRAY> ArrayWriter;
+
+static void write_array_infos(JfrCheckpointWriter& writer) {
+  if (array_infos != NULL) {
+    ArrayWriter aw(&writer, NULL, false);
+    array_infos->iterate(aw);
+  }
+}
+
+int __write_field_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* fi) {
+  assert(writer != NULL, "invariant");
+  assert(fi != NULL, "invariant");
+  const FieldTable::FieldInfoEntry* field_info_entry = (const FieldTable::FieldInfoEntry*)fi;
+  writer->write(field_info_entry->id());
+  const ObjectSampleFieldInfo* const osfi = field_info_entry->literal();
+  writer->write(osfi->_field_name_symbol->as_C_string());
+  writer->write(osfi->_field_modifiers);
+  return 1;
+}
+
+static traceid get_field_info_id(const Edge& edge) {
+  if (edge.is_root()) {
+    return 0;
+  }
+
+  assert(!EdgeUtils::is_array_element(edge), "invariant");
+  const Symbol* const field_name_symbol = EdgeUtils::field_name_symbol(edge);
+  if (field_name_symbol == NULL) {
+    return 0;
+  }
+
+  if (field_infos == NULL) {
+    field_infos = new FieldTable();
+  }
+  assert(field_infos != NULL, "invariant");
+
+  ObjectSampleFieldInfo* const osfi = new ObjectSampleFieldInfo();
+  assert(osfi != NULL, "invariant");
+  osfi->_field_name_symbol = field_name_symbol;
+  osfi->_field_modifiers = EdgeUtils::field_modifiers(edge);
+  return field_infos->store(osfi);
+}
+
+typedef JfrArtifactWriterImplHost<const FieldTable::FieldInfoEntry*, __write_field_info__> FieldWriterImpl;
+typedef JfrArtifactWriterHost<FieldWriterImpl, TYPE_OLDOBJECTFIELD> FieldWriter;
+
+static void write_field_infos(JfrCheckpointWriter& writer) {
+  if (field_infos != NULL) {
+    FieldWriter fw(&writer, NULL, false);
+    field_infos->iterate(fw);
+  }
+}
+
+static const char* description(const ObjectSampleRootDescriptionInfo* osdi) {
+  assert(osdi != NULL, "invariant");
+
+  if (osdi->_data._description == NULL) {
+    return NULL;
+  }
+
+  ObjectDescriptionBuilder description;
+  if (osdi->_data._system == OldObjectRoot::_threads) {
+    description.write_text("Thread Name: ");
+  }
+  description.write_text(osdi->_data._description);
+  return description.description();
+}
+
+int __write_root_description_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* di) {
+  assert(writer != NULL, "invariant");
+  assert(di != NULL, "invariant");
+  const ObjectSampleRootDescriptionInfo* const osdi = (const ObjectSampleRootDescriptionInfo*)di;
+  writer->write(osdi->_id);
+  writer->write(description(osdi));
+  writer->write<u8>(osdi->_data._system);
+  writer->write<u8>(osdi->_data._type);
+  return 1;
+}
+
+static traceid get_root_description_info_id(const Edge& edge, traceid id) {
+  assert(edge.is_root(), "invariant");
+  if (EdgeUtils::is_leak_edge(edge)) {
+    return 0;
+  }
+
+  if (root_infos == NULL) {
+    root_infos = new RootDescriptionInfo();
+  }
+  assert(root_infos != NULL, "invariant");
+  ObjectSampleRootDescriptionInfo* const oodi = new ObjectSampleRootDescriptionInfo();
+  oodi->_id = id;
+  oodi->_data._root_edge = &edge;
+  return root_infos->store(oodi);
+}
+
+typedef JfrArtifactWriterImplHost<const ObjectSampleRootDescriptionInfo*, __write_root_description_info__> RootDescriptionWriterImpl;
+typedef JfrArtifactWriterHost<RootDescriptionWriterImpl, TYPE_OLDOBJECTGCROOT> RootDescriptionWriter;
+
+
+int _edge_reference_compare_(uintptr_t lhs, uintptr_t rhs) {
+  return lhs > rhs ? 1 : (lhs < rhs) ? -1 : 0;
+}
+
+int _root_desc_compare_(const ObjectSampleRootDescriptionInfo*const & lhs, const ObjectSampleRootDescriptionInfo* const& rhs) {
+  const uintptr_t lhs_ref = (uintptr_t)lhs->_data._root_edge->reference();
+  const uintptr_t rhs_ref = (uintptr_t)rhs->_data._root_edge->reference();
+  return _edge_reference_compare_(lhs_ref, rhs_ref);
+}
+
+static int find_sorted(const RootCallbackInfo& callback_info,
+                       const GrowableArray<const ObjectSampleRootDescriptionInfo*>* arr,
+                       int length,
+                       bool& found) {
+  assert(arr != NULL, "invariant");
+  assert(length >= 0, "invariant");
+  assert(length <= arr->length(), "invariant");
+
+  found = false;
+  int min = 0;
+  int max = length;
+  while (max >= min) {
+    const int mid = (int)(((uint)max + min) / 2);
+    int diff = _edge_reference_compare_((uintptr_t)callback_info._high,
+                                        (uintptr_t)arr->at(mid)->_data._root_edge->reference());
+    if (diff > 0) {
+      min = mid + 1;
+    } else if (diff < 0) {
+      max = mid - 1;
+    } else {
+      found = true;
+      return mid;
+    }
+  }
+  return min;
+}
+
+class RootResolutionSet : public ResourceObj, public RootCallback {
+ private:
+  GrowableArray<const ObjectSampleRootDescriptionInfo*>* _unresolved_roots;
+
+  const uintptr_t high() const {
+    return (uintptr_t)_unresolved_roots->top()->_data._root_edge->reference();
+  }
+
+  const uintptr_t low() const {
+    return (uintptr_t)_unresolved_roots->first()->_data._root_edge->reference();
+  }
+
+  bool in_set_address_range(const RootCallbackInfo& callback_info) const {
+    assert(callback_info._low == NULL, "invariant");
+    const uintptr_t addr = (uintptr_t)callback_info._high;
+    return low() <= addr && high() >= addr;
+  }
+
+  int compare_to_range(const RootCallbackInfo& callback_info) const {
+    assert(callback_info._high != NULL, "invariant");
+    assert(callback_info._low != NULL, "invariant");
+
+    for (int i = 0; i < _unresolved_roots->length(); ++i) {
+      const uintptr_t ref_addr = (uintptr_t)_unresolved_roots->at(i)->_data._root_edge->reference();
+      if ((uintptr_t)callback_info._low <= ref_addr && (uintptr_t)callback_info._high >= ref_addr) {
+        return i;
+      }
+    }
+    return -1;
+  }
+
+  int exact(const RootCallbackInfo& callback_info) const {
+    assert(callback_info._high != NULL, "invariant");
+    assert(in_set_address_range(callback_info), "invariant");
+
+    bool found;
+    const int idx = find_sorted(callback_info, _unresolved_roots, _unresolved_roots->length(), found);
+    return found ? idx : -1;
+  }
+
+  bool resolve_root(const RootCallbackInfo& callback_info, int idx) const {
+    assert(idx >= 0, "invariant");
+    assert(idx < _unresolved_roots->length(), "invariant");
+
+    ObjectSampleRootDescriptionInfo* const desc =
+      const_cast<ObjectSampleRootDescriptionInfo*>(_unresolved_roots->at(idx));
+    assert(desc != NULL, "invariant");
+    assert((uintptr_t)callback_info._high == (uintptr_t)desc->_data._root_edge->reference(), "invariant");
+
+    desc->_data._system = callback_info._system;
+    desc->_data._type = callback_info._type;
+
+    if (callback_info._system == OldObjectRoot::_threads) {
+      const JavaThread* jt = (const JavaThread*)callback_info._context;
+      assert(jt != NULL, "invariant");
+      desc->_data._description = jt->name();
+    }
+
+    _unresolved_roots->remove_at(idx);
+    return _unresolved_roots->is_empty();
+  }
+
+ public:
+  RootResolutionSet(RootDescriptionInfo* info) : _unresolved_roots(NULL) {
+    assert(info != NULL, "invariant");
+    // construct a sorted copy
+    const GrowableArray<const ObjectSampleRootDescriptionInfo*>& info_storage = info->storage();
+    const int length = info_storage.length();
+    _unresolved_roots = new GrowableArray<const ObjectSampleRootDescriptionInfo*>(length);
+    assert(_unresolved_roots != NULL, "invariant");
+
+    for (int i = 0; i < length; ++i) {
+      _unresolved_roots->insert_sorted<_root_desc_compare_>(info_storage.at(i));
+    }
+  }
+
+  bool process(const RootCallbackInfo& callback_info) {
+    if (NULL == callback_info._low) {
+      if (in_set_address_range(callback_info)) {
+        const int idx = exact(callback_info);
+        return idx == -1 ? false : resolve_root(callback_info, idx);
+      }
+      return false;
+    }
+    assert(callback_info._low != NULL, "invariant");
+    const int idx = compare_to_range(callback_info);
+    return idx == -1 ? false : resolve_root(callback_info, idx);
+  }
+
+  int entries() const {
+    return _unresolved_roots->length();
+  }
+
+  const void* at(int idx) const {
+    assert(idx >= 0, "invariant");
+    assert(idx < _unresolved_roots->length(), "invariant");
+    return _unresolved_roots->at(idx)->_data._root_edge->reference();
+  }
+};
+
+static void write_root_descriptors(JfrCheckpointWriter& writer) {
+  if (root_infos != NULL) {
+    // resolve roots
+    RootResolutionSet rrs(root_infos);
+    RootResolver::resolve(rrs);
+    // write roots
+    RootDescriptionWriter rw(&writer, NULL, false);
+    root_infos->iterate(rw);
+  }
+}
+
+static void add_old_object_sample_info(const Edge* current, traceid id) {
+  assert(current != NULL, "invariant");
+  if (sample_infos == NULL) {
+    sample_infos = new SampleInfo();
+  }
+  assert(sample_infos != NULL, "invariant");
+  OldObjectSampleInfo* const oosi = new OldObjectSampleInfo();
+  assert(oosi != NULL, "invariant");
+  oosi->_id = id;
+  oosi->_data._object = current->pointee();
+  oosi->_data._reference_id = current->is_root() ? (traceid)0 : id;
+  sample_infos->store(oosi);
+}
+
+static void add_reference_info(const RoutableEdge* current, traceid id, traceid parent_id) {
+  assert(current != NULL, "invariant");
+  if (ref_infos == NULL) {
+    ref_infos = new RefInfo();
+  }
+
+  assert(ref_infos != NULL, "invariant");
+  ReferenceInfo* const ri = new ReferenceInfo();
+  assert(ri != NULL, "invariant");
+
+  ri->_id = id;
+  ri->_data._array_info_id = !current->is_skip_edge() ? get_array_info_id(*current, id) : 0;
+  ri->_data._field_info_id = ri->_data._array_info_id == 0 && !current->is_skip_edge() ?
+                               get_field_info_id(*current) : (traceid)0;
+  ri->_data._old_object_sample_id = parent_id;
+  ri->_data._skip = current->skip_length();
+  ref_infos->store(ri);
+}
+
+static traceid add_root_info(const Edge* root, traceid id) {
+  assert(root != NULL, "invariant");
+  assert(root->is_root(), "invariant");
+  return get_root_description_info_id(*root, id);
+}
+
+void ObjectSampleWriter::write(const RoutableEdge* edge) {
+  assert(edge != NULL, "invariant");
+  const traceid id = _store->get_id(edge);
+  add_old_object_sample_info(edge, id);
+  const RoutableEdge* parent = edge->logical_parent();
+  if (parent != NULL) {
+    add_reference_info(edge, id, _store->get_id(parent));
+  } else {
+    assert(edge->is_root(), "invariant");
+    add_root_info(edge, id);
+  }
+}
+
+ObjectSampleWriter::ObjectSampleWriter(JfrCheckpointWriter& writer, const EdgeStore* store) :
+  _writer(writer),
+  _store(store) {
+  assert(store != NULL, "invariant");
+  assert(store->number_of_entries() > 0, "invariant");
+  sample_infos = NULL;
+  ref_infos = NULL;
+  array_infos = NULL;
+  field_infos = NULL;
+  root_infos = NULL;
+}
+
+ObjectSampleWriter::~ObjectSampleWriter() {
+  write_sample_infos(_writer);
+  write_reference_infos(_writer);
+  write_array_infos(_writer);
+  write_field_infos(_writer);
+  write_root_descriptors(_writer);
+}
+
+void ObjectSampleWriter::write_chain(const RoutableEdge& edge) {
+  assert(EdgeUtils::is_leak_edge(edge), "invariant");
+  if (edge.processed()) {
+    return;
+  }
+  EdgeUtils::collapse_chain(edge);
+  const RoutableEdge* current = &edge;
+  while (current != NULL) {
+    if (current->processed()) {
+      return;
+    }
+    write(current);
+    current->set_processed();
+    current = current->logical_parent();
+  }
+}
+
+bool ObjectSampleWriter::operator()(const RoutableEdge& edge) {
+  if (EdgeUtils::is_leak_edge(edge)) {
+    write_chain(edge);
+  }
+  return true;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleWriter.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLEWRITER_HPP
+#define SHARE_VM_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLEWRITER_HPP
+
+#include "memory/allocation.hpp"
+
+class Edge;
+class EdgeStore;
+class JfrCheckpointWriter;
+class RoutableEdge;
+
+class ObjectSampleWriter : public StackObj {
+ private:
+  JfrCheckpointWriter& _writer;
+  const EdgeStore* const _store;
+
+  void write(const RoutableEdge* edge);
+  void write_chain(const RoutableEdge& edge);
+
+ public:
+  ObjectSampleWriter(JfrCheckpointWriter& writer, const EdgeStore* store);
+  ~ObjectSampleWriter();
+
+  bool operator()(const RoutableEdge& edge);
+};
+
+#endif // SHARE_VM_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLEWRITER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/checkpoint/rootResolver.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,436 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+//#include "classfile/stringTable.hpp"
+//#include "gc_interface/strongRootsScope.hpp"
+#include "jfr/leakprofiler/utilities/unifiedOop.hpp"
+#include "jfr/leakprofiler/checkpoint/rootResolver.hpp"
+#include "memory/iterator.hpp"
+#include "oops/klass.hpp"
+#include "oops/markOop.hpp"
+#include "oops/oop.hpp"
+#include "prims/jvmtiThreadState.hpp"
+#include "prims/privilegedStack.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/vframe_hp.hpp"
+#include "services/management.hpp"
+#include "utilities/growableArray.hpp"
+
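+// OopClosure that forwards each visited reference location to the supplied
+// callback; reporting stops once the callback signals completion.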
+class ReferenceLocateClosure : public OopClosure {
+ protected:
+  RootCallback& _callback;
+  RootCallbackInfo _info;
+  bool _complete;
+
+  void do_oop_shared(const void* ref);
+
+ public:
+  ReferenceLocateClosure(RootCallback& callback,
+                         OldObjectRoot::System system,
+                         OldObjectRoot::Type type,
+                         const void* context) : _callback(callback),
+                                                _info(),
+                                                _complete(false) {
+    _info._high = NULL;
+    _info._low = NULL;
+    _info._system = system;
+    _info._type = type;
+    _info._context = context;
+  }
+
+  virtual void do_oop(oop* ref);
+  virtual void do_oop(narrowOop* ref);
+
+  bool complete() const {
+    return _complete;
+  }
+};
+
+void ReferenceLocateClosure::do_oop_shared(const void* ref) {
+  assert(ref != NULL, "invariant");
+  if (!_complete) {
+    _info._high = ref;
+    _complete = _callback.process(_info);
+  }
+}
+
+void ReferenceLocateClosure::do_oop(oop* ref) {
+  do_oop_shared(ref);
+}
+
+void ReferenceLocateClosure::do_oop(narrowOop* ref) {
+  do_oop_shared(ref);
+}
+
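+// Scans the global root sets (class loader data graph, object synchronizer,
+// Universe, JNI handles, JVMTI, system dictionary, management, string table)
+// until the callback reports completion.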
+class ReferenceToRootClosure : public StackObj {
+ private:
+  RootCallback& _callback;
+  RootCallbackInfo _info;
+  bool _complete;
+
+  bool do_cldg_roots();
+  bool do_object_synchronizer_roots();
+  bool do_universe_roots();
+  bool do_jni_handle_roots();
+  bool do_jvmti_roots();
+  bool do_system_dictionary_roots();
+  bool do_management_roots();
+  bool do_string_table_roots();
+//  bool do_aot_loader_roots();
+
+  bool do_roots();
+
+ public:
+  ReferenceToRootClosure(RootCallback& callback) : _callback(callback),
+                                                   _info(),
+                                                   _complete(false) {
+    _info._high = NULL;
+    _info._low = NULL;
+    _info._context = NULL;
+    _info._system = OldObjectRoot::_system_undetermined;
+    _info._type = OldObjectRoot::_type_undetermined;
+
+    assert_locked_or_safepoint(Threads_lock);
+    do_roots();
+  }
+
+  bool complete() const {
+    return _complete;
+  }
+};
+
+bool ReferenceToRootClosure::do_cldg_roots() {
+  assert(!complete(), "invariant");
+  ReferenceLocateClosure rlc(_callback, OldObjectRoot::_class_loader_data, OldObjectRoot::_type_undetermined, NULL);
+  CLDToOopClosure cldt_closure(&rlc);
+  ClassLoaderDataGraph::always_strong_cld_do(&cldt_closure);
+  return rlc.complete();
+}
+
+bool ReferenceToRootClosure::do_object_synchronizer_roots() {
+  assert(!complete(), "invariant");
+  ReferenceLocateClosure rlc(_callback, OldObjectRoot::_object_synchronizer, OldObjectRoot::_type_undetermined, NULL);
+  ObjectSynchronizer::oops_do(&rlc);
+  return rlc.complete();
+}
+
+bool ReferenceToRootClosure::do_universe_roots() {
+  assert(!complete(), "invariant");
+  ReferenceLocateClosure rlc(_callback, OldObjectRoot::_universe, OldObjectRoot::_type_undetermined, NULL);
+  Universe::oops_do(&rlc);
+  return rlc.complete();
+}
+
+bool ReferenceToRootClosure::do_jni_handle_roots() {
+  assert(!complete(), "invariant");
+  ReferenceLocateClosure rlc(_callback, OldObjectRoot::_global_jni_handles, OldObjectRoot::_global_jni_handle, NULL);
+  JNIHandles::oops_do(&rlc);
+  return rlc.complete();
+}
+
+bool ReferenceToRootClosure::do_jvmti_roots() {
+  assert(!complete(), "invariant");
+  ReferenceLocateClosure rlc(_callback, OldObjectRoot::_jvmti, OldObjectRoot::_global_jni_handle, NULL);
+  JvmtiExport::oops_do(&rlc);
+  return rlc.complete();
+}
+
+bool ReferenceToRootClosure::do_system_dictionary_roots() {
+  assert(!complete(), "invariant");
+  ReferenceLocateClosure rlc(_callback, OldObjectRoot::_system_dictionary, OldObjectRoot::_type_undetermined, NULL);
+  SystemDictionary::oops_do(&rlc);
+  return rlc.complete();
+}
+
+bool ReferenceToRootClosure::do_management_roots() {
+  assert(!complete(), "invariant");
+  ReferenceLocateClosure rlc(_callback, OldObjectRoot::_management, OldObjectRoot::_type_undetermined, NULL);
+  Management::oops_do(&rlc);
+  return rlc.complete();
+}
+
+bool ReferenceToRootClosure::do_string_table_roots() {
+  assert(!complete(), "invariant");
+  ReferenceLocateClosure rlc(_callback, OldObjectRoot::_string_table, OldObjectRoot::_type_undetermined, NULL);
+  StringTable::oops_do(&rlc);
+  return rlc.complete();
+}
+
+//bool ReferenceToRootClosure::do_aot_loader_roots() {
+//  assert(!complete(), "invariant");
+//  ReferenceLocateClosure rcl(_callback, OldObjectRoot::_aot, OldObjectRoot::_type_undetermined, NULL);
+//  AOTLoader::oops_do(&rcl);
+//  return rcl.complete();
+//}
+
+bool ReferenceToRootClosure::do_roots() {
+  assert(!complete(), "invariant");
+  assert(OldObjectRoot::_system_undetermined == _info._system, "invariant");
+  assert(OldObjectRoot::_type_undetermined == _info._type, "invariant");
+
+  if (do_cldg_roots()) {
+    _complete = true;
+    return true;
+  }
+
+  if (do_object_synchronizer_roots()) {
+    _complete = true;
+    return true;
+  }
+
+  if (do_universe_roots()) {
+    _complete = true;
+    return true;
+  }
+
+  if (do_jni_handle_roots()) {
+    _complete = true;
+    return true;
+  }
+
+  if (do_jvmti_roots()) {
+    _complete = true;
+    return true;
+  }
+
+  if (do_system_dictionary_roots()) {
+    _complete = true;
+    return true;
+  }
+
+  if (do_management_roots()) {
+    _complete = true;
+    return true;
+  }
+
+  if (do_string_table_roots()) {
+    _complete = true;
+    return true;
+  }
+
+//  if (do_aot_loader_roots()) {
+//   _complete = true;
+//    return true;
+//  }
+
+  return false;
+}
+
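+// Scans thread-local root areas (stack frames, local JNI handles, handle areas)
+// of every JavaThread until the callback reports completion.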
+class ReferenceToThreadRootClosure : public StackObj {
+ private:
+  RootCallback& _callback;
+  bool _complete;
+
+  bool do_java_threads_oops(JavaThread* jt);
+  bool do_thread_roots(JavaThread* jt);
+  bool do_thread_stack_fast(JavaThread* jt);
+  bool do_thread_stack_detailed(JavaThread* jt);
+  bool do_thread_jni_handles(JavaThread* jt);
+  bool do_thread_handle_area(JavaThread* jt);
+
+ public:
+  ReferenceToThreadRootClosure(RootCallback& callback) : _callback(callback), _complete(false) {
+    assert_locked_or_safepoint(Threads_lock);
+    for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
+      if (do_thread_roots(thread)) {
+        return;
+      }
+    }
+  }
+
+  bool complete() const {
+    return _complete;
+  }
+};
+
+bool ReferenceToThreadRootClosure::do_thread_handle_area(JavaThread* jt) {
+  assert(jt != NULL, "invariant");
+  assert(!complete(), "invariant");
+  ReferenceLocateClosure rcl(_callback, OldObjectRoot::_threads, OldObjectRoot::_handle_area, jt);
+  jt->handle_area()->oops_do(&rcl);
+  return rcl.complete();
+}
+
+bool ReferenceToThreadRootClosure::do_thread_jni_handles(JavaThread* jt) {
+  assert(jt != NULL, "invariant");
+  assert(!complete(), "invariant");
+
+  ReferenceLocateClosure rcl(_callback, OldObjectRoot::_threads, OldObjectRoot::_local_jni_handle, jt);
+  jt->active_handles()->oops_do(&rcl);
+  return rcl.complete();
+}
+
+bool ReferenceToThreadRootClosure::do_thread_stack_fast(JavaThread* jt) {
+  assert(jt != NULL, "invariant");
+  assert(!complete(), "invariant");
+
+  if (_callback.entries() == 0) {
+    _complete = true;
+    return true;
+  }
+
+  RootCallbackInfo info;
+  info._high = NULL;
+  info._low = NULL;
+  info._context = jt;
+  info._system = OldObjectRoot::_threads;
+  info._type = OldObjectRoot::_stack_variable;
+
+  for (int i = 0; i < _callback.entries(); ++i) {
+    const address adr = (address)_callback.at(i);
+    if (jt->is_in_usable_stack(adr)) {
+      info._high = adr;
+      _complete = _callback.process(info);
+      if (_complete) {
+        return true;
+      }
+    }
+  }
+  assert(!complete(), "invariant");
+  return false;
+}
+
+bool ReferenceToThreadRootClosure::do_thread_stack_detailed(JavaThread* jt) {
+  assert(jt != NULL, "invariant");
+  assert(!complete(), "invariant");
+
+  ReferenceLocateClosure rcl(_callback, OldObjectRoot::_threads, OldObjectRoot::_stack_variable, jt);
+
+  if (jt->has_last_Java_frame()) {
+    PrivilegedElement* const pelem = jt->privileged_stack_top();
+    if (pelem != NULL) {
+      pelem->oops_do(&rcl);
+      if (rcl.complete()) {
+        return true;
+      }
+    }
+
+    // The registered growable array gc_array cannot be traversed here;
+    // it is not reachable from outside the thread.
+
+    // Traverse the monitor chunks
+    MonitorChunk* chunk = jt->monitor_chunks();
+    for (; chunk != NULL; chunk = chunk->next()) {
+      chunk->oops_do(&rcl);
+    }
+
+    if (rcl.complete()) {
+      return true;
+    }
+
+    // Traverse the execution stack
+    for (StackFrameStream fst(jt); !fst.is_done(); fst.next()) {
+      // XXX set CLDClosure to NULL
+      fst.current()->oops_do(&rcl, NULL, NULL, fst.register_map());
+    }
+
+  } // last java frame
+
+  if (rcl.complete()) {
+    return true;
+  }
+
+  GrowableArray<jvmtiDeferredLocalVariableSet*>* const list = jt->deferred_locals();
+  if (list != NULL) {
+    for (int i = 0; i < list->length(); i++) {
+      list->at(i)->oops_do(&rcl);
+    }
+  }
+
+  if (rcl.complete()) {
+    return true;
+  }
+
+  // Traverse instance variables at the end since the GC may be moving things
+  // around using this function
+  /*
+   * // can't reach these oop* from the outside
+   * f->do_oop((oop*) &_threadObj);
+   * f->do_oop((oop*) &_vm_result);
+   * f->do_oop((oop*) &_exception_oop);
+   * f->do_oop((oop*) &_pending_async_exception);
+   */
+
+  JvmtiThreadState* const jvmti_thread_state = jt->jvmti_thread_state();
+  if (jvmti_thread_state != NULL) {
+    jvmti_thread_state->oops_do(&rcl);
+  }
+
+  return rcl.complete();
+}
+
+bool ReferenceToThreadRootClosure::do_java_threads_oops(JavaThread* jt) {
+  assert(jt != NULL, "invariant");
+  assert(!complete(), "invariant");
+
+  ReferenceLocateClosure rcl(_callback, OldObjectRoot::_threads, OldObjectRoot::_global_jni_handle, jt);
+  jt->oops_do(&rcl, NULL, NULL);
+  return rcl.complete();
+}
+
+bool ReferenceToThreadRootClosure::do_thread_roots(JavaThread* jt) {
+  assert(jt != NULL, "invariant");
+
+  if (do_thread_stack_fast(jt)) {
+    _complete = true;
+    return true;
+  }
+
+  if (do_thread_jni_handles(jt)) {
+    _complete = true;
+    return true;
+  }
+
+  if (do_thread_handle_area(jt)) {
+    _complete = true;
+    return true;
+  }
+
+  if (do_thread_stack_detailed(jt)) {
+    _complete = true;
+    return true;
+  }
+
+  return false;
+}
+
+class RootResolverMarkScope : public MarkingCodeBlobClosure::MarkScope {
+};
+
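+// Thread-local roots are scanned first; the global root sets are only
+// consulted if no match was found among the thread roots.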
+void RootResolver::resolve(RootCallback& callback) {
+
+  // Need to clear cld claim bit before starting
+  ClassLoaderDataGraph::clear_claimed_marks();
+  RootResolverMarkScope mark_scope;
+
+  // thread local roots
+  ReferenceToThreadRootClosure rtrc(callback);
+  if (rtrc.complete()) {
+    return;
+  }
+  // system global roots
+  ReferenceToRootClosure rrc(callback);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/checkpoint/rootResolver.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_LEAKPROFILER_CHECKPOINT_ROOTRESOLVER_HPP
+#define SHARE_VM_JFR_LEAKPROFILER_CHECKPOINT_ROOTRESOLVER_HPP
+
+#include "memory/allocation.hpp"
+#include "jfr/leakprofiler/utilities/rootType.hpp"
+#include "oops/oopsHierarchy.hpp"
+
+struct RootCallbackInfo {
+  const void* _high;
+  const void* _low;
+  const void* _context;
+  OldObjectRoot::System _system;
+  OldObjectRoot::Type _type;
+};
+
+class RootCallback {
+ public:
+  virtual bool process(const RootCallbackInfo& info) = 0;
+  virtual int entries() const = 0;
+  virtual const void* at(int idx) const = 0;
+};
+
+class RootResolver : public AllStatic {
+ public:
+  static void resolve(RootCallback& callback);
+};
+
+#endif // SHARE_VM_JFR_LEAKPROFILER_CHECKPOINT_ROOTRESOLVER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/emitEventOperation.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,233 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+#include "gc_interface/collectedHeap.hpp"
+#include "jfr/jfrEvents.hpp"
+#include "jfr/leakprofiler/utilities/granularTimer.hpp"
+#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
+#include "jfr/leakprofiler/chains/edge.hpp"
+#include "jfr/leakprofiler/chains/edgeQueue.hpp"
+#include "jfr/leakprofiler/chains/edgeStore.hpp"
+#include "jfr/leakprofiler/chains/bitset.hpp"
+#include "jfr/leakprofiler/sampling/objectSample.hpp"
+#include "jfr/leakprofiler/leakProfiler.hpp"
+#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
+#include "jfr/leakprofiler/sampling/objectSampler.hpp"
+#include "jfr/leakprofiler/emitEventOperation.hpp"
+#include "jfr/leakprofiler/chains/bfsClosure.hpp"
+#include "jfr/leakprofiler/chains/dfsClosure.hpp"
+#include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
+#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
+#include "jfr/support/jfrThreadId.hpp"
+#include "memory/resourceArea.hpp"
+#include "memory/universe.hpp"
+#include "oops/markOop.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/vmThread.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+/* The EdgeQueue is backed by directly managed virtual memory.
+ * We will attempt to dimension an initial reservation
+ * in proportion to the size of the heap (represented by heap_region).
+ * Initial memory reservation: 5% of the heap OR at least 32 MB
+ * Commit ratio: 1 : 10 (subject to allocation granularities)
+ */
+static size_t edge_queue_memory_reservation(const MemRegion& heap_region) {
+  const size_t memory_reservation_bytes = MAX2(heap_region.byte_size() / 20, 32*M);
+  assert(memory_reservation_bytes >= (size_t)32*M, "invariant");
+  return memory_reservation_bytes;
+}
+
+static size_t edge_queue_memory_commit_size(size_t memory_reservation_bytes) {
+  const size_t memory_commit_block_size_bytes = memory_reservation_bytes / 10;
+  assert(memory_commit_block_size_bytes >= (size_t)3*M, "invariant");
+  return memory_commit_block_size_bytes;
+}
+
+static void log_edge_queue_summary(const EdgeQueue& edge_queue) {
+  if (LogJFR && Verbose) tty->print_cr("EdgeQueue reserved size total: " SIZE_FORMAT " [KB]", edge_queue.reserved_size() / K);
+  if (LogJFR && Verbose) tty->print_cr("EdgeQueue edges total: " SIZE_FORMAT, edge_queue.top());
+  if (LogJFR && Verbose) tty->print_cr("EdgeQueue liveset total: " SIZE_FORMAT " [KB]", edge_queue.live_set() / K);
+  if (edge_queue.reserved_size() > 0) {
+    if (LogJFR && Verbose) tty->print_cr("EdgeQueue commit reserve ratio: %f\n",
+      ((double)edge_queue.live_set() / (double)edge_queue.reserved_size()));
+  }
+}
+
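+// Executed at a safepoint: marks the sampled objects, searches for reference
+// chains from the root set within the cutoff, and then emits the events.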
+void EmitEventOperation::doit() {
+  assert(LeakProfiler::is_running(), "invariant");
+  _object_sampler = LeakProfiler::object_sampler();
+  assert(_object_sampler != NULL, "invariant");
+
+  _vm_thread = VMThread::vm_thread();
+  assert(_vm_thread == Thread::current(), "invariant");
+  _vm_thread_local = _vm_thread->jfr_thread_local();
+  assert(_vm_thread_local != NULL, "invariant");
+  assert(_vm_thread->jfr_thread_local()->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant");
+
+  // The VM_Operation::evaluate() which invoked doit()
+  // contains a top level ResourceMark
+
+  // save the original markWord for the potential leak objects
+  // to be restored on function exit
+  ObjectSampleMarker marker;
+  if (ObjectSampleCheckpoint::mark(marker, _emit_all) == 0) {
+    return;
+  }
+
+  EdgeStore edge_store;
+
+  GranularTimer::start(_cutoff_ticks, 1000000);
+  if (_cutoff_ticks <= 0) {
+    // no chains
+    write_events(&edge_store);
+    return;
+  }
+
+  assert(_cutoff_ticks > 0, "invariant");
+
+  // The bitset used for marking is dimensioned as a function of the heap size
+  const MemRegion heap_region = Universe::heap()->reserved_region();
+  BitSet mark_bits(heap_region);
+
+  // The edge queue is dimensioned as a fraction of the heap size
+  const size_t edge_queue_reservation_size = edge_queue_memory_reservation(heap_region);
+  EdgeQueue edge_queue(edge_queue_reservation_size, edge_queue_memory_commit_size(edge_queue_reservation_size));
+
+  // The initialize() routines will attempt to reserve and allocate backing storage memory.
+  // Failure to accommodate will render root chain processing impossible.
+  // As a fallback on failure, just write out the existing samples, flat, without chains.
+  if (!(mark_bits.initialize() && edge_queue.initialize())) {
+    if (LogJFR) tty->print_cr("Unable to allocate memory for root chain processing");
+    write_events(&edge_store);
+    return;
+  }
+
+  // necessary condition for attempting a root set iteration
+  Universe::heap()->ensure_parsability(false);
+
+  RootSetClosure::add_to_queue(&edge_queue);
+  if (edge_queue.is_full()) {
+    // Pathological case where roots don't fit in queue
+    // Do a depth-first search, but mark roots first
+    // to avoid walking sideways over roots
+    DFSClosure::find_leaks_from_root_set(&edge_store, &mark_bits);
+  } else {
+    BFSClosure bfs(&edge_queue, &edge_store, &mark_bits);
+    bfs.process();
+  }
+  GranularTimer::stop();
+  write_events(&edge_store);
+  log_edge_queue_summary(edge_queue);
+}
+
+int EmitEventOperation::write_events(EdgeStore* edge_store) {
+  assert(_object_sampler != NULL, "invariant");
+  assert(edge_store != NULL, "invariant");
+  assert(_vm_thread != NULL, "invariant");
+  assert(_vm_thread_local != NULL, "invariant");
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+
+  // save thread id in preparation for thread local trace data manipulations
+  const traceid vmthread_id = _vm_thread_local->thread_id();
+  assert(_vm_thread_local->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant");
+
+  const jlong last_sweep = _emit_all ? max_jlong : _object_sampler->last_sweep().value();
+  int count = 0;
+
+  for (int i = 0; i < _object_sampler->item_count(); ++i) {
+    const ObjectSample* sample = _object_sampler->item_at(i);
+    if (sample->is_alive_and_older_than(last_sweep)) {
+      write_event(sample, edge_store);
+      ++count;
+    }
+  }
+
+  // restore thread local stack trace and thread id
+  _vm_thread_local->set_thread_id(vmthread_id);
+  _vm_thread_local->clear_cached_stack_trace();
+  assert(_vm_thread_local->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant");
+
+  if (count > 0) {
+    // serialize associated checkpoints
+    ObjectSampleCheckpoint::write(edge_store, _emit_all, _vm_thread);
+  }
+  return count;
+}
+
+static int array_size(const oop object) {
+  assert(object != NULL, "invariant");
+  if (object->is_array()) {
+    return arrayOop(object)->length();
+  }
+  return -1;
+}
+
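+// Emit a single OldObjectSample event; if no reference chain was constructed
+// for the object, a flat, top-level edge is registered so the sample is still
+// represented.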
+void EmitEventOperation::write_event(const ObjectSample* sample, EdgeStore* edge_store) {
+  assert(sample != NULL, "invariant");
+  assert(!sample->is_dead(), "invariant");
+  assert(edge_store != NULL, "invariant");
+  assert(_vm_thread_local != NULL, "invariant");
+  const oop* object_addr = sample->object_addr();
+  assert(*object_addr != NULL, "invariant");
+
+  const Edge* edge = (const Edge*)(*object_addr)->mark();
+  traceid gc_root_id = 0;
+  if (edge == NULL) {
+    // In order to dump out a representation of the sample even though
+    // the object was not reachable (or the chain was too long to follow),
+    // we need to register a top-level edge for this object.
+    Edge e(NULL, object_addr);
+    edge_store->add_chain(&e, 1);
+    edge = (const Edge*)(*object_addr)->mark();
+  } else {
+    gc_root_id = edge_store->get_root_id(edge);
+  }
+
+  assert(edge != NULL, "invariant");
+  assert(edge->pointee() == *object_addr, "invariant");
+  const traceid object_id = edge_store->get_id(edge);
+  assert(object_id != 0, "invariant");
+
+  EventOldObjectSample e(UNTIMED);
+  e.set_starttime(GranularTimer::start_time());
+  e.set_endtime(GranularTimer::end_time());
+  e.set_allocationTime(sample->allocation_time());
+  e.set_lastKnownHeapUsage(sample->heap_used_at_last_gc());
+  e.set_object(object_id);
+  e.set_arrayElements(array_size(*object_addr));
+  e.set_root(gc_root_id);
+
+  // Temporarily assigning both the stack trace id and thread id
+  // onto the thread local data structure of the VMThread (for the duration
+  // of the commit() call). This trick provides a means to override
+  // the event generation mechanism by injecting externally provided id's.
+  // Here, in particular, this allows us to emit an old object event
+  // supplying information from where the actual sampling occurred.
+  _vm_thread_local->set_cached_stack_trace_id(sample->stack_trace_id());
+  assert(sample->has_thread(), "invariant");
+  _vm_thread_local->set_thread_id(sample->thread_id());
+  e.commit();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/emitEventOperation.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_LEAKPROFILER_EMITEVENTOPERATION_HPP
+#define SHARE_VM_LEAKPROFILER_EMITEVENTOPERATION_HPP
+
+#include "runtime/vm_operations.hpp"
+
+class BFSClosure;
+class EdgeStore;
+class EdgeQueue;
+class JfrThreadData;
+class ObjectSample;
+class ObjectSampler;
+
+// Safepoint operation for emitting object sample events
+class EmitEventOperation : public VM_Operation {
+ private:
+  jlong _cutoff_ticks;
+  bool _emit_all;
+  VMThread* _vm_thread;
+  JfrThreadLocal* _vm_thread_local;
+  ObjectSampler* _object_sampler;
+
+  void write_event(const ObjectSample* sample, EdgeStore* edge_store);
+  int write_events(EdgeStore* edge_store);
+
+ public:
+  EmitEventOperation(jlong cutoff_ticks, bool emit_all) :
+    _cutoff_ticks(cutoff_ticks),
+    _emit_all(emit_all),
+    _vm_thread(NULL),
+    _vm_thread_local(NULL),
+    _object_sampler(NULL) {
+  }
+
+  VMOp_Type type() const {
+    return VMOp_GC_HeapInspection;
+  }
+
+  Mode evaluation_mode() const {
+    return _safepoint;
+  }
+
+  virtual void doit();
+};
+
+#endif // SHARE_VM_LEAKPROFILER_EMITEVENTOPERATION_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/leakProfiler.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/leakprofiler/emitEventOperation.hpp"
+#include "jfr/leakprofiler/leakProfiler.hpp"
+#include "jfr/leakprofiler/startOperation.hpp"
+#include "jfr/leakprofiler/stopOperation.hpp"
+#include "jfr/leakprofiler/sampling/objectSampler.hpp"
+#include "jfr/recorder/service/jfrOptionSet.hpp"
+#include "memory/iterator.hpp"
+#include "oops/oop.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/orderAccess.hpp"
+#include "runtime/thread.inline.hpp"
+#include "runtime/vmThread.hpp"
+#include "utilities/ostream.hpp"
+
+// Only to be updated during safepoint
+ObjectSampler* LeakProfiler::_object_sampler = NULL;
+
+static volatile jbyte suspended = 0;
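+// Starts the object sampler via a VM operation; a sample_count of zero
+// leaves the leak profiler disabled.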
+bool LeakProfiler::start(jint sample_count) {
+  if (_object_sampler != NULL) {
+    // already started
+    return true;
+  }
+  // Allows the user to disable the leak profiler on the command line by setting the queue size to zero.
+  if (sample_count > 0) {
+    StartOperation op(sample_count);
+    VMThread::execute(&op);
+    return _object_sampler != NULL;
+  }
+  return false;
+}
+
+bool LeakProfiler::stop() {
+  if (_object_sampler == NULL) {
+    // already stopped/not started
+    return true;
+  }
+  StopOperation op;
+  VMThread::execute(&op);
+  return _object_sampler == NULL;
+}
+
+void LeakProfiler::emit_events(jlong cutoff_ticks, bool emit_all) {
+  if (!is_running()) {
+    return;
+  }
+  EmitEventOperation op(cutoff_ticks, emit_all);
+  VMThread::execute(&op);
+}
+
+void LeakProfiler::oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
+  assert(SafepointSynchronize::is_at_safepoint(),
+    "Leak Profiler::oops_do(...) may only be called during safepoint");
+
+  if (_object_sampler != NULL) {
+    _object_sampler->oops_do(is_alive, f);
+  }
+}
+
+void LeakProfiler::sample(HeapWord* object,
+                          size_t size,
+                          JavaThread* thread) {
+  assert(is_running(), "invariant");
+  assert(thread != NULL, "invariant");
+  assert(thread->thread_state() == _thread_in_vm, "invariant");
+
+  // exclude compiler threads and code sweeper thread
+  if (thread->is_hidden_from_external_view()) {
+    return;
+  }
+
+  _object_sampler->add(object, size, thread);
+}
+
+ObjectSampler* LeakProfiler::object_sampler() {
+  assert(is_suspended() || SafepointSynchronize::is_at_safepoint(),
+    "Leak Profiler::object_sampler() may only be called during safepoint");
+  return _object_sampler;
+}
+
+void LeakProfiler::set_object_sampler(ObjectSampler* object_sampler) {
+  assert(SafepointSynchronize::is_at_safepoint(),
+    "Leak Profiler::set_object_sampler() may only be called during safepoint");
+  _object_sampler = object_sampler;
+}
+
+bool LeakProfiler::is_running() {
+  return _object_sampler != NULL && !suspended;
+}
+
+bool LeakProfiler::is_suspended() {
+  return _object_sampler != NULL && suspended;
+}
+
+void LeakProfiler::resume() {
+  assert(is_suspended(), "invariant");
+  OrderAccess::storestore();
+  Atomic::store((jbyte)0, &suspended);
+  assert(is_running(), "invariant");
+}
+
+void LeakProfiler::suspend() {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  assert(_object_sampler != NULL, "invariant");
+  assert(!is_suspended(), "invariant");
+  suspended = (jbyte)1; // safepoint visible
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/leakProfiler.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_LEAKPROFILER_LEAKPROFILER_HPP
+#define SHARE_VM_JFR_LEAKPROFILER_LEAKPROFILER_HPP
+
+#include "memory/allocation.hpp"
+
+class BoolObjectClosure;
+class ObjectSampler;
+class OopClosure;
+class Thread;
+
+class LeakProfiler : public AllStatic {
+  friend class ClassUnloadTypeSet;
+  friend class EmitEventOperation;
+  friend class ObjectSampleCheckpoint;
+  friend class StartOperation;
+  friend class StopOperation;
+  friend class TypeSet;
+  friend class WriteObjectSampleStacktrace;
+
+ private:
+  static ObjectSampler* _object_sampler;
+
+  static void set_object_sampler(ObjectSampler* object_sampler);
+  static ObjectSampler* object_sampler();
+
+  static void suspend();
+  static void resume();
+  static bool is_suspended();
+
+ public:
+  static bool start(jint sample_count);
+  static bool stop();
+  static void emit_events(jlong cutoff_ticks, bool emit_all);
+  static bool is_running();
+
+  static void sample(HeapWord* object, size_t size, JavaThread* thread);
+
+  // Called by GC
+  static void oops_do(BoolObjectClosure* is_alive, OopClosure* f);
+};
+
+#endif // SHARE_VM_JFR_LEAKPROFILER_LEAKPROFILER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/sampling/objectSample.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_LEAKPROFILER_SAMPLING_OBJECTSAMPLE_HPP
+#define SHARE_VM_JFR_LEAKPROFILER_SAMPLING_OBJECTSAMPLE_HPP
+
+#include "jfr/recorder/checkpoint/jfrCheckpointBlob.hpp"
+#include "jfr/utilities/jfrAllocation.hpp"
+#include "jfr/utilities/jfrTime.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+#include "memory/allocation.hpp"
+#include "oops/oop.hpp"
+#include "utilities/ticks.hpp"
+/*
+ * Handle for diagnosing Java memory leaks.
+ *
+ * The class tracks the time the object was allocated,
+ * the allocating thread, and the stack trace.
+ */
+class ObjectSample : public JfrCHeapObj {
+  friend class ObjectSampler;
+  friend class SampleList;
+ private:
+  ObjectSample* _next;
+  ObjectSample* _previous;
+  JfrCheckpointBlobHandle _thread_cp;
+  JfrCheckpointBlobHandle _klass_cp;
+  oop _object;
+  Ticks _allocation_time;
+  traceid _stack_trace_id;
+  traceid _thread_id;
+  int _index;
+  size_t _span;
+  size_t _allocated;
+  size_t _heap_used_at_last_gc;
+  unsigned int _stack_trace_hash;
+  bool _dead;
+
+  void set_dead() {
+    _dead = true;
+  }
+
+  void release_references() {
+    if (_thread_cp.valid()) {
+      _thread_cp.~JfrCheckpointBlobHandle();
+    }
+    if (_klass_cp.valid()) {
+      _klass_cp.~JfrCheckpointBlobHandle();
+    }
+  }
+
+  void reset() {
+    set_stack_trace_id(0);
+    set_stack_trace_hash(0);
+    release_references();
+    _dead = false;
+  }
+
+ public:
+  ObjectSample() : _next(NULL),
+                   _previous(NULL),
+                   _thread_cp(),
+                   _klass_cp(),
+                   _object(NULL),
+                   _allocation_time(),
+                   _stack_trace_id(0),
+                   _thread_id(0),
+                   _index(0),
+                   _span(0),
+                   _allocated(0),
+                   _heap_used_at_last_gc(0),
+                   _stack_trace_hash(0),
+                   _dead(false) {}
+
+  ObjectSample* next() const {
+    return _next;
+  }
+
+  void set_next(ObjectSample* next) {
+    _next = next;
+  }
+
+  ObjectSample* prev() const {
+    return _previous;
+  }
+
+  void set_prev(ObjectSample* prev) {
+    _previous = prev;
+  }
+
+  bool is_dead() const {
+    return _dead;
+  }
+
+  const oop object() const {
+    return _object;
+  }
+
+  const oop* object_addr() const {
+    return &_object;
+  }
+
+  void set_object(oop object) {
+    _object = object;
+  }
+
+  const Klass* klass() const {
+    assert(_object != NULL, "invariant");
+    return _object->klass();
+  }
+
+  int index() const {
+    return _index;
+  }
+
+  void set_index(int index) {
+    _index = index;
+  }
+
+  size_t span() const {
+    return _span;
+  }
+
+  void set_span(size_t span) {
+    _span = span;
+  }
+
+  void add_span(size_t span) {
+    _span += span;
+  }
+
+  size_t allocated() const {
+    return _allocated;
+  }
+
+  void set_allocated(size_t size) {
+    _allocated = size;
+  }
+
+  const Ticks& allocation_time() const {
+    return _allocation_time;
+  }
+
+  void set_allocation_time(const JfrTicks& time) {
+    _allocation_time = Ticks(time.value());
+  }
+
+  void set_heap_used_at_last_gc(size_t heap_used) {
+    _heap_used_at_last_gc = heap_used;
+  }
+
+  size_t heap_used_at_last_gc() const {
+    return _heap_used_at_last_gc;
+  }
+
+  bool has_stack_trace() const {
+    return stack_trace_id() != 0;
+  }
+
+  traceid stack_trace_id() const {
+    return _stack_trace_id;
+  }
+
+  void set_stack_trace_id(traceid id) {
+    _stack_trace_id = id;
+  }
+
+  unsigned int stack_trace_hash() const {
+    return _stack_trace_hash;
+  }
+
+  void set_stack_trace_hash(unsigned int hash) {
+    _stack_trace_hash = hash;
+  }
+
+  bool has_thread() const {
+    return _thread_id != 0;
+  }
+
+  traceid thread_id() const {
+    return _thread_id;
+  }
+
+  void set_thread_id(traceid id) {
+    _thread_id = id;
+  }
+
+  bool is_alive_and_older_than(jlong time_stamp) const {
+    return !is_dead() && (JfrTime::is_ft_enabled() ?
+      _allocation_time.ft_value() : _allocation_time.value()) < time_stamp;
+  }
+
+  const JfrCheckpointBlobHandle& thread_checkpoint() const {
+    return _thread_cp;
+  }
+
+  bool has_thread_checkpoint() const {
+    return _thread_cp.valid();
+  }
+
+  // JfrCheckpointBlobHandle assignment operator
+  // maintains proper reference counting
+  void set_thread_checkpoint(const JfrCheckpointBlobHandle& ref) {
+    if (_thread_cp != ref) {
+      _thread_cp = ref;
+    }
+  }
+
+  const JfrCheckpointBlobHandle& klass_checkpoint() const {
+    return _klass_cp;
+  }
+
+  bool has_klass_checkpoint() const {
+    return _klass_cp.valid();
+  }
+
+  void set_klass_checkpoint(const JfrCheckpointBlobHandle& ref) {
+    if (_klass_cp != ref) {
+      if (_klass_cp.valid()) {
+        _klass_cp->set_next(ref);
+        return;
+      }
+      _klass_cp = ref;
+    }
+  }
+};
+
+#endif // SHARE_VM_JFR_LEAKPROFILER_SAMPLING_OBJECTSAMPLE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/sampling/objectSampler.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+#include "jfr/jfrEvents.hpp"
+#include "jfr/leakprofiler/sampling/objectSample.hpp"
+#include "jfr/leakprofiler/sampling/objectSampler.hpp"
+#include "jfr/leakprofiler/sampling/sampleList.hpp"
+#include "jfr/leakprofiler/sampling/samplePriorityQueue.hpp"
+#include "jfr/recorder/jfrEventSetting.inline.hpp"
+#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
+#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
+#include "jfr/support/jfrThreadLocal.hpp"
+#include "jfr/utilities/jfrTryLock.hpp"
+#include "memory/universe.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/thread.hpp"
+
+ObjectSampler::ObjectSampler(size_t size) :
+  _priority_queue(new SamplePriorityQueue(size)),
+  _list(new SampleList(size)),
+  _last_sweep(JfrTicks::now()),
+  _total_allocated(0),
+  _threshold(0),
+  _size(size),
+  _tryLock(0),
+  _dead_samples(false) {}
+
+ObjectSampler::~ObjectSampler() {
+  delete _priority_queue;
+  _priority_queue = NULL;
+  delete _list;
+  _list = NULL;
+}
+
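+// Adds a sample under a try-lock; when the sampler is full, the queued sample
+// with the smallest span is evicted, provided the incoming span is at least as large.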
+void ObjectSampler::add(HeapWord* obj, size_t allocated, JavaThread* thread) {
+  assert(thread != NULL, "invariant");
+  const traceid thread_id = thread->threadObj() != NULL ? thread->jfr_thread_local()->thread_id() : 0;
+  if (thread_id == 0) {
+    return;
+  }
+  assert(thread_id != 0, "invariant");
+
+  if (!thread->jfr_thread_local()->has_thread_checkpoint()) {
+    JfrCheckpointManager::create_thread_checkpoint(thread);
+    assert(thread->jfr_thread_local()->has_thread_checkpoint(), "invariant");
+  }
+
+  traceid stack_trace_id = 0;
+  unsigned int stack_trace_hash = 0;
+  if (JfrEventSetting::has_stacktrace(EventOldObjectSample::eventId)) {
+    stack_trace_id = JfrStackTraceRepository::record(thread, 0, &stack_trace_hash);
+    thread->jfr_thread_local()->set_cached_stack_trace_id(stack_trace_id, stack_trace_hash);
+  }
+
+  JfrTryLock tryLock(&_tryLock);
+  if (!tryLock.has_lock()) {
+    if (LogJFR && Verbose) tty->print_cr("Skipping old object sample due to lock contention");
+    return;
+  }
+
+  if (_dead_samples) {
+    scavenge();
+    assert(!_dead_samples, "invariant");
+  }
+
+  _total_allocated += allocated;
+  const size_t span = _total_allocated - _priority_queue->total();
+  ObjectSample* sample;
+  if ((size_t)_priority_queue->count() == _size) {
+    assert(_list->count() == _size, "invariant");
+    const ObjectSample* peek = _priority_queue->peek();
+    if (peek->span() > span) {
+      // quick reject, will not fit
+      return;
+    }
+    sample = _list->reuse(_priority_queue->pop());
+  } else {
+    sample = _list->get();
+  }
+
+  assert(sample != NULL, "invariant");
+  assert(thread_id != 0, "invariant");
+  sample->set_thread_id(thread_id);
+  sample->set_thread_checkpoint(thread->jfr_thread_local()->thread_checkpoint());
+
+  if (stack_trace_id != 0) {
+    sample->set_stack_trace_id(stack_trace_id);
+    sample->set_stack_trace_hash(stack_trace_hash);
+  }
+
+  sample->set_span(allocated);
+  sample->set_object((oop)obj);
+  sample->set_allocated(allocated);
+  sample->set_allocation_time(JfrTicks::now());
+  sample->set_heap_used_at_last_gc(Universe::get_heap_used_at_last_gc());
+  _priority_queue->push(sample);
+}
+
+const ObjectSample* ObjectSampler::last() const {
+  return _list->last();
+}
+
+const ObjectSample* ObjectSampler::last_resolved() const {
+  return _list->last_resolved();
+}
+
+void ObjectSampler::set_last_resolved(const ObjectSample* sample) {
+  _list->set_last_resolved(sample);
+}
+
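+// GC liveness callback: references of live samples are updated in place,
+// dead samples are flagged for removal by a later scavenge.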
+void ObjectSampler::oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
+  ObjectSample* current = _list->last();
+  while (current != NULL) {
+    ObjectSample* next = current->next();
+    if (!current->is_dead()) {
+      if (is_alive->do_object_b(current->object())) {
+        // The weakly referenced object is alive, update pointer
+        f->do_oop(const_cast<oop*>(current->object_addr()));
+      } else {
+        current->set_dead();
+        _dead_samples = true;
+      }
+    }
+    current = next;
+  }
+  _last_sweep = JfrTicks::now();
+}
+
+void ObjectSampler::remove_dead(ObjectSample* sample) {
+  assert(sample != NULL, "invariant");
+  assert(sample->is_dead(), "invariant");
+  ObjectSample* const previous = sample->prev();
+  // push span on to previous
+  if (previous != NULL) {
+    _priority_queue->remove(previous);
+    previous->add_span(sample->span());
+    _priority_queue->push(previous);
+  }
+  _priority_queue->remove(sample);
+  _list->release(sample);
+}
+
+void ObjectSampler::scavenge() {
+  ObjectSample* current = _list->last();
+  while (current != NULL) {
+    ObjectSample* next = current->next();
+    if (current->is_dead()) {
+      remove_dead(current);
+    }
+    current = next;
+  }
+  _dead_samples = false;
+}
+
+int ObjectSampler::item_count() const {
+  return _priority_queue->count();
+}
+
+const ObjectSample* ObjectSampler::item_at(int index) const {
+  return _priority_queue->item_at(index);
+}
+
+ObjectSample* ObjectSampler::item_at(int index) {
+  return const_cast<ObjectSample*>(
+    const_cast<const ObjectSampler*>(this)->item_at(index)
+                                   );
+}
+
+const JfrTicks& ObjectSampler::last_sweep() const {
+  return _last_sweep;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/sampling/objectSampler.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_LEAKPROFILER_SAMPLING_OBJECTSAMPLER_HPP
+#define SHARE_VM_LEAKPROFILER_SAMPLING_OBJECTSAMPLER_HPP
+
+#include "memory/allocation.hpp"
+#include "jfr/utilities/jfrTime.hpp"
+
+class BoolObjectClosure;
+class OopClosure;
+class ObjectSample;
+class ObjectSampler;
+class SampleList;
+class SamplePriorityQueue;
+class Thread;
+
+// Class responsible for holding samples and
+// making sure the samples are evenly distributed as
+// new entries are added and removed.
+class ObjectSampler : public CHeapObj<mtTracing> {
+  friend class LeakProfiler;
+  friend class ObjectSampleCheckpoint;
+  friend class StartOperation;
+  friend class StopOperation;
+  friend class EmitEventOperation;
+ private:
+  SamplePriorityQueue* _priority_queue;
+  SampleList* _list;
+  JfrTicks _last_sweep;
+  size_t _total_allocated;
+  size_t _threshold;
+  size_t _size;
+  volatile int _tryLock;
+  bool _dead_samples;
+
+  explicit ObjectSampler(size_t size);
+  ~ObjectSampler();
+
+  void add(HeapWord* object, size_t size, JavaThread* thread);
+  void remove_dead(ObjectSample* sample);
+  void scavenge();
+
+  // Called by GC
+  void oops_do(BoolObjectClosure* is_alive, OopClosure* f);
+
+ public:
+  const ObjectSample* item_at(int index) const;
+  ObjectSample* item_at(int index);
+  int item_count() const;
+  const ObjectSample* last() const;
+  const ObjectSample* last_resolved() const;
+  void set_last_resolved(const ObjectSample* sample);
+  const JfrTicks& last_sweep() const;
+};
+
+#endif // SHARE_VM_LEAKPROFILER_SAMPLING_OBJECTSAMPLER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/sampling/sampleList.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/leakprofiler/sampling/objectSample.hpp"
+#include "jfr/leakprofiler/sampling/sampleList.hpp"
+#include "oops/oop.inline.hpp"
+
+SampleList::SampleList(size_t limit, size_t cache_size) :
+  _free_list(),
+  _in_use_list(),
+  _last_resolved(NULL),
+  _allocated(0),
+  _limit(limit),
+  _cache_size(cache_size) {
+}
+
+SampleList::~SampleList() {
+  deallocate_samples(_free_list);
+  deallocate_samples(_in_use_list);
+}
+
+ObjectSample* SampleList::last() const {
+  return _in_use_list.head();
+}
+
+const ObjectSample* SampleList::last_resolved() const {
+  return _last_resolved;
+}
+
+void SampleList::set_last_resolved(const ObjectSample* sample) {
+  assert(last() == sample, "invariant");
+  _last_resolved = sample;
+}
+
+void SampleList::link(ObjectSample* sample) {
+  assert(sample != NULL, "invariant");
+  _in_use_list.prepend(sample);
+}
+
+void SampleList::unlink(ObjectSample* sample) {
+  assert(sample != NULL, "invariant");
+  if (_last_resolved == sample) {
+    _last_resolved = sample->next();
+  }
+  reset(_in_use_list.remove(sample));
+}
+
+ObjectSample* SampleList::reuse(ObjectSample* sample) {
+  assert(sample != NULL, "invariant");
+  unlink(sample);
+  link(sample);
+  return sample;
+}
+
+void SampleList::populate_cache() {
+  if (_free_list.count() < _cache_size) {
+    const size_t cache_delta = _cache_size - _free_list.count();
+    for (size_t i = 0; i < cache_delta; ++i) {
+      ObjectSample* sample = newSample();
+      if (sample != NULL) {
+        _free_list.append(sample);
+      }
+    }
+  }
+}
+
+ObjectSample* SampleList::newSample() const {
+  if (_limit == _allocated) {
+    return NULL;
+  }
+  ++_allocated;
+  return new ObjectSample();
+}
+
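+// Hands out a sample from the free list when available, otherwise allocates a
+// new one (up to the configured limit); keeps the cache topped up when caching
+// is enabled.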
+ObjectSample* SampleList::get() {
+  ObjectSample* sample = _free_list.head();
+  if (sample != NULL) {
+    link(_free_list.remove(sample));
+  } else {
+    sample = newSample();
+    if (sample != NULL) {
+      _in_use_list.prepend(sample);
+    }
+  }
+  if (_cache_size > 0 && sample != NULL) {
+    populate_cache();
+  }
+  return sample;
+}
+
+void SampleList::release(ObjectSample* sample) {
+  assert(sample != NULL, "invariant");
+  unlink(sample);
+  _free_list.append(sample);
+}
+
+void SampleList::deallocate_samples(List& list) {
+  if (list.count() > 0) {
+    ObjectSample* sample = list.head();
+    while (sample != NULL) {
+      list.remove(sample);
+      delete sample;
+      sample = list.head();
+    }
+  }
+  assert(list.count() == 0, "invariant");
+}
+
+void SampleList::reset(ObjectSample* sample) {
+  assert(sample != NULL, "invariant");
+  sample->reset();
+}
+
+bool SampleList::is_full() const {
+  return _in_use_list.count() == _limit;
+}
+
+size_t SampleList::count() const {
+  return _in_use_list.count();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/sampling/sampleList.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_LEAKPROFILER_SAMPLING_SAMPLELIST_HPP
+#define SHARE_VM_JFR_LEAKPROFILER_SAMPLING_SAMPLELIST_HPP
+#include "jfr/utilities/jfrAllocation.hpp"
+#include "jfr/utilities/jfrDoublyLinkedList.hpp"
+
+class ObjectSample;
+
+class SampleList : public JfrCHeapObj {
+  typedef JfrDoublyLinkedList<ObjectSample> List;
+ private:
+  List _free_list;
+  List _in_use_list;
+  const ObjectSample* _last_resolved;
+  mutable size_t _allocated;
+  const size_t _limit;
+  const size_t _cache_size;
+
+  void populate_cache();
+  ObjectSample* newSample() const;
+  void link(ObjectSample* sample);
+  void unlink(ObjectSample* sample);
+  void deallocate_samples(List& list);
+  void reset(ObjectSample* sample);
+
+ public:
+  SampleList(size_t limit, size_t cache_size = 0);
+  ~SampleList();
+
+  void set_last_resolved(const ObjectSample* sample);
+  ObjectSample* get();
+  ObjectSample* last() const;
+  void release(ObjectSample* sample);
+  const ObjectSample* last_resolved() const;
+  ObjectSample* reuse(ObjectSample* sample);
+  bool is_full() const;
+  size_t count() const;
+};
+
+#endif // SHARE_VM_JFR_LEAKPROFILER_SAMPLING_SAMPLELIST_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/sampling/samplePriorityQueue.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/leakprofiler/sampling/objectSample.hpp"
+#include "jfr/leakprofiler/sampling/samplePriorityQueue.hpp"
+#include "memory/allocation.inline.hpp"
+#include "oops/oop.inline.hpp"
+
+SamplePriorityQueue::SamplePriorityQueue(size_t size) :
+  _allocated_size(size),
+  _count(0),
+  _total(0) {
+  _items = NEW_C_HEAP_ARRAY(ObjectSample*, size, mtTracing);
+  memset(_items, 0, sizeof(ObjectSample*) * size);
+}
+
+SamplePriorityQueue::~SamplePriorityQueue() {
+  FREE_C_HEAP_ARRAY(ObjectSample*, _items, mtTracing);
+  _items = NULL;
+}
+
+void SamplePriorityQueue::push(ObjectSample* item) {
+  assert(item != NULL, "invariant");
+  assert(_items[_count] == NULL, "invariant");
+
+  _items[_count] = item;
+  _items[_count]->set_index(_count);
+  _count++;
+  moveUp(_count - 1);
+  _total += item->span();
+}
+
+size_t SamplePriorityQueue::total() const {
+  return _total;
+}
+
+ObjectSample* SamplePriorityQueue::pop() {
+  if (_count == 0) {
+    return NULL;
+  }
+
+  ObjectSample* const s = _items[0];
+  assert(s != NULL, "invariant");
+  swap(0, _count - 1);
+  _count--;
+  assert(s == _items[_count], "invariant");
+  // clear from heap
+  _items[_count] = NULL;
+  moveDown(0);
+  _total -= s->span();
+  return s;
+}
+
+void SamplePriorityQueue::swap(int i, int j) {
+  ObjectSample* tmp = _items[i];
+  _items[i] = _items[j];
+  _items[j] = tmp;
+  _items[i]->set_index(i);
+  _items[j]->set_index(j);
+}
+
+static int left(int i) {
+  return 2 * i + 1;
+}
+
+static int right(int i) {
+  return 2 * i + 2;
+}
+
+static int parent(int i) {
+  return (i - 1) / 2;
+}
+
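+// Sifts the element at index i down the binary min-heap (keyed on span)
+// until neither child has a smaller span.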
+void SamplePriorityQueue::moveDown(int i) {
+  do {
+    int j = -1;
+    int r = right(i);
+    if (r < _count && _items[r]->span() < _items[i]->span()) {
+      int l = left(i);
+      if (_items[l]->span() < _items[r]->span()) {
+        j = l;
+      } else {
+        j = r;
+      }
+    } else {
+      int l = left(i);
+      if (l < _count && _items[l]->span() < _items[i]->span()) {
+        j = l;
+      }
+    }
+    if (j >= 0) {
+      swap(i, j);
+    }
+    i = j;
+  } while (i >= 0);
+}
+
+void SamplePriorityQueue::moveUp(int i) {
+  int p = parent(i);
+  while (i > 0 && _items[i]->span() < _items[p]->span()) {
+    swap(i, p);
+    i = p;
+    p = parent(i);
+  }
+}
+
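+// Removes an arbitrary sample by temporarily setting its span to zero so
+// moveUp() floats it to the root, then restoring the span and popping it.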
+void SamplePriorityQueue::remove(ObjectSample* s) {
+  assert(s != NULL, "invariant");
+  const size_t realSpan = s->span();
+  s->set_span(0);
+  moveUp(s->index());
+  s->set_span(realSpan);
+  pop();
+}
+
+int SamplePriorityQueue::count() const {
+  return _count;
+}
+
+const ObjectSample* SamplePriorityQueue::peek() const {
+  return _count == 0 ? NULL : _items[0];
+}
+
+ObjectSample* SamplePriorityQueue::item_at(int index) {
+  assert(index >= 0 && index < _count, "out of range");
+  return _items[index];
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/sampling/samplePriorityQueue.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_LEAKPROFILER_SAMPLING_SAMPLEPRIORITYQUEUE_HPP
+#define SHARE_VM_JFR_LEAKPROFILER_SAMPLING_SAMPLEPRIORITYQUEUE_HPP
+
+#include "memory/allocation.hpp"
+
+class ObjectSample;
+
+// Priority queue that keeps object samples ordered
+// by the amount of allocation they span.
+class SamplePriorityQueue : public CHeapObj<mtTracing> {
+ private:
+  ObjectSample** _items;
+  size_t _allocated_size;
+  int _count;
+  size_t _total;
+
+  void swap(int i, int j);
+  void moveDown(int index);
+  void moveUp(int index);
+
+ public:
+  SamplePriorityQueue(size_t size);
+  ~SamplePriorityQueue();
+
+  void push(ObjectSample* sample);
+  ObjectSample* pop();
+  const ObjectSample* peek() const;
+  void remove(ObjectSample* sample);
+  ObjectSample* item_at(int index);
+  size_t total() const;
+  int count() const;
+};
+
+#endif // SHARE_VM_JFR_LEAKPROFILER_SAMPLING_SAMPLEPRIORITYQUEUE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/startOperation.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_LEAKPROFILER_STARTOPERATION_HPP
+#define SHARE_VM_LEAKPROFILER_STARTOPERATION_HPP
+
+#include "jfr/recorder/jfrRecorder.hpp"
+#include "jfr/leakprofiler/leakProfiler.hpp"
+#include "jfr/leakprofiler/sampling/objectSampler.hpp"
+#include "jfr/recorder/service/jfrOptionSet.hpp"
+#include "runtime/vm_operations.hpp"
+
+// Safepoint operation for starting leak profiler object sampler
+class StartOperation : public VM_Operation {
+ private:
+  jlong _sample_count;
+ public:
+  StartOperation(jlong sample_count) :
+    _sample_count(sample_count) {
+  }
+
+  Mode evaluation_mode() const {
+    return _safepoint;
+  }
+
+  VMOp_Type type() const {
+    return VMOp_GC_HeapInspection;
+  }
+
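+  // Executed at a safepoint: installs a new ObjectSampler sized by the
+  // configured old-object queue size.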
+  virtual void doit() {
+    assert(!LeakProfiler::is_running(), "invariant");
+    jint queue_size = JfrOptionSet::old_object_queue_size();
+    LeakProfiler::set_object_sampler(new ObjectSampler(queue_size));
+    if (LogJFR && Verbose) tty->print_cr("Object sampling started");
+  }
+};
+
+#endif // SHARE_VM_LEAKPROFILER_STARTOPERATION_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/stopOperation.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_LEAKPROFILER_STOPOPERATION_HPP
+#define SHARE_VM_LEAKPROFILER_STOPOPERATION_HPP
+
+#include "jfr/leakprofiler/leakProfiler.hpp"
+#include "jfr/leakprofiler/sampling/objectSampler.hpp"
+#include "jfr/recorder/service/jfrOptionSet.hpp"
+#include "runtime/vm_operations.hpp"
+
+// Safepoint operation for stopping leak profiler object sampler
+class StopOperation : public VM_Operation {
+ public:
+  StopOperation() {}
+
+  Mode evaluation_mode() const {
+    return _safepoint;
+  }
+
+  VMOp_Type type() const {
+    return VMOp_GC_HeapInspection;
+  }
+
+  virtual void doit() {
+    assert(LeakProfiler::is_running(), "invariant");
+    ObjectSampler* object_sampler = LeakProfiler::object_sampler();
+    delete object_sampler;
+    LeakProfiler::set_object_sampler(NULL);
+    if (LogJFR && Verbose) tty->print_cr("Object sampling stopped");
+  }
+};
+
+#endif // SHARE_VM_LEAKPROFILER_STOPOPERATION_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/utilities/granularTimer.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/leakprofiler/utilities/granularTimer.hpp"
+
+long GranularTimer::_granularity = 0;
+long GranularTimer::_counter = 0;
+JfrTicks GranularTimer::_finish_time_ticks = 0;
+JfrTicks GranularTimer::_start_time_ticks = 0;
+bool GranularTimer::_finished = false;
+
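+// Records the start time and derives the finish time, clamping to
+// max_jlong if adding the duration overflows.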
+void GranularTimer::start(jlong duration_ticks, long granularity) {
+  assert(granularity > 0, "granularity must be at least 1");
+  _granularity = granularity;
+  _counter = granularity;
+  _start_time_ticks = JfrTicks::now();
+  const jlong end_time_ticks = _start_time_ticks.value() + duration_ticks;
+  _finish_time_ticks = end_time_ticks < 0 ? JfrTicks(max_jlong) : JfrTicks(end_time_ticks);
+  _finished = _finish_time_ticks == _start_time_ticks;
+  assert(_finish_time_ticks.value() >= 0, "invariant");
+  assert(_finish_time_ticks >= _start_time_ticks, "invariant");
+}
+void GranularTimer::stop() {
+  if (!_finished) {
+    _finish_time_ticks = JfrTicks::now();
+  }
+}
+const JfrTicks& GranularTimer::start_time() {
+  return _start_time_ticks;
+}
+
+const JfrTicks& GranularTimer::end_time() {
+  return _finish_time_ticks;
+}
+
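+// Only consults the clock every _granularity calls; in between, the
+// _counter countdown keeps the common path cheap.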
+bool GranularTimer::is_finished() {
+  assert(_granularity != 0, "GranularTimer::is_finished must be called after GranularTimer::start");
+  if (--_counter == 0) {
+    if (_finished) {
+      // reset so we decrease to zero at next iteration
+      _counter = 1;
+      return true;
+    }
+    if (JfrTicks::now() > _finish_time_ticks) {
+      _finished = true;
+      _counter = 1;
+      return true;
+    }
+    assert(_counter == 0, "invariant");
+    _counter = _granularity; // restore next batch
+  }
+  return false;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/utilities/granularTimer.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_LEAKPROFILER_UTILITIES_GRANULARTIMER_HPP
+#define SHARE_VM_LEAKPROFILER_UTILITIES_GRANULARTIMER_HPP
+
+#include "jfr/utilities/jfrTime.hpp"
+#include "memory/allocation.hpp"
+
+class GranularTimer : public AllStatic {
+ private:
+  static JfrTicks _finish_time_ticks;
+  static JfrTicks _start_time_ticks;
+  static long _counter;
+  static long _granularity;
+  static bool _finished;
+ public:
+  static void start(jlong duration_ticks, long granularity);
+  static void stop();
+  static const JfrTicks& start_time();
+  static const JfrTicks& end_time();
+  static bool is_finished();
+};
+
+#endif // SHARE_VM_LEAKPROFILER_UTILITIES_GRANULARTIMER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/utilities/rootType.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_LEAKPROFILER_UTILITIES_ROOTTYPE_HPP
+#define SHARE_VM_LEAKPROFILER_UTILITIES_ROOTTYPE_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/debug.hpp"
+
+class OldObjectRoot : public AllStatic {
+ public:
+  enum System {
+    _system_undetermined,
+    _universe,
+    _global_jni_handles,
+    _threads,
+    _object_synchronizer,
+    _system_dictionary,
+    _class_loader_data,
+    _management,
+    _jvmti,
+    _code_cache,
+    _string_table,
+    _aot,
+    _number_of_systems
+  };
+
+  enum Type {
+    _type_undetermined,
+    _stack_variable,
+    _local_jni_handle,
+    _global_jni_handle,
+    _handle_area,
+    _number_of_types
+  };
+
+  static const char* system_description(System system) {
+    switch (system) {
+      case _system_undetermined:
+        return "<unknown>";
+      case _universe:
+        return "Universe";
+      case _global_jni_handles:
+        return "Global JNI Handles";
+      case _threads:
+        return "Threads";
+      case _object_synchronizer:
+        return "Object Monitor";
+      case _system_dictionary:
+        return "System Dictionary";
+      case _class_loader_data:
+        return "Class Loader Data";
+      case _management:
+        return "Management";
+      case _jvmti:
+        return "JVMTI";
+      case _code_cache:
+        return "Code Cache";
+      case _string_table:
+        return "String Table";
+      case _aot:
+        return "AOT";
+      default:
+        ShouldNotReachHere();
+    }
+    return NULL;
+  }
+
+  static const char* type_description(Type type) {
+    switch (type) {
+      case _type_undetermined:
+        return "<unknown>";
+      case _stack_variable:
+        return "Stack Variable";
+      case _local_jni_handle:
+        return "Local JNI Handle";
+      case _global_jni_handle:
+        return "Global JNI Handle";
+      case _handle_area:
+        return "Handle Area";
+      default:
+        ShouldNotReachHere();
+    }
+    return NULL;
+  }
+};
+
+#endif // SHARE_VM_LEAKPROFILER_UTILITIES_ROOTTYPE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/utilities/saveRestore.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/classLoaderData.hpp"
+#include "jfr/leakprofiler/utilities/saveRestore.hpp"
+#include "oops/oop.inline.hpp"
+
+MarkOopContext::MarkOopContext() : _obj(NULL), _mark_oop(NULL) {}
+
+MarkOopContext::MarkOopContext(const oop obj) : _obj(obj), _mark_oop(obj->mark()) {
+  assert(_obj->mark() == _mark_oop, "invariant");
+  // now we will "poison" the mark word of the object
+  // to the intermediate monitor INFLATING state.
+  // This is an "impossible" state during a safepoint,
+  // hence we will use it to quickly identify objects
+  // during the reachability search from gc roots.
+  assert(NULL == markOopDesc::INFLATING(), "invariant");
+  _obj->set_mark(markOopDesc::INFLATING());
+  assert(NULL == obj->mark(), "invariant");
+}
+
+MarkOopContext::~MarkOopContext() {
+  if (_obj != NULL) {
+    _obj->set_mark(_mark_oop);
+    assert(_obj->mark() == _mark_oop, "invariant");
+  }
+}
+
+MarkOopContext::MarkOopContext(const MarkOopContext& rhs) : _obj(NULL), _mark_oop(NULL) {
+  swap(const_cast<MarkOopContext&>(rhs));
+}
+
+void MarkOopContext::operator=(MarkOopContext rhs) {
+  swap(rhs);
+}
+
+void MarkOopContext::swap(MarkOopContext& rhs) {
+  oop temp_obj = rhs._obj;
+  markOop temp_mark_oop = rhs._mark_oop;
+  rhs._obj = _obj;
+  rhs._mark_oop = _mark_oop;
+  _obj = temp_obj;
+  _mark_oop = temp_mark_oop;
+}
+
+CLDClaimContext::CLDClaimContext() : _cld(NULL) {}
+
+CLDClaimContext::CLDClaimContext(ClassLoaderData* cld) : _cld(cld) {
+  assert(_cld->claimed(), "invariant");
+  _cld->clear_claimed();
+}
+
+CLDClaimContext::~CLDClaimContext() {
+  if (_cld != NULL) {
+    _cld->claim();
+    assert(_cld->claimed(), "invariant");
+  }
+}
+
+CLDClaimContext::CLDClaimContext(const CLDClaimContext& rhs) : _cld(NULL) {
+  swap(const_cast<CLDClaimContext&>(rhs));
+}
+
+void CLDClaimContext::operator=(CLDClaimContext rhs) {
+  swap(rhs);
+}
+
+void CLDClaimContext::swap(CLDClaimContext& rhs) {
+  ClassLoaderData* temp_cld = rhs._cld;
+  rhs._cld = _cld;
+  _cld = temp_cld;
+}
+
+CLDClaimStateClosure::CLDClaimStateClosure() : CLDClosure(), _state() {}
+
+void CLDClaimStateClosure::do_cld(ClassLoaderData* cld) {
+  assert(cld != NULL, "invariant");
+  if (cld->claimed()) {
+    _state.save(cld);
+  }
+}
+
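+// Snapshots which CLDs are currently claimed. The destructor clears all
+// claim bits, after which the saved contexts re-claim the original set.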
+SaveRestoreCLDClaimBits::SaveRestoreCLDClaimBits() : _claim_state_closure() {
+  ClassLoaderDataGraph::cld_do(&_claim_state_closure);
+}
+
+SaveRestoreCLDClaimBits::~SaveRestoreCLDClaimBits() {
+  ClassLoaderDataGraph::clear_claimed_marks();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/utilities/saveRestore.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_LEAKPROFILER_UTILITIES_SAVERESTORE_HPP
+#define SHARE_VM_JFR_LEAKPROFILER_UTILITIES_SAVERESTORE_HPP
+
+#include "memory/allocation.hpp"
+#include "memory/iterator.hpp"
+#include "oops/markOop.hpp"
+#include "utilities/growableArray.hpp"
+
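+// Generic save/restore scaffolding: values saved during the lifetime of a
+// SaveRestore object are wrapped in Context objects, and the destructor
+// restores the original state by running each Context's destructor.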
+template <typename T, typename Impl>
+class SaveRestore {
+ private:
+  Impl _impl;
+ public:
+  SaveRestore() : _impl() {
+    _impl.setup();
+  }
+
+  void save(T const& value) {
+    _impl.save(value);
+  }
+
+  ~SaveRestore() {
+    _impl.restore();
+  }
+};
+
+template <typename T, typename Context>
+class ContextStore {
+private:
+  GrowableArray<Context>* _storage;
+public:
+  ContextStore() : _storage(NULL) {}
+
+  void setup() {
+    assert(_storage == NULL, "invariant");
+    _storage = new GrowableArray<Context>(16);
+  }
+
+  void save(T const& value) {
+    _storage->push(Context(value));
+  }
+
+  void restore() {
+    for (int i = 0; i < _storage->length(); ++i) {
+      _storage->at(i).~Context();
+    }
+  }
+};
+
+/*
+ * This class will save the original mark oop of an object sample object.
+ * It will then install an "identifier" mark oop to be used for
+ * identification purposes in the search for reference chains.
+ * The destructor will restore the original mark oop.
+ */
+
+class MarkOopContext {
+ private:
+  oop _obj;
+  markOop _mark_oop;
+  void swap(MarkOopContext& rhs);
+ public:
+  MarkOopContext();
+  MarkOopContext(const oop obj);
+  MarkOopContext(const MarkOopContext& rhs);
+  void operator=(MarkOopContext rhs);
+  ~MarkOopContext();
+};
+
+typedef SaveRestore<oop, ContextStore<oop, MarkOopContext> > SaveRestoreMarkOops;
+
+class ClassLoaderData;
+
+class CLDClaimContext {
+ private:
+  ClassLoaderData* _cld;
+  void swap(CLDClaimContext& rhs);
+ public:
+  CLDClaimContext();
+  CLDClaimContext(ClassLoaderData* cld);
+  CLDClaimContext(const CLDClaimContext& rhs);
+  void operator=(CLDClaimContext rhs);
+  ~CLDClaimContext();
+};
+
+typedef SaveRestore<ClassLoaderData*, ContextStore<ClassLoaderData*, CLDClaimContext> > SaveRestoreCLDClaimState;
+
+class CLDClaimStateClosure : public CLDClosure {
+ private:
+  SaveRestoreCLDClaimState _state;
+ public:
+  CLDClaimStateClosure();
+  void do_cld(ClassLoaderData* cld);
+};
+
+class SaveRestoreCLDClaimBits : public StackObj {
+ private:
+  CLDClaimStateClosure _claim_state_closure;
+ public:
+  SaveRestoreCLDClaimBits();
+  ~SaveRestoreCLDClaimBits();
+};
+
+#endif // SHARE_VM_JFR_LEAKPROFILER_UTILITIES_SAVERESTORE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/utilities/unifiedOop.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_LEAKPROFILER_UTILITIES_UNIFIEDOOP_HPP
+#define SHARE_VM_JFR_LEAKPROFILER_UTILITIES_UNIFIEDOOP_HPP
+
+#include "oops/oop.inline.hpp"
+
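+// Distinguishes oop* from narrowOop* references by tagging the low bit of
+// the pointer: encode() sets the bit for narrow references and decode()
+// strips it again before the reference is dereferenced.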
+class UnifiedOop : public AllStatic {
+ public:
+  static const bool is_narrow(const oop* ref) {
+    assert(ref != NULL, "invariant");
+    return 1 == (((u8)ref) & 1);
+  }
+
+  static const oop* decode(const oop* ref) {
+    assert(ref != NULL, "invariant");
+    return is_narrow(ref) ? (const oop*)(((u8)ref) & ~1) : ref;
+  }
+
+  static const oop* encode(narrowOop* ref) {
+    assert(ref != NULL, "invariant");
+    return (const oop*)((u8)ref | 1);
+  }
+
+  static oop dereference(const oop* ref) {
+    assert(ref != NULL, "invariant");
+    return is_narrow(ref) ?
+      oopDesc::load_decode_heap_oop((narrowOop*)decode(ref)) :
+      oopDesc::load_decode_heap_oop(const_cast<oop*>(ref));
+  }
+};
+
+#endif // SHARE_VM_JFR_LEAKPROFILER_UTILITIES_UNIFIEDOOP_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/metadata/jfrSerializer.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_METADATA_JFRSERIALIZER_HPP
+#define SHARE_VM_JFR_METADATA_JFRSERIALIZER_HPP
+
+#include "memory/allocation.hpp"
+#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
+#include "jfrfiles/jfrTypes.hpp"
+
+/*
+ * A "type" in Jfr is a binary relation defined by enumerating a set of <key, value> ordered pairs:
+ *
+ * { <1, myvalue>, <2, mysecondvalue>, ... }
+ *
+ * The key should be a type-relative unique id. A value is an instance of the type.
+ *
+ * By defining and registering a type, keys can be written to event fields and the
+ * framework will maintain the mapping to the corresponding value (if you register as below).
+ *
+ * Inherit JfrSerializer, create a CHeapObj instance and then use JfrSerializer::register_serializer(...) to register.
+ * Once registered, the ownership of the serializer instance is transferred to Jfr.
+ *
+ * How to register:
+ *
+ * bool register_serializer(JfrTypeId id, bool require_safepoint, bool permit_cache, JfrSerializer* serializer)
+ *
+ * The type identifiers are machine generated into an enum located in jfrfiles/jfrTypes.hpp (included).
+ *
+ *  enum JfrTypeId {
+ *    ...
+ *    TYPE_THREADGROUP,
+ *    TYPE_CLASSLOADER,
+ *    TYPE_METHOD,
+ *    TYPE_SYMBOL,
+ *    TYPE_THREADSTATE,
+ *    TYPE_INFLATECAUSE,
+ *    ...
+ *
+ * id                 the id of the type you are defining (see the enum above).
+ * require_safepoint  indicates if your type needs to be evaluated and serialized under a safepoint.
+ * permit_cache       indicates if your type constants are stable enough to be cached
+ *                    (implies the callback is invoked only once and the contents cached; set this to true for static information).
+ * serializer         the serializer instance.
+ *
+ * See below for guidance about how to implement serialize().
+ *
+ */
+class JfrSerializer : public CHeapObj<mtTracing> {
+ public:
+  virtual ~JfrSerializer() {}
+  static bool register_serializer(JfrTypeId id, bool require_safepoint, bool permit_cache, JfrSerializer* serializer);
+  virtual void serialize(JfrCheckpointWriter& writer) = 0;
+};
+
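+/*
+ * A minimal registration sketch (MyInflateCauseConstant is a hypothetical
+ * serializer implementation for the TYPE_INFLATECAUSE id shown above):
+ *
+ *   bool register_inflate_cause_constant() {
+ *     return JfrSerializer::register_serializer(TYPE_INFLATECAUSE,
+ *                                               false, // require_safepoint
+ *                                               true,  // permit_cache
+ *                                               new MyInflateCauseConstant());
+ *   }
+ *
+ * The framework takes ownership of the heap-allocated serializer instance.
+ */
+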
+/*
+ * Defining serialize(JfrCheckpointWriter& writer):
+ *
+ *  Invoke writer.write_count(N) for the number of ordered pairs (cardinality) to be defined.
+ *
+ *  You then write each individual ordered pair, <key, value> ...
+ *
+ *  Here is a simple example, describing a type defining string constants:
+ *
+ *  void MyType::serialize(JfrCheckpointWriter& writer) {
+ *    const int nof_causes = ObjectSynchronizer::inflate_cause_nof;
+ *    writer.write_count(nof_causes);                           // write number of ordered pairs (mappings) to follow
+ *    for (int i = 0; i < nof_causes; i++) {
+ *      writer.write_key(i);                                    // write key
+ *      writer.write(ObjectSynchronizer::inflate_cause_name((ObjectSynchronizer::InflateCause)i)); // write value
+ *    }
+ *  }
+ *
+ * Note that values can be complex, and can also refer to other types.
+ *
+ * Please see jfr/recorder/checkpoint/types/jfrType.cpp for reference.
+ */
+
+#endif // SHARE_VM_JFR_METADATA_JFRSERIALIZER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/metadata/metadata.xml	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,1138 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+
+ This code is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License version 2 only, as
+ published by the Free Software Foundation.
+
+ This code is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ version 2 for more details (a copy is included in the LICENSE file that
+ accompanied this code).
+
+ You should have received a copy of the GNU General Public License version
+ 2 along with this work; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+
+ Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ or visit www.oracle.com if you need additional information or have any
+ questions.
+
+-->
+
+<Metadata>
+
+  <Event name="ThreadStart" category="Java Application" label="Java Thread Start" thread="true" startTime="false">
+    <Field type="Thread" name="thread" label="Java Thread" />
+  </Event>
+
+  <Event name="ThreadEnd" category="Java Application" label="Java Thread End" thread="true" startTime="false">
+    <Field type="Thread" name="thread" label="Java Thread" />
+  </Event>
+
+  <Event name="ThreadSleep" category="Java Application" label="Java Thread Sleep" thread="true" stackTrace="true">
+    <Field type="long" contentType="millis" name="time" label="Sleep Time" />
+  </Event>
+
+  <Event name="ThreadPark" category="Java Application" label="Java Thread Park" thread="true" stackTrace="true">
+    <Field type="Class" name="parkedClass" label="Class Parked On" />
+    <Field type="long" contentType="millis" name="timeout" label="Park Timeout" />
+    <Field type="ulong" contentType="address" name="address" label="Address of Object Parked" relation="JavaMonitorAddress" />
+  </Event>
+
+  <Event name="JavaMonitorEnter" category="Java Application" label="Java Monitor Blocked" thread="true" stackTrace="true">
+    <Field type="Class" name="monitorClass" label="Monitor Class" />
+    <Field type="Thread" name="previousOwner" label="Previous Monitor Owner" />
+    <Field type="ulong" contentType="address" name="address" label="Monitor Address" relation="JavaMonitorAddress" />
+  </Event>
+
+  <Event name="JavaMonitorWait" category="Java Application" label="Java Monitor Wait" description="Waiting on a Java monitor" thread="true" stackTrace="true">
+    <Field type="Class" name="monitorClass" label="Monitor Class" description="Class of object waited on" />
+    <Field type="Thread" name="notifier" label="Notifier Thread" description="Notifying Thread" />
+    <Field type="long" contentType="millis" name="timeout" label="Timeout" description="Maximum wait time" />
+    <Field type="boolean" name="timedOut" label="Timed Out" description="Wait has been timed out" />
+    <Field type="ulong" contentType="address" name="address" label="Monitor Address" description="Address of object waited on" relation="JavaMonitorAddress" />
+  </Event>
+
+  <Event name="JavaMonitorInflate" category="Java Application" label="Java Monitor Inflated" thread="true" stackTrace="true">
+    <Field type="Class" name="monitorClass" label="Monitor Class" />
+    <Field type="ulong" contentType="address" name="address" label="Monitor Address" relation="JavaMonitorAddress" />
+    <Field type="InflateCause" name="cause" label="Monitor Inflation Cause" description="Cause of inflation" />
+  </Event>
+
+  <Event name="BiasedLockRevocation" category="Java Application" label="Biased Lock Revocation" description="Revoked bias of object" thread="true"
+    stackTrace="true">
+    <Field type="Class" name="lockClass" label="Lock Class" description="Class of object whose biased lock was revoked" />
+    <Field type="int" name="safepointId" label="Safepoint Identifier" relation="SafepointId" />
+    <Field type="Thread" name="previousOwner" label="Previous Owner" description="Thread owning the bias before revocation" />
+  </Event>
+
+  <Event name="BiasedLockSelfRevocation" category="Java Application" label="Biased Lock Self Revocation" description="Revoked bias of object biased towards own thread"
+    thread="true" stackTrace="true">
+    <Field type="Class" name="lockClass" label="Lock Class" description="Class of object whose biased lock was revoked" />
+  </Event>
+
+  <Event name="BiasedLockClassRevocation" category="Java Application" label="Biased Lock Class Revocation" description="Revoked biases for all instances of a class"
+    thread="true" stackTrace="true">
+    <Field type="Class" name="revokedClass" label="Revoked Class" description="Class whose biased locks were revoked" />
+    <Field type="boolean" name="disableBiasing" label="Disable Further Biasing" description="Whether further biasing for instances of this class will be allowed" />
+    <Field type="int" name="safepointId" label="Safepoint Identifier" relation="SafepointId" />
+  </Event>
+
+  <Event name="ReservedStackActivation" category="Java Virtual Machine, Runtime" label="Reserved Stack Activation"
+    description="Activation of Reserved Stack Area caused by stack overflow with ReservedStackAccess annotated method in call stack" thread="true" stackTrace="true"
+    startTime="false">
+    <Field type="Method" name="method" label="Java Method" />
+  </Event>
+
+  <Event name="ClassLoad" category="Java Virtual Machine, Class Loading" label="Class Load" thread="true" stackTrace="true">
+    <Field type="Class" name="loadedClass" label="Loaded Class" />
+    <Field type="ClassLoader" name="definingClassLoader" label="Defining Class Loader" />
+    <Field type="ClassLoader" name="initiatingClassLoader" label="Initiating Class Loader" />
+  </Event>
+
+  <Event name="ClassDefine" category="Java Virtual Machine, Class Loading" label="Class Define" thread="true" stackTrace="true" startTime="false">
+    <Field type="Class" name="definedClass" label="Defined Class" />
+    <Field type="ClassLoader" name="definingClassLoader" label="Defining Class Loader" />
+  </Event>
+
+  <Event name="ClassUnload" category="Java Virtual Machine, Class Loading" label="Class Unload" thread="true" startTime="false">
+    <Field type="Class" name="unloadedClass" label="Unloaded Class" />
+    <Field type="ClassLoader" name="definingClassLoader" label="Defining Class Loader" />
+  </Event>
+
+  <Event name="IntFlagChanged" category="Java Virtual Machine, Flag" label="Int Flag Changed" startTime="false">
+    <Field type="string" name="name" label="Name" />
+    <Field type="int" name="oldValue" label="Old Value" />
+    <Field type="int" name="newValue" label="New Value" />
+    <Field type="FlagValueOrigin" name="origin" label="Origin" />
+  </Event>
+
+  <Event name="UnsignedIntFlagChanged" category="Java Virtual Machine, Flag" label="Unsigned Int Flag Changed" startTime="false">
+    <Field type="string" name="name" label="Name" />
+    <Field type="uint" name="oldValue" label="Old Value" />
+    <Field type="uint" name="newValue" label="New Value" />
+    <Field type="FlagValueOrigin" name="origin" label="Origin" />
+  </Event>
+
+  <Event name="LongFlagChanged" category="Java Virtual Machine, Flag" label="Long Flag Changed" startTime="false">
+    <Field type="string" name="name" label="Name" />
+    <Field type="long" name="oldValue" label="Old Value" />
+    <Field type="long" name="newValue" label="New Value" />
+    <Field type="FlagValueOrigin" name="origin" label="Origin" />
+  </Event>
+
+  <Event name="UnsignedLongFlagChanged" category="Java Virtual Machine, Flag" label="Unsigned Long Flag Changed" startTime="false">
+    <Field type="string" name="name" label="Name" />
+    <Field type="ulong" name="oldValue" label="Old Value" />
+    <Field type="ulong" name="newValue" label="New Value" />
+    <Field type="FlagValueOrigin" name="origin" label="Origin" />
+  </Event>
+
+  <Event name="DoubleFlagChanged" category="Java Virtual Machine, Flag" label="Double Flag Changed" startTime="false">
+    <Field type="string" name="name" label="Name" />
+    <Field type="double" name="oldValue" label="Old Value" />
+    <Field type="double" name="newValue" label="New Value" />
+    <Field type="FlagValueOrigin" name="origin" label="Origin" />
+  </Event>
+
+  <Event name="BooleanFlagChanged" category="Java Virtual Machine, Flag" label="Boolean Flag Changed" startTime="false">
+    <Field type="string" name="name" label="Name" />
+    <Field type="boolean" name="oldValue" label="Old Value" />
+    <Field type="boolean" name="newValue" label="New Value" />
+    <Field type="FlagValueOrigin" name="origin" label="Origin" />
+  </Event>
+
+  <Event name="StringFlagChanged" category="Java Virtual Machine, Flag" label="String Flag Changed" startTime="false">
+    <Field type="string" name="name" label="Name" />
+    <Field type="string" name="oldValue" label="Old Value" />
+    <Field type="string" name="newValue" label="New Value" />
+    <Field type="FlagValueOrigin" name="origin" label="Origin" />
+  </Event>
+ 
+  <Type name="VirtualSpace">
+    <Field type="ulong" contentType="address" name="start" label="Start Address" description="Start address of the virtual space" />
+    <Field type="ulong" contentType="address" name="committedEnd" label="Committed End Address" description="End address of the committed memory for the virtual space" />
+    <Field type="ulong" contentType="bytes" name="committedSize" label="Committed Size" description="Size of the committed memory for the virtual space" />
+    <Field type="ulong" contentType="address" name="reservedEnd" label="Reserved End Address" description="End address of the reserved memory for the virtual space" />
+    <Field type="ulong" contentType="bytes" name="reservedSize" label="Reserved Size" description="Size of the reserved memory for the virtual space" />
+  </Type>
+  
+  <Type name="ObjectSpace">
+    <Field type="ulong" contentType="address" name="start" label="Start Address" description="Start address of the space" />
+    <Field type="ulong" contentType="address" name="end" label="End Address" description="End address of the space" />
+    <Field type="ulong" contentType="bytes" name="used" label="Used" description="Bytes allocated by objects in the space" />
+    <Field type="ulong" contentType="bytes" name="size" label="Size" description="Size of the space" />
+  </Type>
+  
+  <Event name="GCHeapSummary" category="Java Virtual Machine, GC, Heap" label="Heap Summary" startTime="false">
+    <Field type="uint" name="gcId" label="GC Identifier" relation="GcId" />
+    <Field type="GCWhen" name="when" label="When" />
+    <Field type="VirtualSpace" struct="true" name="heapSpace" label="Heap Space" />
+    <Field type="ulong" contentType="bytes" name="heapUsed" label="Heap Used" description="Bytes allocated by objects in the heap" />
+  </Event>
+ 
+  <Type name="MetaspaceSizes">
+    <Field type="ulong" contentType="bytes" name="committed" label="Committed" description="Committed memory for this space" />
+    <Field type="ulong" contentType="bytes" name="used" label="Used" description="Bytes allocated by objects in the space" />
+    <Field type="ulong" contentType="bytes" name="reserved" label="Reserved" description="Reserved memory for this space" />
+  </Type>
+ 
+  <Event name="MetaspaceSummary" category="Java Virtual Machine, GC, Heap" label="Metaspace Summary" startTime="false">
+    <Field type="uint" name="gcId" label="GC Identifier" relation="GcId" />
+    <Field type="GCWhen" name="when" label="When" />
+    <Field type="ulong" contentType="bytes" name="gcThreshold" label="GC Threshold" />
+    <Field type="MetaspaceSizes" struct="true" name="metaspace" label="Total" />
+    <Field type="MetaspaceSizes" struct="true" name="dataSpace" label="Data" />
+    <Field type="MetaspaceSizes" struct="true" name="classSpace" label="Class" />
+  </Event>
+
+  <Event name="MetaspaceGCThreshold" category="Java Virtual Machine, GC, Metaspace" label="Metaspace GC Threshold" startTime="false">
+    <Field type="ulong" contentType="bytes" name="oldValue" label="Old Value" />
+    <Field type="ulong" contentType="bytes" name="newValue" label="New Value" />
+    <Field type="GCThresholdUpdater" name="updater" label="Updater" />
+  </Event>
+
+  <Event name="MetaspaceAllocationFailure" category="Java Virtual Machine, GC, Metaspace" label="Metaspace Allocation Failure" startTime="false"
+    stackTrace="true">
+    <Field type="ClassLoader" name="classLoader" label="Class Loader" />
+    <Field type="boolean" name="anonymousClassLoader" label="Anonymous Class Loader" />
+    <Field type="ulong" contentType="bytes" name="size" label="Size" />
+    <Field type="MetadataType" name="metadataType" label="Metadata Type" />
+    <Field type="MetaspaceObjectType" name="metaspaceObjectType" label="Metaspace Object Type" />
+  </Event>
+
+  <Event name="MetaspaceOOM" category="Java Virtual Machine, GC, Metaspace" label="Metaspace Out of Memory" startTime="false" stackTrace="true">
+    <Field type="ClassLoader" name="classLoader" label="Class Loader" />
+    <Field type="boolean" name="anonymousClassLoader" label="Anonymous Class Loader" />
+    <Field type="ulong" contentType="bytes" name="size" label="Size" />
+    <Field type="MetadataType" name="metadataType" label="Metadata Type" />
+    <Field type="MetaspaceObjectType" name="metaspaceObjectType" label="Metaspace Object Type" />
+  </Event>
+
+  <Event name="MetaspaceChunkFreeListSummary" category="Java Virtual Machine, GC, Metaspace" label="Metaspace Chunk Free List Summary" startTime="false">
+    <Field type="uint" name="gcId" label="GC Identifier" relation="GcId" />
+    <Field type="GCWhen" name="when" label="When" />
+    <Field type="MetadataType" name="metadataType" label="Metadata Type" />
+    <Field type="ulong" name="specializedChunks" label="Specialized Chunks" />
+    <Field type="ulong" contentType="bytes" name="specializedChunksTotalSize" label="Specialized Chunks Total Size" />
+    <Field type="ulong" name="smallChunks" label="Small Chunks" />
+    <Field type="ulong" contentType="bytes" name="smallChunksTotalSize" label="Small Chunks Total Size" />
+    <Field type="ulong" name="mediumChunks" label="Medium Chunks" />
+    <Field type="ulong" contentType="bytes" name="mediumChunksTotalSize" label="Medium Chunks Total Size" />
+    <Field type="ulong" name="humongousChunks" label="Humongous Chunks" />
+    <Field type="ulong" contentType="bytes" name="humongousChunksTotalSize" label="Humongous Chunks Total Size" />
+  </Event>
+
+  <Event name="PSHeapSummary" category="Java Virtual Machine, GC, Heap" label="Parallel Scavenge Heap Summary" startTime="false">
+    <Field type="uint" name="gcId" label="GC Identifier" relation="GcId" />
+    <Field type="GCWhen" name="when" label="When" />
+    <Field type="VirtualSpace" struct="true" name="oldSpace" label="Old Space" />
+    <Field type="ObjectSpace" struct="true" name="oldObjectSpace" label="Old Object Space" />
+    <Field type="VirtualSpace" struct="true" name="youngSpace" label="Young Space" />
+    <Field type="ObjectSpace" struct="true" name="edenSpace" label="Eden Space" />
+    <Field type="ObjectSpace" struct="true" name="fromSpace" label="From Space" />
+    <Field type="ObjectSpace" struct="true" name="toSpace" label="To Space" />
+  </Event>
+
+  <Event name="G1HeapSummary" category="Java Virtual Machine, GC, Heap" label="G1 Heap Summary" startTime="false">
+    <Field type="uint" name="gcId" label="GC Identifier" relation="GcId" />
+    <Field type="GCWhen" name="when" label="When" />
+    <Field type="ulong" contentType="bytes" name="edenUsedSize" label="Eden Used Size" />
+    <Field type="ulong" contentType="bytes" name="edenTotalSize" label="Eden Total Size" />
+    <Field type="ulong" contentType="bytes" name="survivorUsedSize" label="Survivor Used Size" />
+    <Field type="uint" name="numberOfRegions" label="Number of Regions" />
+  </Event>
+
+  <Event name="GarbageCollection" category="Java Virtual Machine, GC, Collector" label="Garbage Collection" description="Garbage collection performed by the JVM">
+    <Field type="uint" name="gcId" label="GC Identifier" relation="GcId" />
+    <Field type="GCName" name="name" label="Name" description="The name of the Garbage Collector" />
+    <Field type="GCCause" name="cause" label="Cause" description="The reason for triggering this Garbage Collection" />
+    <Field type="Tickspan" name="sumOfPauses" label="Sum of Pauses" description="Sum of all the times in which Java execution was paused during the garbage collection" />
+    <Field type="Tickspan" name="longestPause" label="Longest Pause" description="Longest individual pause during the garbage collection" />
+  </Event>
+
+  <Event name="ParallelOldGarbageCollection" category="Java Virtual Machine, GC, Collector" label="Parallel Old Garbage Collection"
+    description="Extra information specific to Parallel Old Garbage Collections">
+    <Field type="uint" name="gcId" label="GC Identifier" relation="GcId" />
+    <Field type="ulong" contentType="address" name="densePrefix" label="Dense Prefix" description="The address of the dense prefix, used when compacting" />
+  </Event>
+
+  <Event name="YoungGarbageCollection" category="Java Virtual Machine, GC, Collector" label="Young Garbage Collection" description="Extra information specific to Young Garbage Collections">
+    <Field type="uint" name="gcId" label="GC Identifier" relation="GcId" />
+    <Field type="uint" name="tenuringThreshold" label="Tenuring Threshold" />
+  </Event>
+
+  <Event name="OldGarbageCollection" category="Java Virtual Machine, GC, Collector" label="Old Garbage Collection" description="Extra information specific to Old Garbage Collections">
+    <Field type="uint" name="gcId" label="GC Identifier" relation="GcId" />
+  </Event>
+
+  <Event name="G1GarbageCollection" category="Java Virtual Machine, GC, Collector" label="G1 Garbage Collection" description="Extra information specific to G1 Garbage Collections">
+    <Field type="uint" name="gcId" label="GC Identifier" relation="GcId" />
+    <Field type="G1YCType" name="type" label="Type" />
+  </Event>
+
+  <Event name="G1MMU" category="Java Virtual Machine, GC, Detailed" label="G1 MMU Information" startTime="false">
+    <Field type="uint" name="gcId" label="GC Identifier" relation="GcId" />
+    <Field type="long" contentType="millis" name="timeSlice" label="Time Slice" description="Time slice used to calculate MMU" />
+    <Field type="long" contentType="millis" name="gcTime" label="GC Time" description="Time stopped because of GC during last time slice" />
+    <Field type="long" contentType="millis" name="pauseTarget" label="Pause Target" description="Max time allowed to be spent on GC during last time slice" />
+  </Event>
+
+  <Event name="EvacuationInformation" category="Java Virtual Machine, GC, Detailed" label="Evacuation Information" startTime="false">
+    <Field type="uint" name="gcId" label="GC Identifier" relation="GcId" />
+    <Field type="uint" name="cSetRegions" label="Collection Set Regions" />
+    <Field type="ulong" contentType="bytes" name="cSetUsedBefore" label="Collection Set Before" description="Memory usage before GC in the collection set regions" />
+    <Field type="ulong" contentType="bytes" name="cSetUsedAfter" label="Collection Set After" description="Memory usage after GC in the collection set regions" />
+    <Field type="uint" name="allocationRegions" label="Allocation Regions" description="Regions chosen as allocation regions during evacuation (includes survivors and old space regions)" />
+    <Field type="ulong" contentType="bytes" name="allocationRegionsUsedBefore" label="Allocation Regions Before" description="Memory usage before GC in allocation regions" />
+    <Field type="ulong" contentType="bytes" name="allocationRegionsUsedAfter" label="Allocation Regions After" description="Memory usage after GC in allocation regions" />
+    <Field type="ulong" contentType="bytes" name="bytesCopied" label="Bytes Copied" />
+    <Field type="uint" name="regionsFreed" label="Regions Freed" />
+  </Event>
+
+  <Event name="GCReferenceStatistics" category="Java Virtual Machine, GC, Reference" label="GC Reference Statistics" startTime="false"
+    description="Total count of processed references during GC">
+    <Field type="uint" name="gcId" label="GC Identifier" relation="GcId" />
+    <Field type="ReferenceType" name="type" label="Type" />
+    <Field type="ulong" name="count" label="Total Count" />
+  </Event>
+
+  <Type name="CopyFailed">
+    <Field type="ulong" name="objectCount" label="Object Count" />
+    <Field type="ulong" contentType="bytes" name="firstSize" label="First Failed Object Size" />
+    <Field type="ulong" contentType="bytes" name="smallestSize" label="Smallest Failed Object Size" />
+    <Field type="ulong" contentType="bytes" name="totalSize" label="Total Object Size" />
+  </Type>
+
+  <Event name="ObjectCountAfterGC" category="Java Virtual Machine, GC, Detailed" startTime="false" label="Object Count after GC">
+    <Field type="uint" name="gcId" label="GC Identifier" relation="GcId" />
+    <Field type="Class" name="objectClass" label="Object Class" />
+    <Field type="long" name="count" label="Count" />
+    <Field type="ulong" contentType="bytes" name="totalSize" label="Total Size" />
+  </Event>
+
+  <Type name="G1EvacuationStatistics">
+    <Field type="uint" name="gcId" label="GC Identifier" relation="GcId" />
+    <Field type="ulong" contentType="bytes" name="allocated" label="Allocated" description="Total memory allocated by PLABs" />
+    <Field type="ulong" contentType="bytes" name="wasted" label="Wasted" description="Total memory wasted within PLABs due to alignment or refill" />
+    <Field type="ulong" contentType="bytes" name="used" label="Used" description="Total memory occupied by objects within PLABs" />
+    <Field type="ulong" contentType="bytes" name="undoWaste" label="Undo Wasted" description="Total memory wasted due to allocation undo within PLABs" />
+    <Field type="ulong" contentType="bytes" name="regionEndWaste" label="Region End Wasted" description="Total memory wasted at the end of regions due to refill" />
+    <Field type="uint" contentType="bytes" name="regionsRefilled" label="Region Refills" description="Total memory wasted at the end of regions due to refill" />
+    <Field type="ulong" contentType="bytes" name="directAllocated" label="Allocated (direct)" description="Total memory allocated using direct allocation outside of PLABs" />
+    <Field type="ulong" contentType="bytes" name="failureUsed" label="Used (failure)" description="Total memory occupied by objects in regions where evacuation failed" />
+    <Field type="ulong" contentType="bytes" name="failureWaste" label="Wasted (failure)" description="Total memory left unused in regions where evacuation failed" />
+  </Type>
+
+  <Event name="G1EvacuationYoungStatistics" category="Java Virtual Machine, GC, Detailed" label="G1 Evacuation Statistics for Young" startTime="false"
+    description="Memory related evacuation statistics during GC for the young generation">
+    <Field type="G1EvacuationStatistics" struct="true" name="statistics" label="Evacuation Statistics" />
+  </Event>
+
+  <Event name="G1EvacuationOldStatistics" category="Java Virtual Machine, GC, Detailed" label="G1 Evacuation Memory Statistics for Old" startTime="false"
+    description="Memory related evacuation statistics during GC for the old generation">
+    <Field type="G1EvacuationStatistics" struct="true" name="statistics" label="Evacuation Statistics" />
+  </Event>
+
+  <Event name="G1BasicIHOP" category="Java Virtual Machine, GC, Detailed" label="G1 Basic IHOP Statistics" startTime="false"
+    description="Basic statistics related to current IHOP calculation">
+    <Field type="uint" name="gcId" label="GC Identifier" relation="GcId" />
+    <Field type="ulong" contentType="bytes" name="threshold" label="Current IHOP Threshold" description="Current IHOP threshold" />
+    <Field type="float" contentType="percentage" name="thresholdPercentage" label="Current IHOP Threshold" description="Current IHOP threshold in percent of old generation" />
+    <Field type="ulong" contentType="bytes" name="targetOccupancy" label="Target Occupancy" description="Target old generation occupancy to reach at the start of mixed GC" />
+    <Field type="ulong" contentType="bytes" name="currentOccupancy" label="Current Occupancy" description="Current old generation occupancy" />
+    <Field type="ulong" contentType="bytes" name="recentMutatorAllocationSize" label="Recent Mutator Allocation Size"
+      description="Mutator allocation during mutator operation in the most recent interval" />
+    <Field type="long" contentType="millis" name="recentMutatorDuration" label="Recent Mutator Duration" description="Time the mutator ran in the most recent interval" />
+    <Field type="double" name="recentAllocationRate" label="Recent Allocation Rate" description="Allocation rate of the mutator in the most recent interval in bytes/second" />
+    <Field type="long" contentType="millis" name="lastMarkingDuration" label="Last Marking Duration" description="Last time from the end of the last initial mark to the first mixed GC" />
+  </Event>
+
+  <Event name="G1AdaptiveIHOP" category="Java Virtual Machine, GC, Detailed" label="G1 Adaptive IHOP Statistics" startTime="false"
+    description="Statistics related to current adaptive IHOP calculation">
+    <Field type="uint" name="gcId" label="GC Identifier" relation="GcId" />
+    <Field type="ulong" contentType="bytes" name="threshold" label="Threshold" description="Current IHOP Threshold" />
+    <Field type="float" contentType="percentage" name="thresholdPercentage" label="Threshold" description="Current IHOP threshold in percent of the internal target occupancy" />
+    <Field type="ulong" contentType="bytes" name="ihopTargetOccupancy" label="IHOP Target Occupancy" description="Internal target old generation occupancy to reach at the start of mixed GC" />
+    <Field type="ulong" contentType="bytes" name="currentOccupancy" label="Current Occupancy" description="Current old generation occupancy" />
+    <Field type="ulong" contentType="bytes" name="additionalBufferSize" label="Additional Buffer" description="Additional buffer size" experimental="true" />
+    <Field type="double" name="predictedAllocationRate" label="Predicted Allocation Rate" description="Current predicted allocation rate for the mutator in bytes/second" />
+    <Field type="long" contentType="millis" name="predictedMarkingDuration" label="Predicted Marking Duration"
+      description="Current predicted time from the end of the last initial mark to the first mixed GC" />
+    <Field type="boolean" name="predictionActive" label="Prediction Active" description="Indicates whether the adaptive IHOP prediction is active" />
+  </Event>
+
+  <Event name="PromoteObjectInNewPLAB" category="Java Virtual Machine, GC, Detailed" label="Promotion in new PLAB"
+    description="Object survived scavenge and was copied to a new Promotion Local Allocation Buffer (PLAB). Supported GCs are Parallel Scavange, G1 and CMS with Parallel New. Due to promotion being done in parallel an object might be reported multiple times as the GC threads race to copy all objects."
+    thread="true" stackTrace="false" startTime="false">
+    <Field type="uint" name="gcId" label="GC Identifier" relation="GcId" description="Identifier signifying GC during which the object was promoted" />
+    <Field type="Class" name="objectClass" label="Object Class" description="Class of promoted object" />
+    <Field type="ulong" contentType="bytes" name="objectSize" label="Object Size" description="Size of promoted object" />
+    <Field type="uint" name="tenuringAge" label="Object Tenuring Age"
+      description="Tenuring age of a surviving object before being copied. The tenuring age of an object is a value between 0-15 and is incremented each scavange the object survives. Newly allocated objects have tenuring age 0." />
+    <Field type="boolean" name="tenured" label="Tenured" description="True if object was promoted to Old space, otherwise the object was aged and copied to a Survivor space" />
+    <Field type="ulong" contentType="bytes" name="plabSize" label="PLAB Size" description="Size of the allocated PLAB to which the object was copied" />
+  </Event>
+
+  <Event name="PromoteObjectOutsidePLAB" category="Java Virtual Machine, GC, Detailed" label="Promotion outside PLAB"
+    description="Object survived scavenge and was copied directly to the heap. Supported GCs are Parallel Scavange, G1 and CMS with Parallel New. Due to promotion being done in parallel an object might be reported multiple times as the GC threads race to copy all objects."
+    thread="true" stackTrace="false" startTime="false">
+    <Field type="uint" name="gcId" label="GC Identifier" relation="GcId" description="Identifier signifying GC during which the object was promoted" />
+    <Field type="Class" name="objectClass" label="Object Class" description="Class of promoted object" />
+    <Field type="ulong" contentType="bytes" name="objectSize" label="Object Size" description="Size of promoted object" />
+    <Field type="uint" name="tenuringAge" label="Object Tenuring Age"
+      description="Tenuring age of a surviving object before being copied. The tenuring age of an object is a value between 0-15 and is incremented each scavange the object survives. Newly allocated objects have tenuring age 0." />
+    <Field type="boolean" name="tenured" label="Tenured" description="True if object was promoted to Old space, otherwise the object was aged and copied to a Survivor space" />
+  </Event>
+
+  <Event name="PromotionFailed" category="Java Virtual Machine, GC, Detailed" label="Promotion Failed" startTime="false" description="Promotion of an object failed">
+    <Field type="uint" name="gcId" label="GC Identifier" relation="GcId" />
+    <Field type="CopyFailed" struct="true" name="promotionFailed" label="Promotion Failed Data" />
+    <Field type="Thread" name="thread" label="Running thread" />
+  </Event>
+
+  <Event name="EvacuationFailed" category="Java Virtual Machine, GC, Detailed" label="Evacuation Failed" startTime="false" description="Evacuation of an object failed">
+    <Field type="uint" name="gcId" label="GC Identifier" relation="GcId" />
+    <Field type="CopyFailed" struct="true" name="evacuationFailed" label="Evacuation Failed Data" />
+  </Event>
+
+  <Event name="ConcurrentModeFailure" category="Java Virtual Machine, GC, Detailed" label="Concurrent Mode Failure" startTime="false" description="Concurrent Mode failed">
+    <Field type="uint" name="gcId" label="GC Identifier" relation="GcId" />
+  </Event>
+
+  <Event name="GCPhasePause" category="Java Virtual Machine, GC, Phases" label="GC Phase Pause" thread="true">
+    <Field type="uint" name="gcId" label="GC Identifier" relation="GcId" />
+    <Field type="string" name="name" label="Name" />
+  </Event>
+
+  <Event name="GCPhasePauseLevel1" category="Java Virtual Machine, GC, Phases" label="GC Phase Pause Level 1" thread="true">
+    <Field type="uint" name="gcId" label="GC Identifier" relation="GcId" />
+    <Field type="string" name="name" label="Name" />
+  </Event>
+
+  <Event name="GCPhasePauseLevel2" category="Java Virtual Machine, GC, Phases" label="GC Phase Pause Level 2" thread="true">
+    <Field type="uint" name="gcId" label="GC Identifier" relation="GcId" />
+    <Field type="string" name="name" label="Name" />
+  </Event>
+
+  <Event name="GCPhasePauseLevel3" category="Java Virtual Machine, GC, Phases" label="GC Phase Pause Level 3" thread="true">
+    <Field type="uint" name="gcId" label="GC Identifier" relation="GcId" />
+    <Field type="string" name="name" label="Name" />
+  </Event>
+
+  <Event name="GCPhasePauseLevel4" category="Java Virtual Machine, GC, Phases" label="GC Phase Pause Level 4" thread="true">
+    <Field type="uint" name="gcId" label="GC Identifier" relation="GcId" />
+    <Field type="string" name="name" label="Name" />
+  </Event>
+
+  <Event name="GCPhaseConcurrent" category="Java Virtual Machine, GC, Phases" label="GC Phase Concurrent" thread="true">
+    <Field type="uint" name="gcId" label="GC Identifier" relation="GcId" />
+    <Field type="string" name="name" label="Name" />
+  </Event>
+
+  <Event name="AllocationRequiringGC" category="Java Virtual Machine, GC, Detailed" label="Allocation Requiring GC" thread="true" stackTrace="true"
+    startTime="false">
+    <Field type="uint" name="gcId" label="Pending GC Identifier" relation="GcId" />
+    <Field type="ulong" contentType="bytes" name="size" label="Allocation Size" />
+  </Event>
+
+  <Event name="TenuringDistribution" category="Java Virtual Machine, GC, Detailed" label="Tenuring Distribution" startTime="false">
+    <Field type="uint" name="gcId" label="GC Identifier" relation="GcId" />
+    <Field type="uint" name="age" label="Age" />
+    <Field type="ulong" contentType="bytes" name="size" label="Size" />
+  </Event>
+
+  <Event name="G1HeapRegionTypeChange" category="Java Virtual Machine, GC, Detailed" label="G1 Heap Region Type Change" description="Information about a G1 heap region type change"
+    startTime="false">
+    <Field type="uint" name="index" label="Index" />
+    <Field type="G1HeapRegionType" name="from" label="From" />
+    <Field type="G1HeapRegionType" name="to" label="To" />
+    <Field type="ulong" contentType="address" name="start" label="Start" />
+    <Field type="ulong" contentType="bytes" name="used" label="Used" />
+  </Event>
+
+  <Event name="Compilation" category="Java Virtual Machine, Compiler" label="Compilation" thread="true">
+    <Field type="Method" name="method" label="Java Method" />
+    <Field type="uint" name="compileId" label="Compilation Identifier" relation="CompileId" />
+    <Field type="ushort" name="compileLevel" label="Compilation Level" />
+    <Field type="boolean" name="succeded" label="Succeeded" />
+    <Field type="boolean" name="isOsr" label="On Stack Replacement" />
+    <Field type="ulong" contentType="bytes" name="codeSize" label="Compiled Code Size" />
+    <Field type="ulong" contentType="bytes" name="inlinedBytes" label="Inlined Code Size" />
+  </Event>
+
+  <Event name="CompilerPhase" category="Java Virtual Machine, Compiler" label="Compiler Phase" thread="true" >
+    <Field type="CompilerPhaseType" name="phase" label="Compile Phase" />
+    <Field type="uint" name="compileId" label="Compilation Identifier" relation="CompileId" />
+    <Field type="ushort" name="phaseLevel" label="Phase Level" />
+  </Event>
+
+  <Event name="CompilationFailure" category="Java Virtual Machine, Compiler" label="Compilation Failure" thread="true"  startTime="false">
+    <Field type="string" name="failureMessage" label="Failure Message" />
+    <Field type="uint" name="compileId" label="Compilation Identifier" relation="CompileId" />
+  </Event>
+  
+  <Type name="CalleeMethod">
+    <Field type="string" name="type" label="Class" />
+    <Field type="string" name="name" label="Method Name" />
+    <Field type="string" name="descriptor" label="Method Descriptor" />
+  </Type>
+
+  <Event name="CompilerInlining" category="Java Virtual Machine, Compiler, Optimization" label="Method Inlining" thread="true" startTime="false">
+    <Field type="uint" name="compileId" label="Compilation Identifier" relation="CompileId" />
+    <Field type="Method" name="caller" label="Caller Method" />
+    <Field type="CalleeMethod" name="callee" struct="true" label="Callee Method" />
+    <Field type="boolean" name="succeeded" label="Succeeded" />
+    <Field type="string" name="message" label="Message" />
+    <Field type="int" name="bci" label="Byte Code Index" />
+  </Event>
+
+  <Event name="SweepCodeCache" category="Java Virtual Machine, Code Sweeper" label="Sweep Code Cache" thread="true" >
+    <Field type="int" name="sweepId" label="Sweep Identifier" relation="SweepId" />
+    <Field type="uint" name="sweptCount" label="Methods Swept" />
+    <Field type="uint" name="flushedCount" label="Methods Flushed" />
+    <Field type="uint" name="zombifiedCount" label="Methods Zombified" />
+  </Event>
+
+  <Event name="CodeCacheFull" category="Java Virtual Machine, Code Cache" label="Code Cache Full" thread="true" startTime="false">
+    <Field type="CodeBlobType" name="codeBlobType" label="Code Heap" />
+    <Field type="ulong" contentType="address" name="startAddress" label="Start Address" />
+    <Field type="ulong" contentType="address" name="commitedTopAddress" label="Commited Top" />
+    <Field type="ulong" contentType="address" name="reservedTopAddress" label="Reserved Top" />
+    <Field type="int" name="entryCount" label="Entries" />
+    <Field type="int" name="methodCount" label="Methods" />
+    <Field type="int" name="adaptorCount" label="Adaptors" />
+    <Field type="ulong" contentType="bytes" name="unallocatedCapacity" label="Unallocated" />
+    <Field type="int" name="fullCount" label="Full Count" />
+  </Event>
+
+  <Event name="SafepointBegin" category="Java Virtual Machine, Runtime, Safepoint" label="Safepoint Begin" description="Safepointing begin" thread="true">
+    <Field type="int" name="safepointId" label="Safepoint Identifier" relation="SafepointId" />
+    <Field type="int" name="totalThreadCount" label="Total Threads" description="The total number of threads at the start of safe point" />
+    <Field type="int" name="jniCriticalThreadCount" label="JNI Critical Threads" description="The number of threads in JNI critical sections" />
+  </Event>
+
+  <Event name="SafepointStateSynchronization" category="Java Virtual Machine, Runtime, Safepoint" label="Safepoint State Synchronization" description="Synchronize run state of threads"
+    thread="true">
+    <Field type="int" name="safepointId" label="Safepoint Identifier" relation="SafepointId" />
+    <Field type="int" name="initialThreadCount" label="Initial Threads" description="The number of threads running at the beginning of state check" />
+    <Field type="int" name="runningThreadCount" label="Running Threads" description="The number of threads still running" />
+    <Field type="int" name="iterations" label="Iterations" description="Number of state check iterations" />
+  </Event>
+
+  <Event name="SafepointWaitBlocked" category="Java Virtual Machine, Runtime, Safepoint" label="Safepoint Wait Blocked" description="Safepointing begin waiting on running threads to block"
+    thread="true">
+    <Field type="int" name="safepointId" label="Safepoint Identifier" relation="SafepointId" />
+    <Field type="int" name="runningThreadCount" label="Running Threads" description="The number running of threads wait for safe point" />
+  </Event>
+
+  <Event name="SafepointCleanup" category="Java Virtual Machine, Runtime, Safepoint" label="Safepoint Cleanup" description="Safepointing begin running cleanup tasks"
+    thread="true">
+    <Field type="int" name="safepointId" label="Safepoint Identifier" relation="SafepointId" />
+  </Event>
+
+  <Event name="SafepointCleanupTask" category="Java Virtual Machine, Runtime, Safepoint" label="Safepoint Cleanup Task" description="Safepointing begin running cleanup tasks"
+    thread="true">
+    <Field type="int" name="safepointId" label="Safepoint Identifier" relation="SafepointId" />
+    <Field type="string" name="name" label="Task Name" description="The task name" />
+  </Event>
+
+  <Event name="SafepointEnd" category="Java Virtual Machine, Runtime, Safepoint" label="Safepoint End" description="Safepointing end" thread="true">
+    <Field type="int" name="safepointId" label="Safepoint Identifier" relation="SafepointId" />
+  </Event>
+
+  <Event name="ExecuteVMOperation" category="Java Virtual Machine, Runtime" label="VM Operation" description="Execution of a VM Operation" thread="true">
+    <Field type="VMOperationType" name="operation" label="Operation" />
+    <Field type="boolean" name="safepoint" label="At Safepoint" description="If the operation occured at a safepoint" />
+    <Field type="boolean" name="blocking" label="Caller Blocked" description="If the calling thread was blocked until the operation was complete" />
+    <Field type="Thread" name="caller" label="Caller" transition="from"
+      description="Thread requesting operation. If non-blocking, will be set to 0 indicating thread is unknown" />
+    <Field type="int" name="safepointId" label="Safepoint Identifier" description="The safepoint (if any) under which this operation was completed"
+      relation="SafepointId" />
+  </Event>
+
+  <Event name="Shutdown" category="Java Virtual Machine, Runtime" label="VM Shutdown" description="VM shutting down" thread="true" stackTrace="true"
+    startTime="false">
+    <Field type="string" name="reason" label="Reason" description="Reason for VM shutdown" />
+  </Event>
+
+  <Event name="ObjectAllocationInNewTLAB" category="Java Application" label="Allocation in new TLAB" description="Allocation in new Thread Local Allocation Buffer"
+    thread="true" stackTrace="true" startTime="false">
+    <Field type="Class" name="objectClass" label="Object Class" description="Class of allocated object" />
+    <Field type="ulong" contentType="bytes" name="allocationSize" label="Allocation Size" />
+    <Field type="ulong" contentType="bytes" name="tlabSize" label="TLAB Size" />
+  </Event>
+
+  <Event name="ObjectAllocationOutsideTLAB" category="Java Application" label="Allocation outside TLAB" description="Allocation outside Thread Local Allocation Buffers"
+    thread="true" stackTrace="true" startTime="false">
+    <Field type="Class" name="objectClass" label="Object Class" description="Class of allocated object" />
+    <Field type="ulong" contentType="bytes" name="allocationSize" label="Allocation Size" />
+  </Event>
+
+  <Event name="OldObjectSample" category="Java Application" label="Old Object Sample" description="A potential memory leak" stackTrace="true" thread="true"
+    startTime="false" cutoff="true">
+    <Field type="Ticks" name="allocationTime" label="Allocation Time" />
+    <Field type="ulong" contentType="bytes" name="lastKnownHeapUsage" label="Last Known Heap Usage" />
+    <Field type="OldObject" name="object" label="Object" />
+    <Field type="int" name="arrayElements" label="Array Elements" description="If the object is an array, the number of elements, or -1 if it is not an array" />
+    <Field type="OldObjectGcRoot" name="root" label="GC Root" />
+  </Event>
+
+  <Event name="DumpReason" category="Flight Recorder" label="Recording Reason" 
+         description="Who requested the recording and why" 
+         startTime="false">
+    <Field type="string" name="reason" label="Reason" description="Reason for writing recording data to disk" />
+    <Field type="int" name="recordingId" label="Recording Id" description="Id of the recording that triggered the dump, or -1 if it was not related to a recording" />
+  </Event>
+
+  <Event name="DataLoss" category="Flight Recorder" label="Data Loss" 
+         description="Data could not be copied out from a buffer, typically because of contention"
+         startTime="false">
+    <Field type="ulong" contentType="bytes" name="amount" label="Amount" description="Amount lost data" />
+    <Field type="ulong" contentType="bytes" name="total" label="Total" description="Total lost amount for thread" />
+  </Event>
+
+  <Event name="JVMInformation" category="Java Virtual Machine" label="JVM Information" 
+         description="Description of JVM and the Java application"
+         period="endChunk">
+    <Field type="string" name="jvmName" label="JVM Name" />
+    <Field type="string" name="jvmVersion" label="JVM Version" />
+    <Field type="string" name="jvmArguments" label="JVM Command Line Arguments" />
+    <Field type="string" name="jvmFlags" label="JVM Settings File Arguments" />
+    <Field type="string" name="javaArguments" label="Java Application Arguments" />
+    <Field type="long" contentType="epochmillis" name="jvmStartTime" label="JVM Start Time" />
+  </Event>
+
+  <Event name="OSInformation" category="Operating System" label="OS Information" period="endChunk">
+    <Field type="string" name="osVersion" label="OS Version" />
+  </Event>
+
+  <Event name="InitialSystemProperty" category="Java Virtual Machine" label="Initial System Property" description="System Property at JVM start" period="endChunk">
+    <Field type="string" name="key" label="Key" />
+    <Field type="string" name="value" label="Value" />
+  </Event>
+
+  <Event name="InitialEnvironmentVariable" category="Operating System" label="Initial Environment Variable" period="endChunk">
+    <Field type="string" name="key" label="Key" />
+    <Field type="string" name="value" label="Value" />
+  </Event>
+
+  <Event name="SystemProcess" category="Operating System" label="System Process" period="endChunk">
+    <Field type="string" name="pid" label="Process Identifier" />
+    <Field type="string" name="commandLine" label="Command Line" />
+  </Event>
+
+  <Event name="CPUInformation" category="Operating System, Processor" label="CPU Information" period="endChunk">
+    <Field type="string" name="cpu" label="Type" />
+    <Field type="string" name="description" label="Description" />
+    <Field type="uint" name="sockets" label="Sockets" />
+    <Field type="uint" name="cores" label="Cores" />
+    <Field type="uint" name="hwThreads" label="Hardware Threads" />
+  </Event>
+
+  <Event name="CPUTimeStampCounter" category="Operating System, Processor" label="CPU Time Stamp Counter" period="endChunk">
+    <Field type="boolean" name="fastTimeEnabled" label="Fast Time" />
+    <Field type="boolean" name="fastTimeAutoEnabled" label="Trusted Platform" />
+    <Field type="long" name="osFrequency" label="OS Frequency Per Second" />
+    <Field type="long" name="fastTimeFrequency" label="Fast Time Frequency per Second" />
+  </Event>
+
+  <Event name="CPULoad" category="Operating System, Processor" label="CPU Load" description="OS CPU Load" period="everyChunk">
+    <Field type="float" contentType="percentage" name="jvmUser" label="JVM User" />
+    <Field type="float" contentType="percentage" name="jvmSystem" label="JVM System" />
+    <Field type="float" contentType="percentage" name="machineTotal" label="Machine Total" />
+  </Event>
+
+  <Event name="ThreadCPULoad" category="Operating System, Processor" label="Thread CPU Load" period="everyChunk" thread="true">
+    <Field type="float" contentType="percentage" name="user" label="User Mode CPU Load" description="User mode thread CPU load" />
+    <Field type="float" contentType="percentage" name="system" label="System Mode CPU Load" description="System mode thread CPU load" />
+  </Event>
+
+  <Event name="ThreadContextSwitchRate" category="Operating System, Processor" label="Thread Context Switch Rate" period="everyChunk">
+    <Field type="float" name="switchRate" label="Switch Rate" description="Number of context switches per second" />
+  </Event>
+
+  <Event name="NetworkUtilization" category="Operating System, Network" label="Network Utilization" period="everyChunk">
+    <Field type="NetworkInterfaceName" name="networkInterface" label="Network Interface" description="Network Interface Name"/>
+    <Field type="long" contentType="bytes" name="readRate" label="Read Rate" description="Number of incoming bytes per second"/>
+    <Field type="long" contentType="bytes" name="writeRate" label="Write Rate" description="Number of outgoing bytes per second"/>
+  </Event>
+
+  <Event name="JavaThreadStatistics" category="Java Application, Statistics" label="Java Thread Statistics" period="everyChunk">
+    <Field type="long" name="activeCount" label="Active Threads" description="Number of live active threads including both daemon and non-daemon threads" />
+    <Field type="long" name="daemonCount" label="Daemon Threads" description="Number of live daemon threads" />
+    <Field type="long" name="accumulatedCount" label="Accumulated Threads" description="Number of threads created and also started since JVM start" />
+    <Field type="long" name="peakCount" label="Peak Threads" description="Peak live thread count since JVM start or when peak count was reset" />
+  </Event>
+
+  <Event name="ClassLoadingStatistics" category="Java Application, Statistics" label="Class Loading Statistics" period="everyChunk">
+    <Field type="long" name="loadedClassCount" label="Loaded Class Count" description="Number of classes loaded since JVM start" />
+    <Field type="long" name="unloadedClassCount" label="Unloaded Class Count" description="Number of classes unloaded since JVM start" />
+  </Event>
+
+  <Event name="ClassLoaderStatistics" category="Java Application, Statistics" label="Class Loader Statistics" period="everyChunk">
+    <Field type="ClassLoader" name="classLoader" label="Class Loader" />
+    <Field type="ClassLoader" name="parentClassLoader" label="Parent Class Loader" />
+    <Field type="ulong" contentType="address" name="classLoaderData" label="ClassLoaderData pointer" description="Pointer to the ClassLoaderData structure in the JVM" />
+    <Field type="long" name="classCount" label="Classes" description="Number of loaded classes" />
+    <Field type="ulong" contentType="bytes" name="chunkSize" label="Total Chunk Size" description="Total size of all allocated metaspace chunks (each chunk has several blocks)" />
+    <Field type="ulong" contentType="bytes" name="blockSize" label="Total Block Size" description="Total size of all allocated metaspace blocks (each chunk has several blocks)" />
+    <Field type="long" name="anonymousClassCount" label="Unsafe Anonymous Classes" description="Number of loaded classes to support invokedynamic" />
+    <Field type="ulong" contentType="bytes" name="anonymousChunkSize" label="Total Unsafe Anonymous Classes Chunk Size"
+      description="Total size of all allocated metaspace chunks for anonymous classes (each chunk has several blocks)" />
+    <Field type="ulong" contentType="bytes" name="anonymousBlockSize" label="Total Unsafe Anonymous Classes Block Size"
+      description="Total size of all allocated metaspace blocks for anonymous classes (each chunk has several blocks)" />
+  </Event>
+
+  <Event name="ThreadAllocationStatistics" category="Java Application, Statistics" label="Thread Allocation Statistics" period="everyChunk">
+    <Field type="ulong" contentType="bytes" name="allocated" label="Allocated" description="Approximate number of bytes allocated since thread start" />
+    <Field type="Thread" name="thread" label="Thread" />
+  </Event>
+
+  <Event name="PhysicalMemory" category="Operating System, Memory" label="Physical Memory" description="OS Physical Memory" period="everyChunk">
+    <Field type="ulong" contentType="bytes" name="totalSize" label="Total Size" description="Total amount of physical memory available to OS" />
+    <Field type="ulong" contentType="bytes" name="usedSize" label="Used Size" description="Total amount of physical memory in use" />
+  </Event>
+
+  <Event name="ExecutionSample" category="Java Virtual Machine, Profiling" label="Method Profiling Sample" description="Snapshot of a threads state"
+    period="everyChunk">
+    <Field type="Thread" name="sampledThread" label="Thread" />
+    <Field type="StackTrace" name="stackTrace" label="Stack Trace" />
+    <Field type="ThreadState" name="state" label="Thread State" />
+  </Event>
+
+  <Event name="NativeMethodSample" category="Java Virtual Machine, Profiling" label="Method Profiling Sample Native" description="Snapshot of a threads state when in native"
+    period="everyChunk">
+    <Field type="Thread" name="sampledThread" label="Thread" />
+    <Field type="StackTrace" name="stackTrace" label="Stack Trace" />
+    <Field type="ThreadState" name="state" label="Thread State" />
+  </Event>
+
+  <Event name="ThreadDump" category="Java Virtual Machine, Runtime" label="Thread Dump" period="everyChunk">
+    <Field type="string" name="result" label="Thread Dump" />
+  </Event>
+
+  <Event name="NativeLibrary" category="Java Virtual Machine, Runtime" label="Native Library" period="everyChunk">
+    <Field type="string" name="name" label="Name" />
+    <Field type="ulong" contentType="address" name="baseAddress" label="Base Address" description="Starting address of the module" />
+    <Field type="ulong" contentType="address" name="topAddress" label="Top Address" description="Ending address of the module" />
+  </Event>
+
+  <!-- XXX
+  <Event name="ModuleRequire" category="Java Virtual Machine, Runtime, Modules" label="Module Require" thread="false" period="everyChunk"
+    description="A directed edge representing a dependency">
+    <Field type="Module" name="source" label="Source Module" />
+    <Field type="Module" name="requiredModule" label="Required Module" />
+  </Event>
+
+  <Event name="ModuleExport" category="Java Virtual Machine, Runtime, Modules" label="Module Export" thread="false" period="everyChunk">
+    <Field type="Package" name="exportedPackage" label="Exported Package" />
+    <Field type="Module" name="targetModule" label="Target Module"
+      description="Module to which the package is qualifiedly exported.
+             If null, the package is unqualifiedly exported" />
+  </Event>
+  -->
+  <Event name="CompilerStatistics" category="Java Virtual Machine, Compiler" label="Compiler Statistics" thread="false" period="everyChunk" startTime="false">
+    <Field type="int" name="compileCount" label="Compiled Methods" />
+    <Field type="int" name="bailoutCount" label="Bailouts" />
+    <Field type="int" name="invalidatedCount" label="Invalidated Compilations" />
+    <Field type="int" name="osrCompileCount" label="OSR Compilations" />
+    <Field type="int" name="standardCompileCount" label="Standard Compilations" />
+    <Field type="ulong" contentType="bytes" name="osrBytesCompiled" label="OSR Bytes Compiled" />
+    <Field type="ulong" contentType="bytes" name="standardBytesCompiled" label="Standard Bytes Compiled" />
+    <Field type="ulong" contentType="bytes" name="nmetodsSize" label="Compilation Resulting Size" />
+    <Field type="ulong" contentType="bytes" name="nmetodCodeSize" label="Compilation Resulting Code Size" />
+    <Field type="long" contentType="millis" name="peakTimeSpent" label="Peak Time" />
+    <Field type="long" contentType="millis" name="totalTimeSpent" label="Total time" />
+  </Event>
+
+  <Event name="CompilerConfiguration" category="Java Virtual Machine, Compiler" label="Compiler Configuration" thread="false" period="endChunk" startTime="false">
+    <Field type="int" name="threadCount" label="Thread Count" />
+    <Field type="boolean" name="tieredCompilation" label="Tiered Compilation" />
+  </Event>
+
+  <Event name="CodeCacheStatistics" category="Java Virtual Machine, Code Cache" label="Code Cache Statistics" thread="false" period="everyChunk" startTime="false">
+    <Field type="CodeBlobType" name="codeBlobType" label="Code Heap" />
+    <Field type="ulong" contentType="address" name="startAddress" label="Start Address" />
+    <Field type="ulong" contentType="address" name="reservedTopAddress" label="Reserved Top" />
+    <Field type="int" name="entryCount" label="Entries" />
+    <Field type="int" name="methodCount" label="Methods" />
+    <Field type="int" name="adaptorCount" label="Adaptors" />
+    <Field type="ulong" contentType="bytes" name="unallocatedCapacity" label="Unallocated" />
+    <Field type="int" name="fullCount" label="Full Count" />
+  </Event>
+
+  <Event name="CodeCacheConfiguration" category="Java Virtual Machine, Code Cache" label="Code Cache Configuration" thread="false" period="endChunk" startTime="false">
+    <Field type="ulong" contentType="bytes" name="initialSize" label="Initial Size" />
+    <Field type="ulong" contentType="bytes" name="reservedSize" label="Reserved Size" />
+    <Field type="ulong" contentType="bytes" name="nonNMethodSize" label="Non-nmethod Size" />
+    <Field type="ulong" contentType="bytes" name="profiledSize" label="Profiled Size" />
+    <Field type="ulong" contentType="bytes" name="nonProfiledSize" label="Non-profiled Size" />
+    <Field type="ulong" contentType="bytes" name="expansionSize" label="Expansion size" />
+    <Field type="ulong" contentType="bytes" name="minBlockLength" label="Minimum Block Length" />
+    <Field type="ulong" contentType="address" name="startAddress" label="Start Address" />
+    <Field type="ulong" contentType="address" name="reservedTopAddress" label="Reserved Top" />
+  </Event>
+
+  <Event name="CodeSweeperStatistics" category="Java Virtual Machine, Code Sweeper" label="Code Sweeper Statistics" thread="false" period="everyChunk" startTime="false">
+    <Field type="int" name="sweepCount" label="Sweeps" />
+    <Field type="int" name="methodReclaimedCount" label="Methods Reclaimed" />
+    <Field type="Tickspan" name="totalSweepTime" label="Time Spent Sweeping" />
+    <Field type="Tickspan" name="peakFractionTime" label="Peak Time Fraction Sweep" />
+    <Field type="Tickspan" name="peakSweepTime" label="Peak Time Full Sweep" />
+  </Event>
+
+  <Event name="CodeSweeperConfiguration" category="Java Virtual Machine, Code Sweeper" label="Code Sweeper Configuration" thread="false" period="endChunk" startTime="false">
+    <Field type="boolean" name="sweeperEnabled" label="Code Sweeper Enabled" />
+    <Field type="boolean" name="flushingEnabled" label="Code Cache Flushing Enabled" />
+  </Event>
+
+  <Event name="IntFlag" category="Java Virtual Machine, Flag" period="endChunk" label="Int Flag">
+    <Field type="string" name="name" label="Name" />
+    <Field type="int" name="value" label="Value" />
+    <Field type="FlagValueOrigin" name="origin" label="Origin" />
+  </Event>
+
+  <Event name="UnsignedIntFlag" category="Java Virtual Machine, Flag" period="endChunk" label="Unsigned Int Flag">
+    <Field type="string" name="name" label="Name" />
+    <Field type="uint" name="value" label="Value" />
+    <Field type="FlagValueOrigin" name="origin" label="Origin" />
+  </Event>
+
+  <Event name="LongFlag" category="Java Virtual Machine, Flag" period="endChunk" label="Long Flag">
+    <Field type="string" name="name" label="Name" />
+    <Field type="long" name="value" label="Value" />
+    <Field type="FlagValueOrigin" name="origin" label="Origin" />
+  </Event>
+
+  <Event name="UnsignedLongFlag" category="Java Virtual Machine, Flag" period="endChunk" label="Unsigned Long Flag">
+    <Field type="string" name="name" label="Name" />
+    <Field type="ulong" name="value" label="Value" />
+    <Field type="FlagValueOrigin" name="origin" label="Origin" />
+  </Event>
+
+  <Event name="DoubleFlag" category="Java Virtual Machine, Flag" period="endChunk" label="Double Flag">
+    <Field type="string" name="name" label="Name" />
+    <Field type="double" name="value" label="Value" />
+    <Field type="FlagValueOrigin" name="origin" label="Origin" />
+  </Event>
+
+  <Event name="BooleanFlag" category="Java Virtual Machine, Flag" period="endChunk" label="Boolean Flag">
+    <Field type="string" name="name" label="Name" />
+    <Field type="boolean" name="value" label="Value" />
+    <Field type="FlagValueOrigin" name="origin" label="Origin" />
+  </Event>
+
+  <Event name="StringFlag" category="Java Virtual Machine, Flag" period="endChunk" label="String Flag">
+    <Field type="string" name="name" label="Name" />
+    <Field type="string" name="value" label="Value" />
+    <Field type="FlagValueOrigin" name="origin" label="Origin" />
+  </Event>
+
+  <Event name="ObjectCount" category="Java Virtual Machine, GC, Detailed" startTime="false" period="everyChunk" label="Object Count">
+    <Field type="uint" name="gcId" label="GC Identifier" relation="GcId" />
+    <Field type="Class" name="objectClass" label="Object Class" />
+    <Field type="long" name="count" label="Count" />
+    <Field type="ulong" contentType="bytes" name="totalSize" label="Total Size" />
+  </Event>
+
+  <Event name="G1HeapRegionInformation" category="Java Virtual Machine, GC, Detailed" label="G1 Heap Region Information" description="Information about a specific heap region in the G1 GC"
+    period="everyChunk">
+    <Field type="uint" name="index" label="Index" />
+    <Field type="G1HeapRegionType" name="type" label="Type" />
+    <Field type="ulong" contentType="address" name="start" label="Start" />
+    <Field type="ulong" contentType="bytes" name="used" label="Used" />
+  </Event>
+
+  <Event name="GCConfiguration" category="Java Virtual Machine, GC, Configuration" label="GC Configuration" description="The configuration of the garbage collector"
+    period="endChunk">
+    <Field type="GCName" name="youngCollector" label="Young Garbage Collector" description="The garbage collector used for the young generation" />
+    <Field type="GCName" name="oldCollector" label="Old Garbage Collector" description="The garbage collector used for the old generation" />
+    <Field type="uint" name="parallelGCThreads" label="Parallel GC Threads" description="Number of parallel threads to use for garbage collection" />
+    <Field type="uint" name="concurrentGCThreads" label="Concurrent GC Threads" description="Number of concurrent threads to use for garbage collection" />
+    <Field type="boolean" name="usesDynamicGCThreads" label="Uses Dynamic GC Threads" description="Whether a dynamic number of GC threads are used or not" />
+    <Field type="boolean" name="isExplicitGCConcurrent" label="Concurrent Explicit GC" description="Whether System.gc() is concurrent or not" />
+    <Field type="boolean" name="isExplicitGCDisabled" label="Disabled Explicit GC" description="Whether System.gc() will cause a garbage collection or not" />
+    <Field type="long" contentType="millis" name="pauseTarget" label="Pause Target" description="Target for GC pauses" />
+    <Field type="uint" name="gcTimeRatio" label="GC Time Ratio" description="Target for runtime vs garbage collection time" />
+  </Event>
+
+  <Event name="GCSurvivorConfiguration" category="Java Virtual Machine, GC, Configuration" label="GC Survivor Configuration"
+    description="The configuration of the survivors of garbage collection" period="endChunk">
+    <Field type="ubyte" name="maxTenuringThreshold" label="Maximum Tenuring Threshold" description="Upper limit for the age of how old objects to keep in the survivor area" />
+    <Field type="ubyte" name="initialTenuringThreshold" label="Initial Tenuring Threshold" description="Initial age limit for how old objects to keep in survivor area" />
+  </Event>
+
+  <Event name="GCTLABConfiguration" category="Java Virtual Machine, GC, Configuration" label="TLAB Configuration"
+    description="The configuration of the Thread Local Allocation Buffers (TLABs)" period="endChunk">
+    <Field type="boolean" name="usesTLABs" label="TLABs Used" description="If Thread Local Allocation Buffers (TLABs) are in use" />
+    <Field type="ulong" contentType="bytes" name="minTLABSize" label="Minimum TLAB Size" />
+    <Field type="ulong" contentType="bytes" name="tlabRefillWasteLimit" label="TLAB Refill Waste Limit" />
+  </Event>
+
+  <Event name="GCHeapConfiguration" category="Java Virtual Machine, GC, Configuration" label="GC Heap Configuration" description="The configuration of the garbage collected heap"
+    period="endChunk">
+    <Field type="ulong" contentType="bytes" name="minSize" label="Minimum Heap Size" />
+    <Field type="ulong" contentType="bytes" name="maxSize" label="Maximum Heap Size" />
+    <Field type="ulong" contentType="bytes" name="initialSize" label="Initial Heap Size" />
+    <Field type="boolean" name="usesCompressedOops" label="If Compressed Oops Are Used" description="If compressed Oops (Ordinary Object Pointers) are enabled" />
+    <Field type="NarrowOopMode" name="compressedOopsMode" label="Compressed Oops Mode" description="The kind of compressed oops being used" />
+    <Field type="ulong" contentType="bytes" name="objectAlignment" label="Object Alignment" description="Object alignment (in bytes) on the heap" />
+    <Field type="ubyte" name="heapAddressBits" label="Heap Address Size" description="Heap Address Size (in bits)" />
+  </Event>
+
+  <Event name="YoungGenerationConfiguration" category="Java Virtual Machine, GC, Configuration" label="Young Generation Configuration"
+    description="The configuration of the young generation of the garbage collected heap" period="endChunk">
+    <Field type="ulong" contentType="bytes" name="minSize" label="Minimum Young Generation Size" />
+    <Field type="ulong" contentType="bytes" name="maxSize" label="Maximum Young Generation Size" />
+    <Field type="uint" name="newRatio" label="New Ratio" description="The size of the young generation relative to the tenured generation" />
+  </Event>
+
+  <Event name="ZPageAllocation" category="Java Application" label="ZPage Allocation" description="Allocation of a ZPage" thread="true" stackTrace="false">
+     <Field type="ulong" contentType="bytes" name="pageSize" label="Page Size" />
+     <Field type="ulong" contentType="bytes" name="usedAfter" label="Used After" />
+     <Field type="ulong" contentType="bytes" name="freeAfter" label="Free After" />
+     <Field type="ulong" contentType="bytes" name="inCacheAfter" label="In Cache After" />
+     <Field type="boolean" name="nonBlocking" label="Non-blocking" />
+     <Field type="boolean" name="noReserve" label="No Reserve" />
+  </Event>
+
+  <Event name="ZThreadPhase" category="Java Virtual Machine, GC, Detailed" label="ZGC Thread Phase" thread="true">
+    <Field type="uint" name="gcId" label="GC Identifier" relation="GcId"/>
+    <Field type="string" name="name" label="Name" />
+  </Event>
+
+  <Event name="ZStatisticsCounter" category="Java Virtual Machine, GC, Detailed" label="Z Statistics Counter" thread="true">
+    <Field type="ZStatisticsCounterType" name="id" label="Id" />
+    <Field type="ulong" name="increment" label="Increment" />
+    <Field type="ulong" name="value" label="Value" />
+  </Event>
+
+  <Event name="ZStatisticsSampler" category="Java Virtual Machine, GC, Detailed" label="Z Statistics Sampler" thread="true">
+    <Field type="ZStatisticsSamplerType" name="id" label="Id" />
+    <Field type="ulong" name="value" label="Value" />
+  </Event>
+
+  <Type name="ZStatisticsCounterType" label="Z Statistics Counter">
+    <Field type="string" name="counter" label="Counter" />
+  </Type>
+
+  <Type name="ZStatisticsSamplerType" label="Z Statistics Sampler">
+    <Field type="string" name="sampler" label="Sampler" />
+  </Type>
+
+  <Type name="NetworkInterfaceName" label="Network Interface">
+    <Field type="string" name="networkInterface" label="Network Interface" description="Network Interface Name" />
+  </Type>
+
+  <Type name="Thread" label="Thread">
+    <Field type="string" name="osName" label="OS Thread Name" />
+    <Field type="long" name="osThreadId" label="OS Thread Id" />
+    <Field type="string" name="javaName" label="Java Thread Name" />
+    <Field type="long" name="javaThreadId" label="Java Thread Id" />
+    <Field type="ThreadGroup" name="group" label="Java Thread Group" />
+  </Type>
+
+  <Type name="ThreadGroup" label="Thread Group">
+    <Field type="ThreadGroup" name="parent" label="Parent" />
+    <Field type="string" name="name" label="Name" />
+  </Type>
+
+  <Type name="Class" label="Java Class">
+    <Field type="ClassLoader" name="classLoader" label="Class Loader" />
+    <Field type="Symbol" name="name" label="Name" />
+    <Field type="Package" name="package" label="Package" />
+    <Field type="int" name="modifiers" label="Access Modifiers" />
+  </Type>
+
+  <Type name="ClassLoader" label="Java Class Loader">
+    <Field type="Class" name="type" label="Type" />
+    <Field type="Symbol" name="name" label="Name" />
+  </Type>
+
+  <Type name="Method" label="Java Method">
+    <Field type="Class" name="type" label="Type" />
+    <Field type="Symbol" name="name" label="Name" />
+    <Field type="Symbol" name="descriptor" label="Descriptor" />
+    <Field type="int" name="modifiers" label="Access Modifiers" />
+    <Field type="boolean" name="hidden" label="Hidden" />
+  </Type>
+
+  <Type name="Symbol" label="Symbol">
+    <Field type="string" name="string" label="String" />
+  </Type>
+
+  <Type name="ThreadState" label="Java Thread State">
+    <Field type="string" name="name" label="Name" />
+  </Type>
+
+  <Type name="GCName" label="GC Name">
+    <Field type="string" name="name" label="Name" />
+  </Type>
+
+  <Type name="GCCause" label="GC Cause">
+    <Field type="string" name="cause" label="Cause" />
+  </Type>
+
+  <Type name="GCWhen" label="GC When">
+    <Field type="string" name="when" label="When" />
+  </Type>
+
+  <Type name="G1HeapRegionType" label="G1 Heap Region Type">
+    <Field type="string" name="type" label="Type" />
+  </Type>
+
+  <Type name="G1YCType" label="G1 YC Type">
+    <Field type="string" name="type" label="Type" />
+  </Type>
+
+  <Type name="GCThresholdUpdater" label="GC Threshold Updater">
+    <Field type="string" name="updater" label="Updater" />
+  </Type>
+
+  <Type name="ReferenceType" label="Reference Type">
+    <Field type="string" name="type" label="Type" />
+  </Type>
+
+  <Type name="MetadataType" label="Metadata Type">
+    <Field type="string" name="type" label="Type" />
+  </Type>
+
+  <Type name="MetaspaceObjectType" label="Metaspace Object Type">
+    <Field type="string" name="type" label="Type" />
+  </Type>
+
+  <Type name="NarrowOopMode" label="Narrow Oop Mode">
+    <Field type="string" name="mode" label="Mode" />
+  </Type>
+
+  <Type name="VMOperationType" label="VM Operation Type">
+    <Field type="string" name="type" label="Type" />
+  </Type>
+
+  <Type name="CompilerPhaseType" label="Compiler Phase Type">
+    <Field type="string" name="phase" label="Phase" />
+  </Type>
+
+  <Type name="FlagValueOrigin" label="Flag Value Origin">
+    <Field type="string" name="origin" label="Origin" />
+  </Type>
+
+  <Type name="CodeBlobType" label="Code Blob Type">
+    <Field type="string" name="type" label="Type" />
+  </Type>
+
+  <Type name="InflateCause" label="Inflation Cause">
+    <Field type="string" name="cause" label="Cause" />
+  </Type>
+
+  <!--
+  <Type name="Module" label="Module">
+    <Field type="Symbol" name="name" label="Name" />
+    <Field type="Symbol" name="version" label="Version" />
+    <Field type="Symbol" name="location" label="Location" />
+    <Field type="ClassLoader" name="classLoader" label="Class Loader" />
+  </Type>
+  -->
+
+  <Type name="Package" label="Package">
+    <Field type="Symbol" name="name" label="Name" />
+    <!-- <Field type="Module" name="module" label="Module" /> -->
+    <Field type="boolean" name="exported" label="Exported" />
+  </Type>
+
+  <Type name="StackTrace" label="Stacktrace">
+    <Field type="boolean" name="truncated" label="Truncated" />
+    <Field type="StackFrame" array="true" struct="true" name="frames" label="Stack Frames" />
+  </Type>
+
+  <Type name="FrameType" label="Frame type">
+    <Field type="string" name="description" label="Description" />
+  </Type>
+
+  <Type name="OldObjectRootSystem" label="GC Root System">
+    <Field type="string" name="system" label="System" />
+  </Type>
+
+  <Type name="OldObjectRootType" label="GC Root Type">
+    <Field type="string" name="type" label="Type" />
+  </Type>
+
+  <Type name="OldObjectGcRoot" label="GC Root">
+    <Field type="string" name="description" label="Root Description" description="Root information" />
+    <Field type="OldObjectRootSystem" name="system" label="System" description="The subsystem of origin for the root" />
+    <Field type="OldObjectRootType" name="type" label="Type" description="The root type" />
+  </Type>
+
+  <Type name="OldObjectArray" label="Old Object Array">
+    <Field type="int" name="size" label="Array Size" description="Size of array" />
+    <Field type="int" name="index" label="Index" description="Index in the array" />
+  </Type>
+
+  <Type name="OldObjectField" label="Old Object Field">
+    <Field type="string" name="name" label="Field" description="Name of field" />
+    <Field type="short" name="modifiers" label="Field Modifiers" description="Field modifiers" />
+  </Type>
+
+  <Type name="OldObject" label="Old Object">
+    <Field type="ulong" contentType="address" name="address" label="Memory Address" />
+    <Field type="Class" name="type" label="Java Class" />
+    <Field type="string" name="description" label="Object Description" description="Object description" />
+    <Field type="Reference" name="referrer" label="Referrer Object" description="Object referencing this object" />
+  </Type>
+
+  <Type name="Reference" label="Reference">
+    <Field type="OldObjectArray" name="array" label="Array Information" description="Array or null if it is not an array" />
+    <Field type="OldObjectField" name="field" label="Field Information" description="Field or null if it is an array" />
+    <Field type="OldObject" name="object" label="Object" description="Object holder for this reference" />
+    <Field type="int" name="skip" label="Skip value" description="The object is this many hops away" />
+  </Type>
+
+  <Type name="StackFrame">
+    <Field type="Method" name="method" label="Java Method" />
+    <Field type="int" name="lineNumber" label="Line Number" />
+    <Field type="int" name="bytecodeIndex" label="Bytecode Index" />
+    <Field type="FrameType" name="type" label="Frame Type" />
+  </Type>
+ 
+  <Relation name="JavaMonitorAddress"/>
+  <Relation name="SafepointId"/>
+  <Relation name="GcId"/>
+  <Relation name="CompileId" />
+  <Relation name="SweepId"/>
+ 
+  <XmlType name="Package" parameterType="const PackageEntry*" fieldType="const PackageEntry*"/>
+  <XmlType name="Class" javaType="java.lang.Class" parameterType="const Klass*" fieldType="const Klass*"/> 
+  <XmlType name="ClassLoader" parameterType="const ClassLoaderData*" fieldType="const ClassLoaderData*"/> 
+  <XmlType name="Method" parameterType="const Method*" fieldType="const Method*"/> 
+  <XmlType name="Thread" javaType="java.lang.Thread" parameterType="u8" fieldType="u8"/> 
+  <XmlType name="Tickspan" contentType="tickspan" javaType="long" parameterType="const Tickspan&amp;" fieldType="Tickspan"/> 
+  <XmlType name="Ticks" contentType="tickstamp" javaType="long" parameterType="const Ticks&amp;" fieldType="Ticks"/> 
+  <XmlType name="ulong" javaType="long" unsigned="true" parameterType="u8" fieldType="u8"/> 
+  <XmlType name="uint" javaType="int" unsigned="true" parameterType="unsigned" fieldType="unsigned"/> 
+  <XmlType name="ushort" javaType="short" unsigned="true" parameterType="u2" fieldType="u2"/> 
+  <XmlType name="ubyte" javaType="byte" unsigned="true" parameterType="u1" fieldType="u1"/> 
+  <XmlType name="long" javaType="long" parameterType="s8" fieldType="s8"/> 
+  <XmlType name="int" javaType="int" parameterType="s4" fieldType="s4"/> 
+  <XmlType name="short" javaType="short" parameterType="s2" fieldType="s2"/> 
+  <XmlType name="byte" javaType="byte"  parameterType="s1" fieldType="s1"/> 
+  <XmlType name="double" javaType="double" parameterType="double" fieldType="double"/> 
+  <XmlType name="float" javaType="float"  parameterType="float" fieldType="float"/> 
+  <XmlType name="boolean" javaType="boolean" parameterType="bool" fieldType="bool"/> 
+  <XmlType name="char" javaType="char" parameterType="char" fieldType="char"/> 
+  <XmlType name="string" javaType="java.lang.String" parameterType="const char*" fieldType="const char*"/> 
+ 
+  <XmlContentType name="bytes" annotationType="jdk.jfr.DataAmount" annotationValue="BYTES" />
+  <XmlContentType name="tickstamp" annotationType="jdk.jfr.Timestamp" annotationValue="TICKS" />
+  <XmlContentType name="epochmillis" annotationType="jdk.jfr.Timestamp" annotationValue="MILLISECONDS_SINCE_EPOCH" />
+  <XmlContentType name="tickspan" annotationType="jdk.jfr.Timespan" annotationValue="TICKS" />
+  <XmlContentType name="address" annotationType="jdk.jfr.MemoryAddress" />
+  <XmlContentType name="percentage" annotationType="jdk.jfr.Percentage" />
+  <XmlContentType name="millis" annotationType="jdk.jfr.Timespan" annotationValue="MILLISECONDS" />
+
+</Metadata>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/metadata/metadata.xsd	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,123 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+
+ This code is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License version 2 only, as
+ published by the Free Software Foundation.
+
+ This code is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ version 2 for more details (a copy is included in the LICENSE file that
+ accompanied this code).
+
+ You should have received a copy of the GNU General Public License version
+ 2 along with this work; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+
+ Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ or visit www.oracle.com if you need additional information or have any
+ questions.
+
+-->
+
+<xs:schema attributeFormDefault="unqualified" elementFormDefault="qualified" xmlns:xs="http://www.w3.org/2001/XMLSchema">
+  <xs:simpleType name="periodType">
+    <xs:restriction base="xs:string">
+      <xs:enumeration value="beginChunk" />
+      <xs:enumeration value="endChunk" />
+      <xs:enumeration value="everyChunk" />
+    </xs:restriction>
+  </xs:simpleType>
+    <xs:simpleType name="transitionType">
+    <xs:restriction base="xs:string">
+      <xs:enumeration value="from" />
+      <xs:enumeration value="to" />
+    </xs:restriction>
+  </xs:simpleType> 
+  <xs:element name="Metadata">
+    <xs:complexType>
+      <xs:sequence>
+        <xs:choice maxOccurs="unbounded">
+          <xs:element maxOccurs="unbounded" name="Event">
+            <xs:complexType>
+              <xs:sequence>
+                <xs:element maxOccurs="unbounded" name="Field">
+                  <xs:complexType>
+                    <xs:attribute name="type" type="xs:NMTOKEN" use="required" />
+                    <xs:attribute name="struct" type="xs:boolean" use="optional" />
+                    <xs:attribute name="array" type="xs:boolean" use="optional" />
+                    <xs:attribute name="name" type="xs:NMTOKEN" use="required" />
+                    <xs:attribute name="label" type="xs:string" use="required" />
+                    <xs:attribute name="description" type="xs:string" use="optional" />
+                    <xs:attribute name="contentType" type="xs:string" use="optional" />
+                    <xs:attribute name="relation" type="xs:string" use="optional" />
+                    <xs:attribute name="experimental" type="xs:boolean" use="optional" />
+                    <xs:attribute name="transition" type="transitionType" use="optional" />
+                  </xs:complexType>
+                </xs:element>
+              </xs:sequence>
+              <xs:attribute name="name" type="xs:NMTOKEN" use="required" />
+              <xs:attribute name="category" type="xs:string" use="required" />
+              <xs:attribute name="label" type="xs:string" use="required" />
+              <xs:attribute name="description" type="xs:string" use="optional" />
+              <xs:attribute name="experimental" type="xs:boolean" use="optional" />
+              <xs:attribute name="thread" type="xs:boolean" use="optional" />
+              <xs:attribute name="startTime" type="xs:boolean" use="optional" />
+              <xs:attribute name="stackTrace" type="xs:boolean" use="optional" />
+              <xs:attribute name="period" type="periodType" use="optional" />
+              <xs:attribute name="cutoff" type="xs:boolean" use="optional" />
+            </xs:complexType>
+          </xs:element>
+          <xs:element maxOccurs="unbounded" name="Type">
+            <xs:complexType>
+              <xs:sequence>
+                <xs:element maxOccurs="unbounded" name="Field">
+                  <xs:complexType>
+                    <xs:attribute name="type" type="xs:NMTOKEN" use="required" />
+                    <xs:attribute name="struct" type="xs:boolean" use="optional" />
+                    <xs:attribute name="array" type="xs:boolean" use="optional" />
+                    <xs:attribute name="name" type="xs:NMTOKEN" use="required" />
+                    <xs:attribute name="contentType" type="xs:string" use="optional" />
+                    <xs:attribute name="label" type="xs:string" use="required" />
+                    <xs:attribute name="description" type="xs:string" use="optional" />
+                    <xs:attribute name="experimental" type="xs:boolean" use="optional" />
+                    <xs:attribute name="relation" type="xs:string" use="optional" />
+                  </xs:complexType>
+                </xs:element>
+              </xs:sequence>
+              <xs:attribute name="name" type="xs:NMTOKEN" use="required" />
+              <xs:attribute name="label" type="xs:string" use="optional" />
+              <xs:attribute name="experimental" type="xs:boolean" use="optional" />
+            </xs:complexType>
+          </xs:element>
+          <xs:element name="XmlType">
+            <xs:complexType>
+              <xs:attribute name="name" type="xs:NMTOKEN" use="required" />
+              <xs:attribute name="contentType" type="xs:NMTOKEN" use="optional" />
+              <xs:attribute name="javaType" type="xs:NMTOKEN" use="optional" />
+              <xs:attribute name="unsigned" type="xs:boolean" use="optional" />
+              <xs:attribute name="parameterType" type="xs:string" use="required" />
+              <xs:attribute name="fieldType" type="xs:string" use="required" />
+            </xs:complexType>
+          </xs:element>
+          <xs:element name="XmlContentType">
+            <xs:complexType>
+              <xs:attribute name="name" type="xs:NMTOKEN" use="required" />
+              <xs:attribute name="annotationType" type="xs:NMTOKEN" use="required" />
+              <xs:attribute name="annotationValue" type="xs:string" use="optional" />
+            </xs:complexType>
+          </xs:element>
+          <xs:element name="Relation">
+            <xs:complexType>
+              <xs:attribute name="name" type="xs:NMTOKEN" use="required" />
+            </xs:complexType>
+          </xs:element>
+        </xs:choice>
+      </xs:sequence>
+    </xs:complexType>
+  </xs:element>
+</xs:schema>
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/periodic/jfrNetworkUtilization.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/jfrEvents.hpp"
+#include "jfr/metadata/jfrSerializer.hpp"
+#include "jfr/periodic/jfrNetworkUtilization.hpp"
+#include "jfr/periodic/jfrOSInterface.hpp"
+#include "jfr/utilities/jfrTime.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+#include "runtime/os_perf.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/growableArray.hpp"
+
+struct InterfaceEntry {
+  char* name;
+  traceid id;
+  uint64_t bytes_in;
+  uint64_t bytes_out;
+  bool in_use;
+};
+
+static GrowableArray<InterfaceEntry>* _interfaces = NULL;
+
+void JfrNetworkUtilization::destroy() {
+  if (_interfaces != NULL) {
+    for (int i = 0; i < _interfaces->length(); ++i) {
+      FREE_C_HEAP_ARRAY(char, _interfaces->at(i).name, mtInternal);
+    }
+    delete _interfaces;
+    _interfaces = NULL;
+  }
+}
+
+static InterfaceEntry& new_entry(const NetworkInterface* iface, GrowableArray<InterfaceEntry>* interfaces) {
+  assert(iface != NULL, "invariant");
+  assert(interfaces != NULL, "invariant");
+
+  // single threaded premise
+  static traceid interface_id = 0;
+
+  const char* name = iface->get_name();
+  assert(name != NULL, "invariant");
+
+  InterfaceEntry entry;
+  const size_t length = strlen(name);
+  entry.name = NEW_C_HEAP_ARRAY(char, length + 1, mtInternal);
+  strncpy(entry.name, name, length + 1);
+  entry.id = ++interface_id;
+  entry.bytes_in = iface->get_bytes_in();
+  entry.bytes_out = iface->get_bytes_out();
+  entry.in_use = false;
+  return _interfaces->at(_interfaces->append(entry));
+}
+
+static GrowableArray<InterfaceEntry>* get_interfaces() {
+  if (_interfaces == NULL) {
+    _interfaces = new(ResourceObj::C_HEAP, mtTracing) GrowableArray<InterfaceEntry>(10, true, mtTracing);
+  }
+  return _interfaces;
+}
+
+static InterfaceEntry& get_entry(const NetworkInterface* iface) {
+  // Remember the index we started at last time, since we're most likely looking at them
+  // in the same order every time.
+  static int saved_index = -1;
+
+  GrowableArray<InterfaceEntry>* interfaces = get_interfaces();
+  assert(interfaces != NULL, "invariant");
+  for (int i = 0; i < _interfaces->length(); ++i) {
+    saved_index = (saved_index + 1) % _interfaces->length();
+    if (strcmp(_interfaces->at(saved_index).name, iface->get_name()) == 0) {
+      return _interfaces->at(saved_index);
+    }
+  }
+  return new_entry(iface, interfaces);
+}
+
+// If current counters are less than previous we assume the interface has been reset
+// If no bytes have been either sent or received, we'll also skip the event
+static uint64_t rate_per_second(uint64_t current, uint64_t old, const JfrTickspan& interval) {
+  assert(interval.value() > 0, "invariant");
+  if (current <= old) {
+    return 0;
+  }
+  return ((current - old) * NANOSECS_PER_SEC) / interval.nanoseconds();
+}
+
+static bool get_interfaces(NetworkInterface** network_interfaces) {
+  const int ret_val = JfrOSInterface::network_utilization(network_interfaces);
+  if (ret_val == OS_ERR) {
+    if (LogJFR) tty->print_cr("Unable to generate network utilization events");
+    return false;
+  }
+  return ret_val != FUNCTIONALITY_NOT_IMPLEMENTED;
+}
+
+class JfrNetworkInterfaceName : public JfrSerializer {
+ public:
+  void serialize(JfrCheckpointWriter& writer) {
+    assert(_interfaces != NULL, "invariant");
+    const JfrCheckpointContext ctx = writer.context();
+    const intptr_t count_offset = writer.reserve(sizeof(u4)); // Don't know how many yet
+    int active_interfaces = 0;
+    for (int i = 0; i < _interfaces->length(); ++i) {
+      InterfaceEntry& entry = _interfaces->at(i);
+      if (entry.in_use) {
+        entry.in_use = false;
+        writer.write_key(entry.id);
+        writer.write(entry.name);
+        ++active_interfaces;
+      }
+    }
+    if (active_interfaces == 0) {
+      // nothing to write, restore context
+      writer.set_context(ctx);
+      return;
+    }
+    writer.write_count(active_interfaces, count_offset);
+  }
+};
+
+static bool register_network_interface_name_serializer() {
+  assert(_interfaces != NULL, "invariant");
+  return JfrSerializer::register_serializer(TYPE_NETWORKINTERFACENAME,
+                                            false, // require safepoint
+                                            false, // disallow caching; we want a callback every rotation
+                                            new JfrNetworkInterfaceName());
+}
+
+void JfrNetworkUtilization::send_events() {
+  ResourceMark rm;
+  NetworkInterface* network_interfaces;
+  if (!get_interfaces(&network_interfaces)) {
+    return;
+  }
+  if (LogJFR && Verbose) tty->print_cr("Reporting network utilization");
+  static JfrTicks last_sample_instant;
+  const JfrTicks cur_time = JfrTicks::now();
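+  // First invocation: there is no previous sample, so use a zero-length interval and skip rate reporting below.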
+  const JfrTickspan interval = last_sample_instant == 0 ? cur_time - cur_time : cur_time - last_sample_instant;
+  last_sample_instant = cur_time;
+  for (NetworkInterface *cur = network_interfaces; cur != NULL; cur = cur->next()) {
+    InterfaceEntry& entry = get_entry(cur);
+    if (interval.value() > 0) {
+      const uint64_t current_bytes_in = cur->get_bytes_in();
+      const uint64_t current_bytes_out = cur->get_bytes_out();
+      const uint64_t read_rate = rate_per_second(current_bytes_in, entry.bytes_in, interval);
+      const uint64_t write_rate = rate_per_second(current_bytes_out, entry.bytes_out, interval);
+      if (read_rate > 0 || write_rate > 0) {
+        entry.in_use = true;
+        EventNetworkUtilization event(UNTIMED);
+        event.set_starttime(cur_time);
+        event.set_endtime(cur_time);
+        event.set_networkInterface(entry.id);
+        event.set_readRate(read_rate);
+        event.set_writeRate(write_rate);
+        event.commit();
+      }
+      // update existing entry with new values
+      entry.bytes_in = current_bytes_in;
+      entry.bytes_out = current_bytes_out;
+    }
+  }
+
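+  // Register the serializer that writes interface names into the checkpoint; done lazily after the first pass.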
+  static bool is_serializer_registered = false;
+  if (!is_serializer_registered) {
+    is_serializer_registered = register_network_interface_name_serializer();
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/periodic/jfrNetworkUtilization.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_PERIODIC_JFRNETWORKUTILIZATION_HPP
+#define SHARE_VM_JFR_PERIODIC_JFRNETWORKUTILIZATION_HPP
+
+#include "memory/allocation.hpp"
+
+class NetworkInterface;
+
+class JfrNetworkUtilization : public AllStatic {
+public:
+  static void destroy();
+  static void send_events();
+};
+
+#endif // SHARE_VM_JFR_PERIODIC_JFRNETWORKUTILIZATION_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/periodic/jfrOSInterface.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,268 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/jfrEvents.hpp"
+#include "jfr/periodic/jfrNetworkUtilization.hpp"
+#include "jfr/periodic/jfrOSInterface.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/os.hpp"
+#include "runtime/os_perf.hpp"
+#include "utilities/ostream.hpp"
+
+#include <stdlib.h> // for environment variables
+#ifdef __APPLE__
+#include <crt_externs.h>
+#define environ (*_NSGetEnviron())
+#endif
+
+#ifndef environ
+extern char** environ;
+#endif
+
+static JfrOSInterface* _instance = NULL;
+
+JfrOSInterface& JfrOSInterface::instance() {
+  return *_instance;
+}
+
+JfrOSInterface* JfrOSInterface::create() {
+  assert(_instance == NULL, "invariant");
+  _instance = new JfrOSInterface();
+  return _instance;
+}
+
+void JfrOSInterface::destroy() {
+  JfrNetworkUtilization::destroy();
+  if (_instance != NULL) {
+    delete _instance;
+    _instance = NULL;
+  }
+}
+
+class JfrOSInterface::JfrOSInterfaceImpl : public JfrCHeapObj {
+  friend class JfrOSInterface;
+ private:
+  CPUInformationInterface* _cpu_info_interface;
+  CPUPerformanceInterface* _cpu_perf_interface;
+  SystemProcessInterface*  _system_process_interface;
+  NetworkPerformanceInterface* _network_performance_interface;
+
+  // stub helper
+  void functionality_not_implemented(char** str) const;
+
+  JfrOSInterfaceImpl();
+  bool initialize();
+  ~JfrOSInterfaceImpl();
+
+  // cpu info
+  int cpu_information(CPUInformation& cpu_info);
+  int cpu_load(int which_logical_cpu, double* cpu_load);
+  int context_switch_rate(double* rate);
+  int cpu_load_total_process(double* cpu_load);
+  int cpu_loads_process(double* pjvmUserLoad, double* pjvmKernelLoad, double* psystemTotal);
+
+  // os information
+  int os_version(char** os_version) const;
+
+  // environment information
+  void generate_environment_variables_events();
+
+  // system processes information
+  int system_processes(SystemProcess** system_processes, int* no_of_sys_processes);
+
+  int network_utilization(NetworkInterface** network_interfaces) const;
+};
+
+JfrOSInterface::JfrOSInterfaceImpl::JfrOSInterfaceImpl() : _cpu_info_interface(NULL),
+                                                           _cpu_perf_interface(NULL),
+                                                           _system_process_interface(NULL),
+                                                           _network_performance_interface(NULL) {}
+
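+// The platform interfaces are created and initialized in order; on failure the remaining members stay NULL.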
+bool JfrOSInterface::JfrOSInterfaceImpl::initialize() {
+  _cpu_info_interface = new CPUInformationInterface();
+  if (!(_cpu_info_interface != NULL && _cpu_info_interface->initialize())) {
+    return false;
+  }
+  _cpu_perf_interface = new CPUPerformanceInterface();
+  if (!(_cpu_perf_interface != NULL && _cpu_perf_interface->initialize())) {
+    return false;
+  }
+  _system_process_interface = new SystemProcessInterface();
+  if (!(_system_process_interface != NULL && _system_process_interface->initialize())) {
+    return false;
+  }
+  _network_performance_interface = new NetworkPerformanceInterface();
+  return _network_performance_interface != NULL && _network_performance_interface->initialize();
+}
+
+JfrOSInterface::JfrOSInterfaceImpl::~JfrOSInterfaceImpl(void) {
+  if (_cpu_info_interface != NULL) {
+    delete _cpu_info_interface;
+    _cpu_info_interface = NULL;
+  }
+  if (_cpu_perf_interface != NULL) {
+    delete _cpu_perf_interface;
+    _cpu_perf_interface = NULL;
+  }
+  if (_system_process_interface != NULL) {
+    delete _system_process_interface;
+    _system_process_interface = NULL;
+  }
+  if (_network_performance_interface != NULL) {
+    delete _network_performance_interface;
+    _network_performance_interface = NULL;
+  }
+}
+
+int JfrOSInterface::JfrOSInterfaceImpl::cpu_load(int which_logical_cpu, double* cpu_load) {
+  return _cpu_perf_interface->cpu_load(which_logical_cpu, cpu_load);
+}
+
+int JfrOSInterface::JfrOSInterfaceImpl::context_switch_rate(double* rate) {
+  return _cpu_perf_interface->context_switch_rate(rate);
+}
+
+int JfrOSInterface::JfrOSInterfaceImpl::cpu_load_total_process(double* cpu_load) {
+  return _cpu_perf_interface->cpu_load_total_process(cpu_load);
+}
+
+int JfrOSInterface::JfrOSInterfaceImpl::cpu_loads_process(double* pjvmUserLoad,
+                                                          double* pjvmKernelLoad,
+                                                          double* psystemTotal) {
+  return _cpu_perf_interface->cpu_loads_process(pjvmUserLoad, pjvmKernelLoad, psystemTotal);
+}
+
+int JfrOSInterface::JfrOSInterfaceImpl::cpu_information(CPUInformation& cpu_info) {
+  return _cpu_info_interface->cpu_information(cpu_info);
+}
+
+int JfrOSInterface::JfrOSInterfaceImpl::system_processes(SystemProcess** system_processes, int* no_of_sys_processes) {
+  assert(system_processes != NULL, "system_processes pointer is NULL!");
+  assert(no_of_sys_processes != NULL, "no_of_sys_processes pointer is NULL!");
+  return _system_process_interface->system_processes(system_processes, no_of_sys_processes);
+}
+
+int JfrOSInterface::JfrOSInterfaceImpl::network_utilization(NetworkInterface** network_interfaces) const {
+  return _network_performance_interface->network_utilization(network_interfaces);
+}
+
+// assigned char* is RESOURCE_HEAP_ALLOCATED
+// caller needs to ensure proper ResourceMark placement.
+int JfrOSInterface::JfrOSInterfaceImpl::os_version(char** os_version) const {
+  assert(os_version != NULL, "os_version pointer is NULL!");
+  stringStream os_ver_info;
+  os::print_os_info_brief(&os_ver_info);
+  *os_version = os_ver_info.as_string();
+  return OS_OK;
+}
+
+void JfrOSInterface::JfrOSInterfaceImpl::functionality_not_implemented(char** str) const {
+  assert(str != NULL, "address to string is NULL!");
+  const char* not_impl = "Functionality_not_implemented";
+  const size_t not_impl_len = strlen(not_impl);
+  *str = NEW_C_HEAP_ARRAY(char, not_impl_len+1, mtTracing);
+  strncpy(*str, not_impl, not_impl_len);
+  (*str)[not_impl_len] = '\0';
+}
+
+JfrOSInterface::JfrOSInterface() {
+  _impl = NULL;
+}
+
+bool JfrOSInterface::initialize() {
+  _impl = new JfrOSInterface::JfrOSInterfaceImpl();
+  return _impl != NULL && _impl->initialize();
+}
+
+JfrOSInterface::~JfrOSInterface() {
+  if (_impl != NULL) {
+    delete _impl;
+    _impl = NULL;
+  }
+}
+
+int JfrOSInterface::cpu_information(CPUInformation& cpu_info) {
+  return instance()._impl->cpu_information(cpu_info);
+}
+
+int JfrOSInterface::cpu_load(int which_logical_cpu, double* cpu_load) {
+  return instance()._impl->cpu_load(which_logical_cpu, cpu_load);
+}
+
+int JfrOSInterface::context_switch_rate(double* rate) {
+  return instance()._impl->context_switch_rate(rate);
+}
+
+int JfrOSInterface::cpu_load_total_process(double* cpu_load) {
+  return instance()._impl->cpu_load_total_process(cpu_load);
+}
+
+int JfrOSInterface::cpu_loads_process(double* jvm_user_load, double* jvm_kernel_load, double* system_total_load){
+  return instance()._impl->cpu_loads_process(jvm_user_load, jvm_kernel_load, system_total_load);
+}
+
+int JfrOSInterface::os_version(char** os_version) {
+  return instance()._impl->os_version(os_version);
+}
+
+int JfrOSInterface::generate_initial_environment_variable_events() {
+  if (environ == NULL) {
+    return OS_ERR;
+  }
+
+  if (EventInitialEnvironmentVariable::is_enabled()) {
+    // One time stamp for all events, so they can be grouped together
+    JfrTicks time_stamp = JfrTicks::now();
+    for (char** p = environ; *p != NULL; p++) {
+      char* variable = *p;
+      char* equal_sign = strchr(variable, '=');
+      if (equal_sign != NULL) {
+        // Extract key/value
+        ResourceMark rm;
+        ptrdiff_t key_length = equal_sign - variable;
+        char* key = NEW_RESOURCE_ARRAY(char, key_length + 1);
+        char* value = equal_sign + 1;
+        strncpy(key, variable, key_length);
+        key[key_length] = '\0';
+        EventInitialEnvironmentVariable event(UNTIMED);
+        event.set_endtime(time_stamp);
+        event.set_key(key);
+        event.set_value(value);
+        event.commit();
+      }
+    }
+  }
+  return OS_OK;
+}
+
+int JfrOSInterface::system_processes(SystemProcess** sys_processes, int* no_of_sys_processes) {
+  return instance()._impl->system_processes(sys_processes, no_of_sys_processes);
+}
+
+int JfrOSInterface::network_utilization(NetworkInterface** network_interfaces) {
+  return instance()._impl->network_utilization(network_interfaces);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/periodic/jfrOSInterface.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_PERIODIC_JFROSINTERFACE_HPP
+#define SHARE_VM_JFR_PERIODIC_JFROSINTERFACE_HPP
+
+#include "jfr/utilities/jfrAllocation.hpp"
+
+class CPUInformation;
+// XXX
+//class EnvironmentVariable;
+class NetworkInterface;
+class SystemProcess;
+
+class JfrOSInterface: public JfrCHeapObj {
+  friend class JfrRecorder;
+ private:
+  class JfrOSInterfaceImpl;
+  JfrOSInterfaceImpl* _impl;
+
+  JfrOSInterface();
+  ~JfrOSInterface();
+  bool initialize();
+  static JfrOSInterface& instance();
+  static JfrOSInterface* create();
+  static void destroy();
+
+ public:
+  static int cpu_information(CPUInformation& cpu_info);
+  static int cpu_load(int which_logical_cpu, double* cpu_load);
+  static int context_switch_rate(double* rate);
+  static int cpu_load_total_process(double* cpu_load);
+  static int cpu_loads_process(double* pjvmUserLoad, double* pjvmKernelLoad, double* psystemTotalLoad);
+  static int os_version(char** os_version);
+  static int generate_initial_environment_variable_events();
+  static int system_processes(SystemProcess** system_processes, int* no_of_sys_processes);
+  static int network_utilization(NetworkInterface** network_interfaces);
+};
+
+#endif // SHARE_VM_JFR_PERIODIC_JFROSINTERFACE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/periodic/jfrPeriodic.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,560 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jvm.h"
+#include "classfile/classLoaderStats.hpp"
+#include "classfile/javaClasses.hpp"
+#include "code/codeCache.hpp"
+#include "compiler/compileBroker.hpp"
+#include "gc_implementation/g1/g1HeapRegionEventSender.hpp"
+#include "gc_implementation/shared/gcConfiguration.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "gc_implementation/shared/objectCountEventSender.hpp"
+#include "gc_implementation/shared/vmGCOperations.hpp"
+#include "jfr/jfrEvents.hpp"
+#include "jfr/periodic/jfrOSInterface.hpp"
+#include "jfr/periodic/jfrThreadCPULoadEvent.hpp"
+#include "jfr/periodic/jfrThreadDumpEvent.hpp"
+#include "jfr/periodic/jfrNetworkUtilization.hpp"
+#include "jfr/recorder/jfrRecorder.hpp"
+#include "jfr/support/jfrThreadId.hpp"
+#include "jfr/utilities/jfrTime.hpp"
+#include "jfrfiles/jfrPeriodic.hpp"
+#include "memory/heapInspection.hpp"
+#include "memory/resourceArea.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/os.hpp"
+#include "runtime/os_perf.hpp"
+#include "runtime/thread.inline.hpp"
+#include "runtime/sweeper.hpp"
+#include "runtime/vmThread.hpp"
+#include "services/classLoadingService.hpp"
+#include "services/management.hpp"
+#include "services/threadService.hpp"
+#include "utilities/exceptions.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+/**
+ *  JfrPeriodicEventSet
+ *  Implementation of the periodic event request functions
+ *  declared in the xsl-generated jfrfiles/jfrPeriodic.hpp
+ */
+#define TRACE_REQUEST_FUNC(id)    void JfrPeriodicEventSet::request##id(void)
+
+TRACE_REQUEST_FUNC(JVMInformation) {
+  ResourceMark rm;
+  EventJVMInformation event;
+  event.set_jvmName(VM_Version::vm_name());
+  event.set_jvmVersion(VM_Version::internal_vm_info_string());
+  event.set_javaArguments(Arguments::java_command());
+  event.set_jvmArguments(Arguments::jvm_args());
+  event.set_jvmFlags(Arguments::jvm_flags());
+  event.set_jvmStartTime(Management::vm_init_done_time());
+  event.commit();
+}
+
+TRACE_REQUEST_FUNC(OSInformation) {
+  ResourceMark rm;
+  char* os_name = NEW_RESOURCE_ARRAY(char, 2048);
+  JfrOSInterface::os_version(&os_name);
+  EventOSInformation event;
+  event.set_osVersion(os_name);
+  event.commit();
+}
+
+/*
+ * This is left empty on purpose, having ExecutionSample as a requestable
+ * is a way of getting the period. The period is passed to ThreadSampling::update_period.
+ * Implementation in jfrSamples.cpp
+ */
+TRACE_REQUEST_FUNC(ExecutionSample) {
+}
+TRACE_REQUEST_FUNC(NativeMethodSample) {
+}
+
+TRACE_REQUEST_FUNC(ThreadDump) {
+  ResourceMark rm;
+  EventThreadDump event;
+  event.set_result(JfrDcmdEvent::thread_dump());
+  event.commit();
+}
+
+static int _native_library_callback(const char* name, address base, address top, void *param) {
+  EventNativeLibrary event(UNTIMED);
+  event.set_name(name);
+  event.set_baseAddress((u8)base);
+  event.set_topAddress((u8)top);
+  event.set_endtime(*(JfrTicks*) param);
+  event.commit();
+  return 0;
+}
+
+TRACE_REQUEST_FUNC(NativeLibrary) {
+  JfrTicks ts = JfrTicks::now();
+  os::get_loaded_modules_info(&_native_library_callback, (void *)&ts);
+}
+
+TRACE_REQUEST_FUNC(InitialEnvironmentVariable) {
+  JfrOSInterface::generate_initial_environment_variable_events();
+}
+
+TRACE_REQUEST_FUNC(CPUInformation) {
+  CPUInformation cpu_info;
+  int ret_val = JfrOSInterface::cpu_information(cpu_info);
+  if (ret_val == OS_ERR) {
+    if (LogJFR) tty->print_cr("Unable to generate requestable event CPUInformation");
+    return;
+  }
+  if (ret_val == FUNCTIONALITY_NOT_IMPLEMENTED) {
+     return;
+  }
+  if (ret_val == OS_OK) {
+    EventCPUInformation event;
+    event.set_cpu(cpu_info.cpu_name());
+    event.set_description(cpu_info.cpu_description());
+    event.set_sockets(cpu_info.number_of_sockets());
+    event.set_cores(cpu_info.number_of_cores());
+    event.set_hwThreads(cpu_info.number_of_hardware_threads());
+    event.commit();
+  }
+}
+
+TRACE_REQUEST_FUNC(CPULoad) {
+  double u = 0; // user time
+  double s = 0; // kernel time
+  double t = 0; // total time
+  int ret_val = JfrOSInterface::cpu_loads_process(&u, &s, &t);
+  if (ret_val == OS_ERR) {
+    if (LogJFR) tty->print_cr("Unable to generate requestable event CPULoad");
+    return;
+  }
+  if (ret_val == OS_OK) {
+    EventCPULoad event;
+    event.set_jvmUser((float)u);
+    event.set_jvmSystem((float)s);
+    event.set_machineTotal((float)t);
+    event.commit();
+  }
+}
+
+TRACE_REQUEST_FUNC(ThreadCPULoad) {
+  JfrThreadCPULoadEvent::send_events();
+}
+
+TRACE_REQUEST_FUNC(NetworkUtilization) {
+  JfrNetworkUtilization::send_events();
+}
+
+TRACE_REQUEST_FUNC(CPUTimeStampCounter) {
+  EventCPUTimeStampCounter event;
+  event.set_fastTimeEnabled(JfrTime::is_ft_enabled());
+  event.set_fastTimeAutoEnabled(JfrTime::is_ft_supported());
+  event.set_osFrequency(os::elapsed_frequency());
+  event.set_fastTimeFrequency(JfrTime::frequency());
+  event.commit();
+}
+
+TRACE_REQUEST_FUNC(SystemProcess) {
+  char pid_buf[16];
+  SystemProcess* processes = NULL;
+  int num_of_processes = 0;
+  JfrTicks start_time = JfrTicks::now();
+  int ret_val = JfrOSInterface::system_processes(&processes, &num_of_processes);
+  if (ret_val == OS_ERR) {
+    if (LogJFR) tty->print_cr("Unable to generate requestable event SystemProcess");
+    return;
+  }
+  JfrTicks end_time = JfrTicks::now();
+  if (ret_val == FUNCTIONALITY_NOT_IMPLEMENTED) {
+    return;
+  }
+  if (ret_val == OS_OK) {
+    // feature is implemented, write real event
+    while (processes != NULL) {
+      SystemProcess* tmp = processes;
+      const char* info = processes->command_line();
+      if (info == NULL) {
+         info = processes->path();
+      }
+      if (info == NULL) {
+         info = processes->name();
+      }
+      if (info == NULL) {
+         info = "?";
+      }
+      jio_snprintf(pid_buf, sizeof(pid_buf), "%d", processes->pid());
+      EventSystemProcess event(UNTIMED);
+      event.set_pid(pid_buf);
+      event.set_commandLine(info);
+      event.set_starttime(start_time);
+      event.set_endtime(end_time);
+      event.commit();
+      processes = processes->next();
+      delete tmp;
+    }
+  }
+}
+
+TRACE_REQUEST_FUNC(ThreadContextSwitchRate) {
+  double rate = 0.0;
+  int ret_val = JfrOSInterface::context_switch_rate(&rate);
+  if (ret_val == OS_ERR) {
+    if (LogJFR) tty->print_cr("Unable to generate requestable event ThreadContextSwitchRate");
+    return;
+  }
+  if (ret_val == FUNCTIONALITY_NOT_IMPLEMENTED) {
+    return;
+  }
+  if (ret_val == OS_OK) {
+    EventThreadContextSwitchRate event;
+    event.set_switchRate((float)rate + 0.0f);
+    event.commit();
+  }
+}
+
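+// Walks the global Flag::flags table and emits one event per unlocked flag of the given native flag type.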
+#define SEND_FLAGS_OF_TYPE(eventType, flagType)                   \
+  do {                                                            \
+    Flag *flag = Flag::flags;                                     \
+    while (flag->_name != NULL) {                                 \
+      if (flag->is_ ## flagType()) {                              \
+        if (flag->is_unlocked()) {                                \
+          Event ## eventType event;                               \
+          event.set_name(flag->_name);                            \
+          event.set_value(flag->get_ ## flagType());              \
+          event.set_origin(flag->get_origin());                   \
+          event.commit();                                         \
+        }                                                         \
+      }                                                           \
+      ++flag;                                                     \
+    }                                                             \
+  } while (0)
+
+TRACE_REQUEST_FUNC(IntFlag) {
+  SEND_FLAGS_OF_TYPE(IntFlag, intx);
+}
+
+TRACE_REQUEST_FUNC(UnsignedIntFlag) {
+  SEND_FLAGS_OF_TYPE(UnsignedIntFlag, uintx);
+}
+
+TRACE_REQUEST_FUNC(LongFlag) {
+  SEND_FLAGS_OF_TYPE(LongFlag, intx);
+}
+
+TRACE_REQUEST_FUNC(UnsignedLongFlag) {
+  SEND_FLAGS_OF_TYPE(UnsignedLongFlag, uintx);
+  SEND_FLAGS_OF_TYPE(UnsignedLongFlag, uint64_t);
+}
+
+TRACE_REQUEST_FUNC(DoubleFlag) {
+  SEND_FLAGS_OF_TYPE(DoubleFlag, double);
+}
+
+TRACE_REQUEST_FUNC(BooleanFlag) {
+  SEND_FLAGS_OF_TYPE(BooleanFlag, bool);
+}
+
+TRACE_REQUEST_FUNC(StringFlag) {
+  SEND_FLAGS_OF_TYPE(StringFlag, ccstr);
+}
+
+class VM_GC_SendObjectCountEvent : public VM_GC_HeapInspection {
+ public:
+  VM_GC_SendObjectCountEvent() : VM_GC_HeapInspection(NULL, true) {}
+  virtual void doit() {
+    ObjectCountEventSender::enable_requestable_event();
+    collect();
+    ObjectCountEventSender::disable_requestable_event();
+  }
+};
+
+TRACE_REQUEST_FUNC(ObjectCount) {
+  VM_GC_SendObjectCountEvent op;
+  VMThread::execute(&op);
+}
+
+class VM_G1SendHeapRegionInfoEvents : public VM_Operation {
+  virtual void doit() {
+    G1HeapRegionEventSender::send_events();
+  }
+  virtual VMOp_Type type() const { return VMOp_HeapIterateOperation; }
+};
+
+TRACE_REQUEST_FUNC(G1HeapRegionInformation) {
+  if (UseG1GC) {
+    VM_G1SendHeapRegionInfoEvents op;
+    VMThread::execute(&op);
+  }
+}
+
+// Java Mission Control (JMC) uses (Java) Long.MIN_VALUE to describe that a
+// long value is undefined.
+static jlong jmc_undefined_long = min_jlong;
+
+TRACE_REQUEST_FUNC(GCConfiguration) {
+  GCConfiguration conf;
+  jlong pause_target = conf.has_pause_target_default_value() ? jmc_undefined_long : conf.pause_target();
+  EventGCConfiguration event;
+  event.set_youngCollector(conf.young_collector());
+  event.set_oldCollector(conf.old_collector());
+  event.set_parallelGCThreads(conf.num_parallel_gc_threads());
+  event.set_concurrentGCThreads(conf.num_concurrent_gc_threads());
+  event.set_usesDynamicGCThreads(conf.uses_dynamic_gc_threads());
+  event.set_isExplicitGCConcurrent(conf.is_explicit_gc_concurrent());
+  event.set_isExplicitGCDisabled(conf.is_explicit_gc_disabled());
+  event.set_gcTimeRatio(conf.gc_time_ratio());
+  event.set_pauseTarget((s8)pause_target);
+  event.commit();
+}
+
+TRACE_REQUEST_FUNC(GCTLABConfiguration) {
+  GCTLABConfiguration conf;
+  EventGCTLABConfiguration event;
+  event.set_usesTLABs(conf.uses_tlabs());
+  event.set_minTLABSize(conf.min_tlab_size());
+  event.set_tlabRefillWasteLimit(conf.tlab_refill_waste_limit());
+  event.commit();
+}
+
+TRACE_REQUEST_FUNC(GCSurvivorConfiguration) {
+  GCSurvivorConfiguration conf;
+  EventGCSurvivorConfiguration event;
+  event.set_maxTenuringThreshold(conf.max_tenuring_threshold());
+  event.set_initialTenuringThreshold(conf.initial_tenuring_threshold());
+  event.commit();
+}
+
+TRACE_REQUEST_FUNC(GCHeapConfiguration) {
+  GCHeapConfiguration conf;
+  EventGCHeapConfiguration event;
+  event.set_minSize(conf.min_size());
+  event.set_maxSize(conf.max_size());
+  event.set_initialSize(conf.initial_size());
+  event.set_usesCompressedOops(conf.uses_compressed_oops());
+  event.set_compressedOopsMode(conf.narrow_oop_mode());
+  event.set_objectAlignment(conf.object_alignment_in_bytes());
+  event.set_heapAddressBits(conf.heap_address_size_in_bits());
+  event.commit();
+}
+
+TRACE_REQUEST_FUNC(YoungGenerationConfiguration) {
+  GCYoungGenerationConfiguration conf;
+  jlong max_size = conf.has_max_size_default_value() ? jmc_undefined_long : conf.max_size();
+  EventYoungGenerationConfiguration event;
+  event.set_maxSize((u8)max_size);
+  event.set_minSize(conf.min_size());
+  event.set_newRatio(conf.new_ratio());
+  event.commit();
+}
+
+TRACE_REQUEST_FUNC(InitialSystemProperty) {
+  SystemProperty* p = Arguments::system_properties();
+  JfrTicks time_stamp = JfrTicks::now();
+  while (p !=  NULL) {
+    if (true/* XXX fix me if you want !p->internal()*/) {
+      EventInitialSystemProperty event(UNTIMED);
+      event.set_key(p->key());
+      event.set_value(p->value());
+      event.set_endtime(time_stamp);
+      event.commit();
+    }
+    p = p->next();
+  }
+}
+
+TRACE_REQUEST_FUNC(ThreadAllocationStatistics) {
+  ResourceMark rm;
+  int initial_size = Threads::number_of_threads();
+  GrowableArray<jlong> allocated(initial_size);
+  GrowableArray<traceid> thread_ids(initial_size);
+  JfrTicks time_stamp = JfrTicks::now();
+  {
+    // Collect allocation statistics while holding threads lock
+    MutexLockerEx ml(Threads_lock);
+    for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
+      allocated.append(thread->cooked_allocated_bytes());
+      thread_ids.append(JFR_THREAD_ID(thread));
+    }
+  }
+
+  // Write allocation statistics to buffer.
+  for(int i = 0; i < thread_ids.length(); i++) {
+    EventThreadAllocationStatistics event(UNTIMED);
+    event.set_allocated(allocated.at(i));
+    event.set_thread(thread_ids.at(i));
+    event.set_endtime(time_stamp);
+    event.commit();
+  }
+}
+
+/**
+ *  PhysicalMemory event represents:
+ *
+ *  @totalSize == The amount of physical memory (hw) installed and reported by the OS, in bytes.
+ *  @usedSize  == The amount of physical memory currently in use in the system (reserved/committed), in bytes.
+ *
+ *  Both fields are system-wide, i.e. they represent the entire OS/HW environment.
+ *  These fields do not include virtual memory.
+ *
+ *  If running inside a guest OS on top of a hypervisor in a virtualized environment,
+ *  the total memory reported is the amount of memory configured for the guest OS by the hypervisor.
+ */
+TRACE_REQUEST_FUNC(PhysicalMemory) {
+  u8 totalPhysicalMemory = os::physical_memory();
+  EventPhysicalMemory event;
+  event.set_totalSize(totalPhysicalMemory);
+  event.set_usedSize(totalPhysicalMemory - os::available_memory());
+  event.commit();
+}
+
+TRACE_REQUEST_FUNC(JavaThreadStatistics) {
+  EventJavaThreadStatistics event;
+  event.set_activeCount(ThreadService::get_live_thread_count());
+  event.set_daemonCount(ThreadService::get_daemon_thread_count());
+  event.set_accumulatedCount(ThreadService::get_total_thread_count());
+  event.set_peakCount(ThreadService::get_peak_thread_count());
+  event.commit();
+}
+
+TRACE_REQUEST_FUNC(ClassLoadingStatistics) {
+  EventClassLoadingStatistics event;
+  event.set_loadedClassCount(ClassLoadingService::loaded_class_count());
+  event.set_unloadedClassCount(ClassLoadingService::unloaded_class_count());
+  event.commit();
+}
+
+class JfrClassLoaderStatsClosure : public ClassLoaderStatsClosure {
+public:
+  JfrClassLoaderStatsClosure() : ClassLoaderStatsClosure(NULL) {}
+
+  bool do_entry(oop const& key, ClassLoaderStats* const& cls) {
+    const ClassLoaderData* this_cld = cls->_class_loader != NULL ?
+      java_lang_ClassLoader::loader_data(cls->_class_loader) : (ClassLoaderData*)NULL;
+    const ClassLoaderData* parent_cld = cls->_parent != NULL ?
+      java_lang_ClassLoader::loader_data(cls->_parent) : (ClassLoaderData*)NULL;
+    EventClassLoaderStatistics event;
+    event.set_classLoader(this_cld);
+    event.set_parentClassLoader(parent_cld);
+    event.set_classLoaderData((intptr_t)cls->_cld);
+    event.set_classCount(cls->_classes_count);
+    event.set_chunkSize(cls->_chunk_sz);
+    event.set_blockSize(cls->_block_sz);
+    event.set_anonymousClassCount(cls->_anon_classes_count);
+    event.set_anonymousChunkSize(cls->_anon_chunk_sz);
+    event.set_anonymousBlockSize(cls->_anon_block_sz);
+    event.commit();
+    return true;
+  }
+
+  void createEvents(void) {
+    _stats->iterate(this);
+  }
+};
+
+class JfrClassLoaderStatsVMOperation : public ClassLoaderStatsVMOperation {
+ public:
+  JfrClassLoaderStatsVMOperation() : ClassLoaderStatsVMOperation(NULL) { }
+
+  void doit() {
+    JfrClassLoaderStatsClosure clsc;
+    ClassLoaderDataGraph::cld_do(&clsc);
+    clsc.createEvents();
+  }
+};
+
+TRACE_REQUEST_FUNC(ClassLoaderStatistics) {
+  JfrClassLoaderStatsVMOperation op;
+  VMThread::execute(&op);
+}
+
+TRACE_REQUEST_FUNC(CompilerStatistics) {
+  EventCompilerStatistics event;
+  event.set_compileCount(CompileBroker::get_total_compile_count());
+  event.set_bailoutCount(CompileBroker::get_total_bailout_count());
+  event.set_invalidatedCount(CompileBroker::get_total_invalidated_count());
+  event.set_osrCompileCount(CompileBroker::get_total_osr_compile_count());
+  event.set_standardCompileCount(CompileBroker::get_total_standard_compile_count());
+  event.set_osrBytesCompiled(CompileBroker::get_sum_osr_bytes_compiled());
+  event.set_standardBytesCompiled(CompileBroker::get_sum_standard_bytes_compiled());
+  event.set_nmetodsSize(CompileBroker::get_sum_nmethod_size());
+  event.set_nmetodCodeSize(CompileBroker::get_sum_nmethod_code_size());
+  event.set_peakTimeSpent(CompileBroker::get_peak_compilation_time());
+  event.set_totalTimeSpent(CompileBroker::get_total_compilation_time());
+  event.commit();
+}
+
+TRACE_REQUEST_FUNC(CompilerConfiguration) {
+  EventCompilerConfiguration event;
+  event.set_threadCount(CICompilerCount);
+  event.set_tieredCompilation(TieredCompilation);
+  event.commit();
+}
+
+TRACE_REQUEST_FUNC(CodeCacheStatistics) {
+  EventCodeCacheStatistics event;
+  event.set_codeBlobType((u1)0/*bt*/); // XXX
+  event.set_startAddress((u8)CodeCache::low_bound());
+  event.set_reservedTopAddress((u8)CodeCache::high_bound());
+  event.set_entryCount(CodeCache::nof_blobs());
+  event.set_methodCount(CodeCache::nof_nmethods());
+  event.set_adaptorCount(CodeCache::nof_adapters());
+  event.set_unallocatedCapacity(CodeCache::unallocated_capacity());
+  event.set_fullCount(CodeCache::get_codemem_full_count());
+  event.commit();
+}
+
+TRACE_REQUEST_FUNC(CodeCacheConfiguration) {
+  EventCodeCacheConfiguration event;
+  event.set_initialSize(InitialCodeCacheSize);
+  event.set_reservedSize(ReservedCodeCacheSize);
+  event.set_nonNMethodSize(0/*NonNMethodCodeHeapSize*/); // XXX
+  event.set_profiledSize(0/*ProfiledCodeHeapSize*/); // XXX
+  event.set_nonProfiledSize(0/*NonProfiledCodeHeapSize*/); // XXX
+  event.set_expansionSize(CodeCacheExpansionSize);
+  event.set_minBlockLength(CodeCacheMinBlockLength);
+  event.set_startAddress((u8)CodeCache::low_bound());
+  event.set_reservedTopAddress((u8)CodeCache::high_bound());
+  event.commit();
+}
+
+TRACE_REQUEST_FUNC(CodeSweeperStatistics) {
+  EventCodeSweeperStatistics event;
+  event.set_sweepCount(NMethodSweeper::traversal_count());
+  event.set_methodReclaimedCount(NMethodSweeper::total_nof_methods_reclaimed());
+  event.set_totalSweepTime(NMethodSweeper::total_time_sweeping());
+  event.set_peakFractionTime(NMethodSweeper::peak_sweep_fraction_time());
+  event.set_peakSweepTime(NMethodSweeper::peak_sweep_time());
+  event.commit();
+}
+
+TRACE_REQUEST_FUNC(CodeSweeperConfiguration) {
+  EventCodeSweeperConfiguration event;
+  event.set_sweeperEnabled(MethodFlushing);
+  event.set_flushingEnabled(UseCodeCacheFlushing);
+  event.commit();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/periodic/jfrThreadCPULoadEvent.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/jfrEvents.hpp"
+#include "jfr/periodic/jfrThreadCPULoadEvent.hpp"
+#include "jfr/support/jfrThreadId.hpp"
+#include "jfr/support/jfrThreadLocal.hpp"
+#include "jfr/utilities/jfrTime.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "runtime/os.hpp"
+#include "runtime/thread.inline.hpp"
+
+jlong JfrThreadCPULoadEvent::get_wallclock_time() {
+  return os::javaTimeNanos();
+}
+
+int JfrThreadCPULoadEvent::_last_active_processor_count = 0;
+
+int JfrThreadCPULoadEvent::get_processor_count() {
+  int cur_processor_count = os::active_processor_count();
+  int last_processor_count = _last_active_processor_count;
+  _last_active_processor_count = cur_processor_count;
+
+  // If the number of processors decreases, we don't know at what point during
+  // the sample interval this happened, so use the largest number to try
+  // to avoid percentages above 100%
+  return MAX2(cur_processor_count, last_processor_count);
+}
+
+// Returns false if the thread has not used a measurable amount of CPU time since the last call to update_event
+// (i.e. the combined user and system time delta is less than 1 millisecond)
+bool JfrThreadCPULoadEvent::update_event(EventThreadCPULoad& event, JavaThread* thread, jlong cur_wallclock_time, int processor_count) {
+  JfrThreadLocal* const tl = thread->jfr_thread_local();
+
+  jlong cur_cpu_time = os::thread_cpu_time(thread, true);
+  jlong prev_cpu_time = tl->get_cpu_time();
+
+  jlong prev_wallclock_time = tl->get_wallclock_time();
+  tl->set_wallclock_time(cur_wallclock_time);
+
+  // Threshold of 1 ms
+  if (cur_cpu_time - prev_cpu_time < 1 * NANOSECS_PER_MILLISEC) {
+    return false;
+  }
+
+  jlong cur_user_time = os::thread_cpu_time(thread, false);
+  jlong prev_user_time = tl->get_user_time();
+
+  jlong cur_system_time = cur_cpu_time - cur_user_time;
+  jlong prev_system_time = prev_cpu_time - prev_user_time;
+
+  // The user and total cpu usage clocks can have different resolutions, which can
+  // make us see decreasing system time. Ensure time doesn't go backwards.
+  if (prev_system_time > cur_system_time) {
+    cur_cpu_time += prev_system_time - cur_system_time;
+    cur_system_time = prev_system_time;
+  }
+
+  jlong user_time = cur_user_time - prev_user_time;
+  jlong system_time = cur_system_time - prev_system_time;
+  jlong wallclock_time = cur_wallclock_time - prev_wallclock_time;
+  jlong total_available_time = wallclock_time * processor_count;
+
+  // Avoid reporting percentages above the theoretical max
+  if (user_time + system_time > wallclock_time) {
+    jlong excess = user_time + system_time - wallclock_time;
+    if (user_time > excess) {
+      user_time -= excess;
+      cur_user_time -= excess;
+      cur_cpu_time -= excess;
+    } else {
+      cur_cpu_time -= excess;
+      excess -= user_time;
+      user_time = 0;
+      cur_user_time = 0;
+      system_time -= excess;
+    }
+  }
+  event.set_user(total_available_time > 0 ? (double)user_time / total_available_time : 0);
+  event.set_system(total_available_time > 0 ? (double)system_time / total_available_time : 0);
+  tl->set_user_time(cur_user_time);
+  tl->set_cpu_time(cur_cpu_time);
+  return true;
+}
+
+void JfrThreadCPULoadEvent::send_events() {
+  Thread* periodic_thread = Thread::current();
+  JfrThreadLocal* const periodic_thread_tl = periodic_thread->jfr_thread_local();
+  traceid periodic_thread_id = periodic_thread_tl->thread_id();
+  const int processor_count = JfrThreadCPULoadEvent::get_processor_count();
+  JfrTicks event_time = JfrTicks::now();
+  jlong cur_wallclock_time = JfrThreadCPULoadEvent::get_wallclock_time();
+
+  {
+    MutexLockerEx ml(Threads_lock);
+    unsigned jt_count = 0;
+    for (JavaThread *jt = Threads::first(); jt != NULL; jt = jt->next()) {
+      EventThreadCPULoad event(UNTIMED);
+      if (JfrThreadCPULoadEvent::update_event(event, jt, cur_wallclock_time, processor_count)) {
+        event.set_starttime(event_time);
+        if (jt != periodic_thread) {
+          // Commit reads the thread id from this thread's trace data, so put it there temporarily
+          periodic_thread_tl->set_thread_id(JFR_THREAD_ID(jt));
+        } else {
+          periodic_thread_tl->set_thread_id(periodic_thread_id);
+        }
+        event.commit();
+      }
+      jt_count++;
+    }
+    if (LogJFR && Verbose) tty->print_cr("Measured CPU usage for %d threads in %.3f milliseconds", jt_count,
+      (double)(JfrTicks::now() - event_time).milliseconds());
+  }
+  // Restore this thread's thread id
+  periodic_thread_tl->set_thread_id(periodic_thread_id);
+}
+
+void JfrThreadCPULoadEvent::send_event_for_thread(JavaThread* jt) {
+  EventThreadCPULoad event;
+  if (event.should_commit()) {
+    if (update_event(event, jt, get_wallclock_time(), get_processor_count())) {
+      event.commit();
+    }
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/periodic/jfrThreadCPULoadEvent.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_PERIODIC_JFRTHREADCPULOAD_HPP
+#define SHARE_VM_JFR_PERIODIC_JFRTHREADCPULOAD_HPP
+
+#include "jni.h"
+#include "memory/allocation.hpp"
+
+class JavaThread;
+class EventThreadCPULoad;
+
+class JfrThreadCPULoadEvent : public AllStatic {
+  static int _last_active_processor_count;
+ public:
+  static jlong get_wallclock_time();
+  static int get_processor_count();
+  static bool update_event(EventThreadCPULoad& event, JavaThread* thread, jlong cur_wallclock_time, int processor_count);
+  static void send_events();
+  static void send_event_for_thread(JavaThread* jt);
+};
+
+#endif // SHARE_VM_JFR_PERIODIC_JFRTHREADCPULOAD_HPP
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/periodic/jfrThreadDumpEvent.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/dcmd/jfrDcmds.hpp"
+#include "jfr/jfrEvents.hpp"
+#include "jfr/periodic/jfrThreadDumpEvent.hpp"
+#include "utilities/exceptions.hpp"
+#include "utilities/ostream.hpp"
+
+/**
+ *  Worker for generating and writing DCMD output
+ *  as jfr events.
+ *  Dispatches to DCmd::parse_and_execute.
+ *
+ *  param: cmd = the DCMD to execute (including options)
+ */
+static bool execute_dcmd(bufferedStream& st, const char* const cmd) {
+  Thread* THREAD = Thread::current();
+  assert(!HAS_PENDING_EXCEPTION, "dcmd does not expect pending exceptions on entry!");
+  // delegate to DCmd execution
+  DCmd::parse_and_execute(DCmd_Source_Internal, &st, cmd, ' ', THREAD);
+  if (HAS_PENDING_EXCEPTION) {
+    if (LogJFR) tty->print_cr("unable to create jfr event for DCMD %s", cmd);
+    if (LogJFR) tty->print_cr("exception type: %s", PENDING_EXCEPTION->klass()->external_name());
+    // don't unwind this exception
+    CLEAR_PENDING_EXCEPTION;
+    // if exception occurred,
+    // reset stream.
+    st.reset();
+    return false;
+  }
+  return true;
+}
+
+// caller needs ResourceMark
+const char* JfrDcmdEvent::thread_dump() {
+  assert(EventThreadDump::is_enabled(), "invariant");
+  bufferedStream st;
+  execute_dcmd(st, "Thread.print");
+  return st.as_string();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/periodic/jfrThreadDumpEvent.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_PERIODIC_JFRDCMDEVENT_HPP
+#define SHARE_VM_JFR_PERIODIC_JFRDCMDEVENT_HPP
+
+#include "memory/allocation.hpp"
+
+/*
+ *  Helper for generating jfr events from DCmd output data.
+ */
+class JfrDcmdEvent : public AllStatic {
+ public:
+  // caller needs ResourceMark
+  static const char* thread_dump();
+};
+
+#endif // SHARE_VM_JFR_PERIODIC_JFRDCMDEVENT_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/periodic/sampling/jfrCallTrace.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "code/debugInfoRec.hpp"
+#include "code/nmethod.hpp"
+#include "code/pcDesc.hpp"
+#include "jfr/periodic/sampling/jfrCallTrace.hpp"
+#include "oops/method.hpp"
+#include "runtime/javaCalls.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/registerMap.hpp"
+#include "runtime/thread.inline.hpp"
+
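+// Walk at most MaxJavaStackTraceDepth * 2 frames from the given top frame,
+// looking for the first frame that can safely be decoded: an interpreted
+// frame with a valid Method*, or a compiled (nmethod) frame whose stack has
+// been fully set up. The walk stops without a result at the thread's first
+// entry frame, at stub frames, and when a sender cannot be safely reached.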
+bool JfrGetCallTrace::find_top_frame(frame& top_frame, Method** method, frame& first_frame) {
+  assert(top_frame.cb() != NULL, "invariant");
+  RegisterMap map(_thread, false);
+  frame candidate = top_frame;
+  for (int i = 0; i < MaxJavaStackTraceDepth * 2; ++i) {
+    if (candidate.is_entry_frame()) {
+      JavaCallWrapper *jcw = candidate.entry_frame_call_wrapper_if_safe(_thread);
+      if (jcw == NULL || jcw->is_first_frame()) {
+        return false;
+      }
+    }
+
+    if (candidate.is_interpreted_frame()) {
+      JavaThreadState state = _thread->thread_state();
+      const bool known_valid = (state == _thread_in_native || state == _thread_in_vm || state == _thread_blocked);
+      if (known_valid || candidate.is_interpreted_frame_valid(_thread)) {
+        Method* im = candidate.interpreter_frame_method();
+        if (known_valid && !im->is_valid_method()) {
+          return false;
+        }
+        *method = im;
+        first_frame = candidate;
+        return true;
+      }
+    }
+
+    if (candidate.cb()->is_nmethod()) {
+      // First check that we have a sane stack: the PC is actually inside
+      // the code part of the codeBlob, and we are past is_frame_complete_at
+      // (the stack has been set up).
+      if (!candidate.safe_for_sender(_thread)) {
+        return false;
+      }
+      nmethod* nm = (nmethod*)candidate.cb();
+      *method = nm->method();
+
+      if (_in_java) {
+        PcDesc* pc_desc = nm->pc_desc_near(candidate.pc() + 1);
+        if (pc_desc == NULL || pc_desc->scope_decode_offset() == DebugInformationRecorder::serialized_null) {
+          return false;
+        }
+        candidate.set_pc(pc_desc->real_pc(nm));
+        assert(nm->pc_desc_at(candidate.pc()) != NULL, "invalid pc");
+      }
+      first_frame = candidate;
+      return true;
+    }
+
+    if (!candidate.safe_for_sender(_thread) ||
+      candidate.is_stub_frame() ||
+      candidate.cb()->frame_size() <= 0) {
+      return false;
+    }
+
+    candidate = candidate.sender(&map);
+    if (candidate.cb() == NULL) {
+      return false;
+    }
+  }
+  return false;
+}
+
+bool JfrGetCallTrace::get_topframe(void* ucontext, frame& topframe) {
+  if (!_thread->pd_get_top_frame_for_profiling(&topframe, ucontext, _in_java)) {
+    return false;
+  }
+
+  if (topframe.cb() == NULL) {
+    return false;
+  }
+
+  frame first_java_frame;
+  Method* method = NULL;
+  if (find_top_frame(topframe, &method, first_java_frame)) {
+    if (method == NULL) {
+      return false;
+    }
+    topframe = first_java_frame;
+    return true;
+  }
+  return false;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/periodic/sampling/jfrCallTrace.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_ENGINE_SAMPLING_JFRCALLTRACE_HPP
+#define SHARE_VM_JFR_ENGINE_SAMPLING_JFRCALLTRACE_HPP
+
+#include "memory/allocation.hpp"
+
+class frame;
+class Method;
+class JavaThread;
+
+class JfrGetCallTrace : public StackObj {
+ private:
+  JavaThread* _thread;
+  bool _in_java;
+
+ public:
+  JfrGetCallTrace(bool in_java, JavaThread* thread) : _thread(thread), _in_java(in_java) {}
+  bool find_top_frame(frame& topframe, Method** method, frame& first_frame);
+  bool get_topframe(void* ucontext, frame& top);
+};
+
+#endif // SHARE_VM_JFR_ENGINE_SAMPLING_JFRCALLTRACE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/periodic/sampling/jfrThreadSampler.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,635 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/jfrEvents.hpp"
+#include "jfr/recorder/jfrRecorder.hpp"
+#include "jfr/periodic/sampling/jfrCallTrace.hpp"
+#include "jfr/periodic/sampling/jfrThreadSampler.hpp"
+#include "jfr/recorder/service/jfrOptionSet.hpp"
+#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
+#include "jfr/support/jfrThreadId.hpp"
+#include "jfr/utilities/jfrTime.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/os.hpp"
+#include "runtime/semaphore.hpp"
+#include "runtime/thread.inline.hpp"
+
+enum JfrSampleType {
+  NO_SAMPLE = 0,
+  JAVA_SAMPLE = 1,
+  NATIVE_SAMPLE = 2
+};
+
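+// Classify the state a thread was signalled in: only _thread_in_Java qualifies
+// for a Java sample and only _thread_in_native for a native sample; all other
+// (including transitional) states are skipped.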
+static bool thread_state_in_java(JavaThread* thread) {
+  assert(thread != NULL, "invariant");
+  switch(thread->thread_state()) {
+    case _thread_new:
+    case _thread_uninitialized:
+    case _thread_new_trans:
+    case _thread_in_vm_trans:
+    case _thread_blocked_trans:
+    case _thread_in_native_trans:
+    case _thread_blocked:
+    case _thread_in_vm:
+    case _thread_in_native:
+    case _thread_in_Java_trans:
+      break;
+    case _thread_in_Java:
+      return true;
+    default:
+      ShouldNotReachHere();
+      break;
+  }
+  return false;
+}
+
+static bool thread_state_in_native(JavaThread* thread) {
+  assert(thread != NULL, "invariant");
+  switch(thread->thread_state()) {
+    case _thread_new:
+    case _thread_uninitialized:
+    case _thread_new_trans:
+    case _thread_blocked_trans:
+    case _thread_blocked:
+    case _thread_in_vm:
+    case _thread_in_vm_trans:
+    case _thread_in_Java_trans:
+    case _thread_in_Java:
+    case _thread_in_native_trans:
+      break;
+    case _thread_in_native:
+      return true;
+    default:
+      ShouldNotReachHere();
+      break;
+  }
+  return false;
+}
+
+class JfrThreadSampleClosure {
+ public:
+  JfrThreadSampleClosure(EventExecutionSample* events, EventNativeMethodSample* events_native);
+  ~JfrThreadSampleClosure() {}
+  EventExecutionSample* next_event() { return &_events[_added_java++]; }
+  EventNativeMethodSample* next_event_native() { return &_events_native[_added_native++]; }
+  void commit_events(JfrSampleType type);
+  bool do_sample_thread(JavaThread* thread, JfrStackFrame* frames, u4 max_frames, JfrSampleType type);
+  uint java_entries() { return _added_java; }
+  uint native_entries() { return _added_native; }
+
+ private:
+  bool sample_thread_in_java(JavaThread* thread, JfrStackFrame* frames, u4 max_frames);
+  bool sample_thread_in_native(JavaThread* thread, JfrStackFrame* frames, u4 max_frames);
+  EventExecutionSample* _events;
+  EventNativeMethodSample* _events_native;
+  Thread* _self;
+  uint _added_java;
+  uint _added_native;
+};
+
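+// Suspends the target thread via os::SuspendedThreadTask and records its stack
+// trace while it is stopped, under crash protection when sample protection is
+// enabled.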
+class OSThreadSampler : public os::SuspendedThreadTask {
+ public:
+  OSThreadSampler(JavaThread* thread,
+                  JfrThreadSampleClosure& closure,
+                  JfrStackFrame *frames,
+                  u4 max_frames) : os::SuspendedThreadTask((Thread*)thread),
+    _success(false),
+    _stacktrace(frames, max_frames),
+    _closure(closure),
+    _suspend_time() {}
+
+  void take_sample();
+  void do_task(const os::SuspendedThreadTaskContext& context);
+  void protected_task(const os::SuspendedThreadTaskContext& context);
+  bool success() const { return _success; }
+  const JfrStackTrace& stacktrace() const { return _stacktrace; }
+
+ private:
+  bool _success;
+  JfrStackTrace _stacktrace;
+  JfrThreadSampleClosure& _closure;
+  JfrTicks _suspend_time;
+};
+
+class OSThreadSamplerCallback : public os::CrashProtectionCallback {
+ public:
+  OSThreadSamplerCallback(OSThreadSampler& sampler, const os::SuspendedThreadTaskContext &context) :
+    _sampler(sampler), _context(context) {
+  }
+  virtual void call() {
+    _sampler.protected_task(_context);
+  }
+ private:
+  OSThreadSampler& _sampler;
+  const os::SuspendedThreadTaskContext& _context;
+};
+
+void OSThreadSampler::do_task(const os::SuspendedThreadTaskContext& context) {
+#ifndef ASSERT
+  guarantee(JfrOptionSet::sample_protection(), "Sample Protection should be on in product builds");
+#endif
+  assert(_suspend_time.value() == 0, "already timestamped!");
+  _suspend_time = JfrTicks::now();
+
+  if (JfrOptionSet::sample_protection()) {
+    OSThreadSamplerCallback cb(*this, context);
+    os::ThreadCrashProtection crash_protection;
+    if (!crash_protection.call(cb)) {
+      if (true) tty->print_cr("Thread method sampler crashed");
+    }
+  } else {
+    protected_task(context);
+  }
+}
+
+/*
+* From this method and down the call tree we attempt to protect against crashes
+* using a signal handler / __try block. Don't take locks, don't rely on
+* destructors, and don't leave memory (in case of a signal / exception) in an
+* inconsistent state. */
+void OSThreadSampler::protected_task(const os::SuspendedThreadTaskContext& context) {
+  JavaThread* jth = (JavaThread*)context.thread();
+  // Skip sample if we signaled a thread that moved to other state
+  if (!thread_state_in_java(jth)) {
+    return;
+  }
+  JfrGetCallTrace trace(true, jth);
+  frame topframe;
+  if (trace.get_topframe(context.ucontext(), topframe)) {
+    if (_stacktrace.record_thread(*jth, topframe)) {
+      /* If we managed to get a topframe and a stacktrace, create an event
+      * and put it into our array. We can't call Jfr::_stacktraces.add()
+      * here since it would allocate memory using malloc. Doing so while
+      * the stopped thread is inside malloc would deadlock. */
+      _success = true;
+      EventExecutionSample *ev = _closure.next_event();
+      ev->set_starttime(_suspend_time);
+      ev->set_endtime(_suspend_time); // fake to not take an end time
+      ev->set_sampledThread(JFR_THREAD_ID(jth));
+      ev->set_state(java_lang_Thread::get_thread_status(jth->threadObj()));
+    }
+  }
+}
+
+void OSThreadSampler::take_sample() {
+  run();
+}
+
+class JfrNativeSamplerCallback : public os::CrashProtectionCallback {
+ public:
+  JfrNativeSamplerCallback(JfrThreadSampleClosure& closure, JavaThread* jt, JfrStackFrame* frames, u4 max_frames) :
+    _closure(closure), _jt(jt), _stacktrace(frames, max_frames), _success(false) {
+  }
+  virtual void call();
+  bool success() { return _success; }
+  JfrStackTrace& stacktrace() { return _stacktrace; }
+
+ private:
+  JfrThreadSampleClosure& _closure;
+  JavaThread* _jt;
+  JfrStackTrace _stacktrace;
+  bool _success;
+};
+
+static void write_native_event(JfrThreadSampleClosure& closure, JavaThread* jt) {
+  EventNativeMethodSample *ev = closure.next_event_native();
+  ev->set_starttime(JfrTicks::now());
+  ev->set_sampledThread(JFR_THREAD_ID(jt));
+  ev->set_state(java_lang_Thread::get_thread_status(jt->threadObj()));
+}
+
+void JfrNativeSamplerCallback::call() {
+  // When a thread has only just attached, it is in native without a last Java frame
+  if (!_jt->has_last_Java_frame()) {
+    return;
+  }
+
+  frame topframe = _jt->last_frame();
+  frame first_java_frame;
+  Method* method = NULL;
+  JfrGetCallTrace gct(false, _jt);
+  if (!gct.find_top_frame(topframe, &method, first_java_frame)) {
+    return;
+  }
+  if (method == NULL) {
+    return;
+  }
+  topframe = first_java_frame;
+  _success = _stacktrace.record_thread(*_jt, topframe);
+  if (_success) {
+    write_native_event(_closure, _jt);
+  }
+}
+
+bool JfrThreadSampleClosure::sample_thread_in_java(JavaThread* thread, JfrStackFrame* frames, u4 max_frames) {
+  OSThreadSampler sampler(thread, *this, frames, max_frames);
+  sampler.take_sample();
+  /* We don't want to allocate any memory using malloc/etc while the thread
+  * is stopped, so everything is stored in stack-allocated memory until this
+  * point, where the thread has been resumed again. If the sampling was a
+  * success, we store the stacktrace in the stacktrace repository and update
+  * the event with the id that was returned. */
+  if (!sampler.success()) {
+    return false;
+  }
+  EventExecutionSample *event = &_events[_added_java - 1];
+  traceid id = JfrStackTraceRepository::add(sampler.stacktrace());
+  assert(id != 0, "Stacktrace id should not be 0");
+  event->set_stackTrace(id);
+  return true;
+}
+
+bool JfrThreadSampleClosure::sample_thread_in_native(JavaThread* thread, JfrStackFrame* frames, u4 max_frames) {
+  JfrNativeSamplerCallback cb(*this, thread, frames, max_frames);
+  if (JfrOptionSet::sample_protection()) {
+    os::ThreadCrashProtection crash_protection;
+    if (!crash_protection.call(cb)) {
+      if (true) tty->print_cr("Thread method sampler crashed for native");
+    }
+  } else {
+    cb.call();
+  }
+  if (!cb.success()) {
+    return false;
+  }
+  EventNativeMethodSample *event = &_events_native[_added_native - 1];
+  traceid id = JfrStackTraceRepository::add(cb.stacktrace());
+  assert(id != 0, "Stacktrace id should not be 0");
+  event->set_stackTrace(id);
+  return true;
+}
+
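+// Upper bounds for a single sampling pass; they also size the stack-allocated
+// event arrays in JfrThreadSampler::task_stacktrace().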
+static const uint MAX_NR_OF_JAVA_SAMPLES = 5;
+static const uint MAX_NR_OF_NATIVE_SAMPLES = 1;
+
+void JfrThreadSampleClosure::commit_events(JfrSampleType type) {
+  if (JAVA_SAMPLE == type) {
+    assert(_added_java <= MAX_NR_OF_JAVA_SAMPLES, "invariant");
+    for (uint i = 0; i < _added_java; ++i) {
+      _events[i].commit();
+    }
+  } else {
+    assert(NATIVE_SAMPLE == type, "invariant");
+    assert(_added_native <= MAX_NR_OF_NATIVE_SAMPLES, "invariant");
+    for (uint i = 0; i < _added_native; ++i) {
+      _events_native[i].commit();
+    }
+  }
+}
+
+JfrThreadSampleClosure::JfrThreadSampleClosure(EventExecutionSample* events, EventNativeMethodSample* events_native) :
+  _events(events),
+  _events_native(events_native),
+  _self(Thread::current()),
+  _added_java(0),
+  _added_native(0) {
+}
+
+class JfrThreadSampler : public Thread {
+  friend class JfrThreadSampling;
+ private:
+  Semaphore _sample;
+  Thread* _sampler_thread;
+  JfrStackFrame* const _frames;
+  JavaThread* _last_thread_java;
+  JavaThread* _last_thread_native;
+  size_t _interval_java;
+  size_t _interval_native;
+  int _cur_index;
+  const u4 _max_frames;
+  volatile bool _disenrolled;
+  static Monitor* _transition_block_lock;
+
+//  JavaThread* next_thread(ThreadsList* t_list, JavaThread* first_sampled, JavaThread* current);
+  void task_stacktrace(JfrSampleType type, JavaThread** last_thread);
+  JfrThreadSampler(size_t interval_java, size_t interval_native, u4 max_frames);
+  ~JfrThreadSampler();
+
+  void start_thread();
+
+  void enroll();
+  void disenroll();
+  void set_java_interval(size_t interval) { _interval_java = interval; };
+  void set_native_interval(size_t interval) { _interval_native = interval; };
+  size_t get_java_interval() { return _interval_java; };
+  size_t get_native_interval() { return _interval_native; };
+
+ public:
+  void run();
+  static Monitor* transition_block() { return _transition_block_lock; }
+  static void on_javathread_suspend(JavaThread* thread);
+};
+
+Monitor* JfrThreadSampler::_transition_block_lock = new Monitor(Mutex::leaf, "Trace block", true);
+
+static void clear_transition_block(JavaThread* jt) {
+//  jt->clear_trace_flag();
+  JfrThreadLocal* const tl = jt->jfr_thread_local();
+  if (tl->is_trace_block()) {
+    MutexLockerEx ml(JfrThreadSampler::transition_block(), Mutex::_no_safepoint_check_flag);
+    JfrThreadSampler::transition_block()->notify_all();
+  }
+}
+
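+// Called with the Threads_lock held. Threads hidden from the external view or
+// currently in a deopt handler are skipped; otherwise the thread is sampled
+// through the Java or native path according to the requested sample type, and
+// any transition block is cleared afterwards.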
+bool JfrThreadSampleClosure::do_sample_thread(JavaThread* thread, JfrStackFrame* frames, u4 max_frames, JfrSampleType type) {
+  assert(Threads_lock->owned_by_self(), "Holding the thread table lock.");
+  if (thread->is_hidden_from_external_view() || thread->in_deopt_handler()) {
+    return false;
+  }
+
+  bool ret = false;
+//  thread->set_trace_flag();
+  if (!UseMembar) {
+    os::serialize_thread_states();
+  }
+  if (JAVA_SAMPLE == type) {
+    if (thread_state_in_java(thread)) {
+      ret = sample_thread_in_java(thread, frames, max_frames);
+    }
+  } else {
+    assert(NATIVE_SAMPLE == type, "invariant");
+    if (thread_state_in_native(thread)) {
+      ret = sample_thread_in_native(thread, frames, max_frames);
+    }
+  }
+  clear_transition_block(thread);
+  return ret;
+}
+
+JfrThreadSampler::JfrThreadSampler(size_t interval_java, size_t interval_native, u4 max_frames) :
+  _sample(),
+  _sampler_thread(NULL),
+  _frames(JfrCHeapObj::new_array<JfrStackFrame>(max_frames)),
+  _last_thread_java(NULL),
+  _last_thread_native(NULL),
+  _interval_java(interval_java),
+  _interval_native(interval_native),
+  _cur_index(-1),
+  _max_frames(max_frames),
+  _disenrolled(true) {
+}
+
+JfrThreadSampler::~JfrThreadSampler() {
+  JfrCHeapObj::free(_frames, sizeof(JfrStackFrame) * _max_frames);
+}
+
+void JfrThreadSampler::on_javathread_suspend(JavaThread* thread) {
+  JfrThreadLocal* const tl = thread->jfr_thread_local();
+  tl->set_trace_block();
+  {
+//    MutexLockerEx ml(transition_block(), Mutex::_no_safepoint_check_flag);
+//    while (thread->is_trace_suspend()) {
+//      transition_block()->wait(true);
+//    }
+//    tl->clear_trace_block();
+  }
+}
+
+//JavaThread* JfrThreadSampler::next_thread(ThreadsList* t_list, JavaThread* first_sampled, JavaThread* current) {
+//  assert(t_list != NULL, "invariant");
+//  assert(Threads_lock->owned_by_self(), "Holding the thread table lock.");
+//  assert(_cur_index >= -1 && (uint)_cur_index + 1 <= t_list->length(), "invariant");
+//  assert((current == NULL && -1 == _cur_index) || (t_list->find_index_of_JavaThread(current) == _cur_index), "invariant");
+//  if ((uint)_cur_index + 1 == t_list->length()) {
+//    // wrap
+//    _cur_index = 0;
+//  } else {
+//    _cur_index++;
+//  }
+//  assert(_cur_index >= 0 && (uint)_cur_index < t_list->length(), "invariant");
+//  JavaThread* const next = t_list->thread_at(_cur_index);
+//  return next != first_sampled ? next : NULL;
+//}
+
+void JfrThreadSampler::start_thread() {
+  // XXX TODO implement sampling
+//  if (os::create_thread(this, os::os_thread)) {
+//    os::start_thread(this);
+//  } else {
+//    if (true) tty->print_cr("Failed to create thread for thread sampling");
+//  }
+}
+
+void JfrThreadSampler::enroll() {
+  if (_disenrolled) {
+    if (LogJFR) tty->print_cr("Enrolling thread sampler");
+    _sample.signal();
+    _disenrolled = false;
+  }
+}
+
+void JfrThreadSampler::disenroll() {
+  if (!_disenrolled) {
+    _sample.wait();
+    _disenrolled = true;
+    if (LogJFR) tty->print_cr("Disenrolling thread sampler");
+  }
+}
+
+static jlong get_monotonic_ms() {
+  return os::javaTimeNanos() / 1000000;
+}
+
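+// The sampler loop. The semaphore doubles as the enroll/disenroll gate;
+// intervals are clamped to a minimum of 10 ms (an interval of 0 effectively
+// disables that sample type), and the loop sleeps until the nearer of the next
+// Java or native sampling deadline before taking the corresponding samples.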
+void JfrThreadSampler::run() {
+  assert(_sampler_thread == NULL, "invariant");
+
+  initialize_thread_local_storage();
+  record_stack_base_and_size();
+
+  _sampler_thread = this;
+
+  jlong last_java_ms = get_monotonic_ms();
+  jlong last_native_ms = last_java_ms;
+  while (true) {
+    if (!_sample.trywait()) {
+      // disenrolled
+      _sample.wait();
+      last_java_ms = get_monotonic_ms();
+      last_native_ms = last_java_ms;
+    }
+    _sample.signal();
+    jlong java_interval = _interval_java == 0 ? max_jlong : MAX2<jlong>(_interval_java, 10);
+    jlong native_interval = _interval_native == 0 ? max_jlong : MAX2<jlong>(_interval_native, 10);
+
+    jlong now_ms = get_monotonic_ms();
+
+    jlong next_j = java_interval + last_java_ms - now_ms;
+    jlong next_n = native_interval + last_native_ms - now_ms;
+
+    jlong sleep_to_next = MIN2<jlong>(next_j, next_n);
+
+    if (sleep_to_next > 0) {
+      os::naked_short_sleep(sleep_to_next);
+    }
+
+    if ((next_j - sleep_to_next) <= 0) {
+      task_stacktrace(JAVA_SAMPLE, &_last_thread_java);
+      last_java_ms = get_monotonic_ms();
+    }
+    if ((next_n - sleep_to_next) <= 0) {
+      task_stacktrace(NATIVE_SAMPLE, &_last_thread_native);
+      last_native_ms = get_monotonic_ms();
+    }
+  }
+  delete this;
+}
+
+
+void JfrThreadSampler::task_stacktrace(JfrSampleType type, JavaThread** last_thread) {
+  ResourceMark rm;
+  EventExecutionSample samples[MAX_NR_OF_JAVA_SAMPLES];
+  EventNativeMethodSample samples_native[MAX_NR_OF_NATIVE_SAMPLES];
+  JfrThreadSampleClosure sample_task(samples, samples_native);
+
+  const uint sample_limit = JAVA_SAMPLE == type ? MAX_NR_OF_JAVA_SAMPLES : MAX_NR_OF_NATIVE_SAMPLES;
+  uint num_sample_attempts = 0;
+  JavaThread* start = NULL;
+
+  {
+    elapsedTimer sample_time;
+    sample_time.start();
+    {
+//      MonitorLockerEx tlock(Threads_lock, Mutex::_allow_vm_block_flag);
+//      ThreadsListHandle tlh;
+//      // Resolve a sample session relative start position index into the thread list array.
+//      // In cases where the last sampled thread is NULL or not-NULL but stale, find_index() returns -1.
+//      _cur_index = tlh.list()->find_index_of_JavaThread(*last_thread);
+//      JavaThread* current = _cur_index != -1 ? *last_thread : NULL;
+//
+//      while (num_sample_attempts < sample_limit) {
+//        current = next_thread(tlh.list(), start, current);
+//        if (current == NULL) {
+//          break;
+//        }
+//        if (start == NULL) {
+//          start = current;  // remember the thread where we started to attempt sampling
+//        }
+//        if (current->is_Compiler_thread()) {
+//          continue;
+//        }
+//        sample_task.do_sample_thread(current, _frames, _max_frames, type);
+//        num_sample_attempts++;
+//      }
+//      *last_thread = current;  // remember the thread we last attempted to sample
+    }
+    sample_time.stop();
+    if (LogJFR && Verbose) tty->print_cr("JFR thread sampling done in %3.7f secs with %d java %d native samples",
+                   sample_time.seconds(), sample_task.java_entries(), sample_task.native_entries());
+  }
+  if (num_sample_attempts > 0) {
+    sample_task.commit_events(type);
+  }
+}
+
+static JfrThreadSampling* _instance = NULL;
+
+JfrThreadSampling& JfrThreadSampling::instance() {
+  return *_instance;
+}
+
+JfrThreadSampling* JfrThreadSampling::create() {
+  assert(_instance == NULL, "invariant");
+  _instance = new JfrThreadSampling();
+  return _instance;
+}
+
+void JfrThreadSampling::destroy() {
+  if (_instance != NULL) {
+    delete _instance;
+    _instance = NULL;
+  }
+}
+
+JfrThreadSampling::JfrThreadSampling() : _sampler(NULL) {}
+
+JfrThreadSampling::~JfrThreadSampling() {
+  if (_sampler != NULL) {
+    _sampler->disenroll();
+  }
+}
+
+static void log(size_t interval_java, size_t interval_native) {
+  if (LogJFR) tty->print_cr("Updated thread sampler for java: " SIZE_FORMAT "  ms, native " SIZE_FORMAT " ms", interval_java, interval_native);
+}
+
+void JfrThreadSampling::start_sampler(size_t interval_java, size_t interval_native) {
+  assert(_sampler == NULL, "invariant");
+  if (LogJFR) tty->print_cr("Enrolling thread sampler");
+  _sampler = new JfrThreadSampler(interval_java, interval_native, JfrOptionSet::stackdepth());
+  _sampler->start_thread();
+  _sampler->enroll();
+}
+
+void JfrThreadSampling::set_sampling_interval(bool java_interval, size_t period) {
+  size_t interval_java = 0;
+  size_t interval_native = 0;
+  if (_sampler != NULL) {
+    interval_java = _sampler->get_java_interval();
+    interval_native = _sampler->get_native_interval();
+  }
+  if (java_interval) {
+    interval_java = period;
+  } else {
+    interval_native = period;
+  }
+  if (interval_java > 0 || interval_native > 0) {
+    if (_sampler == NULL) {
+      if (LogJFR) tty->print_cr("Creating thread sampler for java:%zu ms, native %zu ms", interval_java, interval_native);
+      start_sampler(interval_java, interval_native);
+    } else {
+      _sampler->set_java_interval(interval_java);
+      _sampler->set_native_interval(interval_native);
+      _sampler->enroll();
+    }
+    assert(_sampler != NULL, "invariant");
+    log(interval_java, interval_native);
+  } else if (_sampler != NULL) {
+    _sampler->disenroll();
+  }
+}
+
+void JfrThreadSampling::set_java_sample_interval(size_t period) {
+  if (_instance == NULL && 0 == period) {
+    return;
+  }
+  instance().set_sampling_interval(true, period);
+}
+
+void JfrThreadSampling::set_native_sample_interval(size_t period) {
+  if (_instance == NULL && 0 == period) {
+    return;
+  }
+  instance().set_sampling_interval(false, period);
+}
+
+void JfrThreadSampling::on_javathread_suspend(JavaThread* thread) {
+  JfrThreadSampler::on_javathread_suspend(thread);
+}
+
+Thread* JfrThreadSampling::sampler_thread() {
+  if (_instance == NULL) {
+    return NULL;
+  }
+  return _instance->_sampler != NULL ? _instance->_sampler->_sampler_thread : NULL;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/periodic/sampling/jfrThreadSampler.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_PERIODIC_SAMPLING_JFRTHREADSAMPLER_HPP
+#define SHARE_VM_JFR_PERIODIC_SAMPLING_JFRTHREADSAMPLER_HPP
+
+#include "jfr/utilities/jfrAllocation.hpp"
+
+class JavaThread;
+class JfrStackFrame;
+class JfrThreadSampler;
+class Thread;
+
+class JfrThreadSampling : public JfrCHeapObj {
+  friend class JfrRecorder;
+ private:
+  JfrThreadSampler* _sampler;
+  void start_sampler(size_t interval_java, size_t interval_native);
+  void set_sampling_interval(bool java_interval, size_t period);
+
+  JfrThreadSampling();
+  ~JfrThreadSampling();
+
+  static JfrThreadSampling& instance();
+  static JfrThreadSampling* create();
+  static void destroy();
+
+ public:
+  static void set_java_sample_interval(size_t period);
+  static void set_native_sample_interval(size_t period);
+  static void on_javathread_suspend(JavaThread* thread);
+  static Thread* sampler_thread();
+};
+
+#endif // SHARE_VM_JFR_PERIODIC_SAMPLING_JFRTHREADSAMPLER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/checkpoint/jfrCheckpointBlob.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/recorder/checkpoint/jfrCheckpointBlob.hpp"
+#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
+
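+// A checkpoint blob owns a C-heap copy of serialized checkpoint data. Blobs can
+// be chained via set_next(); exclusive_write() writes each blob in the chain at
+// most once until reset_write_state() is called.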
+JfrCheckpointBlob::JfrCheckpointBlob(const u1* checkpoint, size_t size) :
+  _checkpoint(JfrCHeapObj::new_array<u1>(size)),
+  _size(size),
+  _next(),
+  _written(false) {
+  assert(checkpoint != NULL, "invariant");
+  assert(_checkpoint != NULL, "invariant");
+  memcpy(const_cast<u1*>(_checkpoint), checkpoint, size);
+}
+
+JfrCheckpointBlob::~JfrCheckpointBlob() {
+  JfrCHeapObj::free(const_cast<u1*>(_checkpoint), _size);
+}
+
+const JfrCheckpointBlobHandle& JfrCheckpointBlob::next() const {
+  return _next;
+}
+
+void JfrCheckpointBlob::write_this(JfrCheckpointWriter& writer) const {
+  writer.bytes(_checkpoint, _size);
+}
+
+void JfrCheckpointBlob::exclusive_write(JfrCheckpointWriter& writer) const {
+  if (!_written) {
+    write_this(writer);
+    _written = true;
+  }
+  if (_next.valid()) {
+    _next->exclusive_write(writer);
+  }
+}
+
+void JfrCheckpointBlob::write(JfrCheckpointWriter& writer) const {
+  write_this(writer);
+  if (_next.valid()) {
+    _next->write(writer);
+  }
+}
+
+void JfrCheckpointBlob::reset_write_state() const {
+  if (_written) {
+    _written = false;
+  }
+  if (_next.valid()) {
+    _next->reset_write_state();
+  }
+}
+
+void JfrCheckpointBlob::set_next(const JfrCheckpointBlobHandle& ref) {
+  if (_next == ref) {
+    return;
+  }
+  assert(_next != ref, "invariant");
+  if (_next.valid()) {
+    _next->set_next(ref);
+    return;
+  }
+  _next = ref;
+}
+
+JfrCheckpointBlobHandle JfrCheckpointBlob::make(const u1* checkpoint, size_t size) {
+  const JfrCheckpointBlob* cp_blob = new JfrCheckpointBlob(checkpoint, size);
+  assert(cp_blob != NULL, "invariant");
+  return JfrCheckpointBlobReference::make(cp_blob);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/checkpoint/jfrCheckpointBlob.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_RECORDER_CHECKPOINT_JFRCHECKPOINTBLOB_HPP
+#define SHARE_VM_JFR_RECORDER_CHECKPOINT_JFRCHECKPOINTBLOB_HPP
+
+#include "jfr/utilities/jfrAllocation.hpp"
+#include "jfr/utilities/jfrRefCountPointer.hpp"
+
+class JfrCheckpointBlob;
+class JfrCheckpointWriter;
+
+typedef RefCountPointer<JfrCheckpointBlob, MultiThreadedRefCounter> JfrCheckpointBlobReference;
+typedef RefCountHandle<JfrCheckpointBlobReference> JfrCheckpointBlobHandle;
+
+class JfrCheckpointBlob : public JfrCHeapObj {
+  template <typename, typename>
+  friend class RefCountPointer;
+ private:
+  const u1* _checkpoint;
+  const size_t _size;
+  JfrCheckpointBlobHandle _next;
+  mutable bool _written;
+
+  JfrCheckpointBlob(const u1* checkpoint, size_t size);
+  ~JfrCheckpointBlob();
+  const JfrCheckpointBlobHandle& next() const;
+  void write_this(JfrCheckpointWriter& writer) const;
+
+ public:
+  void write(JfrCheckpointWriter& writer) const;
+  void exclusive_write(JfrCheckpointWriter& writer) const;
+  void reset_write_state() const;
+  void set_next(const JfrCheckpointBlobHandle& ref);
+  static JfrCheckpointBlobHandle make(const u1* checkpoint, size_t size);
+};
+
+#endif // SHARE_VM_JFR_RECORDER_CHECKPOINT_JFRCHECKPOINTBLOB_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/checkpoint/jfrCheckpointManager.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,379 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/javaClasses.hpp"
+#include "jfr/recorder/jfrRecorder.hpp"
+#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
+#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
+#include "jfr/recorder/checkpoint/types/jfrTypeManager.hpp"
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp"
+#include "jfr/recorder/service/jfrOptionSet.hpp"
+#include "jfr/recorder/storage/jfrMemorySpace.inline.hpp"
+#include "jfr/recorder/storage/jfrStorageUtils.inline.hpp"
+#include "jfr/recorder/repository/jfrChunkWriter.hpp"
+#include "jfr/utilities/jfrBigEndian.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/orderAccess.inline.hpp"
+#include "runtime/os.hpp"
+#include "runtime/safepoint.hpp"
+
+typedef JfrCheckpointManager::Buffer* BufferPtr;
+
+static JfrCheckpointManager* _instance = NULL;
+
+JfrCheckpointManager& JfrCheckpointManager::instance() {
+  return *_instance;
+}
+
+JfrCheckpointManager* JfrCheckpointManager::create(JfrChunkWriter& cw) {
+  assert(_instance == NULL, "invariant");
+  _instance = new JfrCheckpointManager(cw);
+  return _instance;
+}
+
+void JfrCheckpointManager::destroy() {
+  assert(_instance != NULL, "invariant");
+  delete _instance;
+  _instance = NULL;
+}
+
+JfrCheckpointManager::JfrCheckpointManager(JfrChunkWriter& cw) :
+  _free_list_mspace(NULL),
+  _epoch_transition_mspace(NULL),
+  _lock(NULL),
+  _service_thread(NULL),
+  _chunkwriter(cw),
+  _checkpoint_epoch_state(JfrTraceIdEpoch::current()) {}
+
+JfrCheckpointManager::~JfrCheckpointManager() {
+  if (_free_list_mspace != NULL) {
+    delete _free_list_mspace;
+  }
+  if (_epoch_transition_mspace != NULL) {
+    delete _epoch_transition_mspace;
+  }
+  if (_lock != NULL) {
+    delete _lock;
+  }
+  JfrTypeManager::clear();
+}
+
+static const size_t unlimited_mspace_size = 0;
+static const size_t checkpoint_buffer_cache_count = 2;
+static const size_t checkpoint_buffer_size = 512 * K;
+
+static JfrCheckpointMspace* create_mspace(size_t buffer_size, size_t limit, size_t cache_count, JfrCheckpointManager* system) {
+  JfrCheckpointMspace* mspace = new JfrCheckpointMspace(buffer_size, limit, cache_count, system);
+  if (mspace != NULL) {
+    mspace->initialize();
+  }
+  return mspace;
+}
+
+bool JfrCheckpointManager::initialize() {
+  assert(_free_list_mspace == NULL, "invariant");
+  _free_list_mspace = create_mspace(checkpoint_buffer_size, unlimited_mspace_size, checkpoint_buffer_cache_count, this);
+  if (_free_list_mspace == NULL) {
+    return false;
+  }
+  assert(_epoch_transition_mspace == NULL, "invariant");
+  _epoch_transition_mspace = create_mspace(checkpoint_buffer_size, unlimited_mspace_size, checkpoint_buffer_cache_count, this);
+  if (_epoch_transition_mspace == NULL) {
+    return false;
+  }
+  assert(_lock == NULL, "invariant");
+  _lock = new Mutex(Monitor::leaf - 1, "Checkpoint mutex", Mutex::_allow_vm_block_flag);
+  if (_lock == NULL) {
+    return false;
+  }
+  return JfrTypeManager::initialize();
+}
+
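+// Writers other than the service thread are directed to the epoch transition
+// mspace for as long as the epoch state recorded here lags behind the current
+// trace id epoch.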
+bool JfrCheckpointManager::use_epoch_transition_mspace(const Thread* thread) const {
+  return _service_thread != thread && OrderAccess::load_acquire((u1*)&_checkpoint_epoch_state) != JfrTraceIdEpoch::current();
+}
+
+void JfrCheckpointManager::synchronize_epoch() {
+  assert(_checkpoint_epoch_state != JfrTraceIdEpoch::current(), "invariant");
+  OrderAccess::storestore();
+  _checkpoint_epoch_state = JfrTraceIdEpoch::current();
+}
+
+void JfrCheckpointManager::shift_epoch() {
+  debug_only(const u1 current_epoch = JfrTraceIdEpoch::current();)
+  JfrTraceIdEpoch::shift_epoch();
+  assert(current_epoch != JfrTraceIdEpoch::current(), "invariant");
+}
+
+void JfrCheckpointManager::register_service_thread(const Thread* thread) {
+  _service_thread = thread;
+}
+
+void JfrCheckpointManager::register_full(BufferPtr t, Thread* thread) {
+  // nothing here at the moment
+  assert(t->retired(), "invariant");
+}
+
+void JfrCheckpointManager::lock() {
+  assert(!_lock->owned_by_self(), "invariant");
+  _lock->lock_without_safepoint_check();
+}
+
+void JfrCheckpointManager::unlock() {
+  _lock->unlock();
+}
+
+#ifdef ASSERT
+
+bool JfrCheckpointManager::is_locked() const {
+  return _lock->owned_by_self();
+}
+
+static void assert_free_lease(const BufferPtr buffer) {
+  assert(buffer != NULL, "invariant");
+  assert(buffer->acquired_by_self(), "invariant");
+  assert(buffer->lease(), "invariant");
+}
+
+static void assert_release(const BufferPtr buffer) {
+  assert(buffer != NULL, "invariant");
+  assert(buffer->lease(), "invariant");
+  assert(buffer->acquired_by_self(), "invariant");
+}
+
+#endif // ASSERT
+
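+// Lease a buffer of at least the requested size. Requests that fit the pooled
+// element size are retried against the free list; larger requests, or an
+// exhausted free list, fall back to a transient lease allocation.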
+static BufferPtr lease_free(size_t size, JfrCheckpointMspace* mspace, size_t retry_count, Thread* thread) {
+  static const size_t max_elem_size = mspace->min_elem_size(); // min is max
+  BufferPtr buffer;
+  if (size <= max_elem_size) {
+    BufferPtr buffer = mspace_get_free_lease_with_retry(size, mspace, retry_count, thread);
+    if (buffer != NULL) {
+      DEBUG_ONLY(assert_free_lease(buffer);)
+      return buffer;
+    }
+  }
+  buffer = mspace_allocate_transient_lease_to_free(size, mspace, thread);
+  DEBUG_ONLY(assert_free_lease(buffer);)
+  return buffer;
+}
+
+static const size_t lease_retry = 10;
+
+BufferPtr JfrCheckpointManager::lease_buffer(Thread* thread, size_t size /* 0 */) {
+  JfrCheckpointManager& manager = instance();
+  if (manager.use_epoch_transition_mspace(thread)) {
+    return lease_free(size, manager._epoch_transition_mspace, lease_retry, thread);
+  }
+  return lease_free(size, manager._free_list_mspace, lease_retry, thread);
+}
+
+/*
+* If the buffer was a "lease" from the free list, release it back.
+*
+* The buffer is effectively invalidated for the thread post-return,
+* and the caller should take care to ensure that it is not referenced again.
+*/
+static void release(BufferPtr const buffer, Thread* thread) {
+  DEBUG_ONLY(assert_release(buffer);)
+  buffer->clear_lease();
+  buffer->release();
+}
+
+BufferPtr JfrCheckpointManager::flush(BufferPtr old, size_t used, size_t requested, Thread* thread) {
+  assert(old != NULL, "invariant");
+  assert(old->lease(), "invariant");
+  if (0 == requested) {
+    // indicates a lease is being returned
+    release(old, thread);
+    return NULL;
+  }
+  // migration of in-flight information
+  BufferPtr const new_buffer = lease_buffer(thread, used + requested);
+  if (new_buffer != NULL) {
+    migrate_outstanding_writes(old, new_buffer, used, requested);
+  }
+  release(old, thread);
+  return new_buffer; // might be NULL
+}
+
+// offsets into the JfrCheckpointEntry header (size, start_time, duration, flushpoint, nof_segments)
+static const juint starttime_offset = sizeof(jlong);
+static const juint duration_offset = starttime_offset + sizeof(jlong);
+static const juint flushpoint_offset = duration_offset + sizeof(jlong);
+static const juint types_offset = flushpoint_offset + sizeof(juint);
+static const juint payload_offset = types_offset + sizeof(juint);
+
+template <typename Return>
+static Return read_data(const u1* data) {
+  return JfrBigEndian::read<Return>(data);
+}
+
+static jlong total_size(const u1* data) {
+  return read_data<jlong>(data);
+}
+
+static jlong starttime(const u1* data) {
+  return read_data<jlong>(data + starttime_offset);
+}
+
+static jlong duration(const u1* data) {
+  return read_data<jlong>(data + duration_offset);
+}
+
+static bool is_flushpoint(const u1* data) {
+  return read_data<juint>(data + flushpoint_offset) == (juint)1;
+}
+
+static juint number_of_types(const u1* data) {
+  return read_data<juint>(data + types_offset);
+}
+
+static void write_checkpoint_header(JfrChunkWriter& cw, intptr_t offset_prev_cp_event, const u1* data) {
+  cw.reserve(sizeof(u4));
+  cw.write((u8)EVENT_CHECKPOINT);
+  cw.write(starttime(data));
+  cw.write(duration(data));
+  cw.write((jlong)offset_prev_cp_event);
+  cw.write(is_flushpoint(data));
+  cw.write(number_of_types(data));
+}
+
+static void write_checkpoint_content(JfrChunkWriter& cw, const u1* data, size_t size) {
+  assert(data != NULL, "invariant");
+  cw.write_unbuffered(data + payload_offset, size);
+}
+
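+// Write a single checkpoint event to the chunk: reserve a u4 size slot, emit
+// the header (including a delta to the previous checkpoint event) and the
+// payload, then patch the total event size back into the reserved slot.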
+static size_t write_checkpoint_event(JfrChunkWriter& cw, const u1* data) {
+  assert(data != NULL, "invariant");
+  const intptr_t previous_checkpoint_event = cw.previous_checkpoint_offset();
+  const intptr_t event_begin = cw.current_offset();
+  const intptr_t offset_to_previous_checkpoint_event = 0 == previous_checkpoint_event ? 0 : previous_checkpoint_event - event_begin;
+  const jlong total_checkpoint_size = total_size(data);
+  write_checkpoint_header(cw, offset_to_previous_checkpoint_event, data);
+  write_checkpoint_content(cw, data, total_checkpoint_size - sizeof(JfrCheckpointEntry));
+  const jlong checkpoint_event_size = cw.current_offset() - event_begin;
+  cw.write_padded_at_offset<u4>(checkpoint_event_size, event_begin);
+  cw.set_previous_checkpoint_offset(event_begin);
+  return (size_t)total_checkpoint_size;
+}
+
+static size_t write_checkpoints(JfrChunkWriter& cw, const u1* data, size_t size) {
+  assert(cw.is_valid(), "invariant");
+  assert(data != NULL, "invariant");
+  assert(size > 0, "invariant");
+  const u1* const limit = data + size;
+  const u1* next_entry = data;
+  size_t processed = 0;
+  while (next_entry < limit) {
+    const size_t checkpoint_size = write_checkpoint_event(cw, next_entry);
+    processed += checkpoint_size;
+    next_entry += checkpoint_size;
+  }
+  assert(next_entry == limit, "invariant");
+  return processed;
+}
+
+template <typename T>
+class CheckpointWriteOp {
+ private:
+  JfrChunkWriter& _writer;
+  size_t _processed;
+ public:
+  typedef T Type;
+  CheckpointWriteOp(JfrChunkWriter& writer) : _writer(writer), _processed(0) {}
+  bool write(Type* t, const u1* data, size_t size) {
+    _processed += write_checkpoints(_writer, data, size);
+    return true;
+  }
+  size_t processed() const { return _processed; }
+};
+
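+// The composite operation below writes each buffer on the free list under the
+// mspace lock and then releases the buffers back to the mspace.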
+typedef CheckpointWriteOp<JfrCheckpointMspace::Type> WriteOperation;
+typedef MutexedWriteOp<WriteOperation> MutexedWriteOperation;
+typedef ReleaseOp<JfrCheckpointMspace> CheckpointReleaseOperation;
+typedef CompositeOperation<MutexedWriteOperation, CheckpointReleaseOperation> CheckpointWriteOperation;
+
+static size_t write_mspace_exclusive(JfrCheckpointMspace* mspace, JfrChunkWriter& chunkwriter) {
+  Thread* const thread = Thread::current();
+  WriteOperation wo(chunkwriter);
+  MutexedWriteOperation mwo(wo);
+  CheckpointReleaseOperation cro(mspace, thread, false);
+  CheckpointWriteOperation cpwo(&mwo, &cro);
+  assert(mspace->is_full_empty(), "invariant");
+  process_free_list(cpwo, mspace);
+  return wo.processed();
+}
+
+size_t JfrCheckpointManager::write() {
+  const size_t processed = write_mspace_exclusive(_free_list_mspace, _chunkwriter);
+  synchronize_epoch();
+  return processed;
+}
+
+size_t JfrCheckpointManager::write_epoch_transition_mspace() {
+  return write_mspace_exclusive(_epoch_transition_mspace, _chunkwriter);
+}
+
+typedef DiscardOp<DefaultDiscarder<JfrBuffer> > DiscardOperation;
+size_t JfrCheckpointManager::clear() {
+  DiscardOperation discarder(mutexed); // mutexed discard mode
+  process_free_list(discarder, _free_list_mspace);
+  process_free_list(discarder, _epoch_transition_mspace);
+  synchronize_epoch();
+  return discarder.processed();
+}
+
+size_t JfrCheckpointManager::write_types() {
+  JfrCheckpointWriter writer(false, true, Thread::current());
+  JfrTypeManager::write_types(writer);
+  return writer.used_size();
+}
+
+size_t JfrCheckpointManager::write_safepoint_types() {
+  // this is also a "flushpoint"
+  JfrCheckpointWriter writer(true, true, Thread::current());
+  JfrTypeManager::write_safepoint_types(writer);
+  return writer.used_size();
+}
+
+void JfrCheckpointManager::write_type_set() {
+  JfrTypeManager::write_type_set();
+}
+
+void JfrCheckpointManager::write_type_set_for_unloaded_classes() {
+  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
+  JfrTypeManager::write_type_set_for_unloaded_classes();
+}
+
+void JfrCheckpointManager::create_thread_checkpoint(JavaThread* jt) {
+  JfrTypeManager::create_thread_checkpoint(jt);
+}
+
+void JfrCheckpointManager::write_thread_checkpoint(JavaThread* jt) {
+  JfrTypeManager::write_thread_checkpoint(jt);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/checkpoint/jfrCheckpointManager.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_RECORDER_CHECKPOINT_JFRCHECKPOINTMANAGER_HPP
+#define SHARE_VM_JFR_RECORDER_CHECKPOINT_JFRCHECKPOINTMANAGER_HPP
+
+#include "jfr/recorder/storage/jfrBuffer.hpp"
+#include "jfr/recorder/storage/jfrMemorySpace.hpp"
+#include "jfr/recorder/storage/jfrMemorySpaceRetrieval.hpp"
+
+class JfrCheckpointManager;
+class JfrChunkWriter;
+class JfrSerializer;
+class JfrTypeManager;
+class Mutex;
+class Thread;
+
+struct JfrCheckpointEntry {
+  jlong size;
+  jlong start_time;
+  jlong duration;
+  juint flushpoint;
+  juint nof_segments;
+};
+
+typedef JfrMemorySpace<JfrBuffer, JfrMspaceSequentialRetrieval, JfrCheckpointManager> JfrCheckpointMspace;
+
+//
+// Responsible for maintaining checkpoints and by implication types.
+// A checkpoint is an event that has a payload consisting of constant types.
+// A constant type is a binary relation, a set of key-value pairs.
+//
+class JfrCheckpointManager : public JfrCHeapObj {
+ public:
+  typedef JfrCheckpointMspace::Type Buffer;
+ private:
+  JfrCheckpointMspace* _free_list_mspace;
+  JfrCheckpointMspace* _epoch_transition_mspace;
+  Mutex* _lock;
+  const Thread* _service_thread;
+  JfrChunkWriter& _chunkwriter;
+  u1 _checkpoint_epoch_state;
+
+  // mspace callback
+  void register_full(Buffer* t, Thread* thread);
+  void lock();
+  void unlock();
+  DEBUG_ONLY(bool is_locked() const;)
+
+  static Buffer* lease_buffer(Thread* t, size_t size = 0);
+  static Buffer* flush(Buffer* old, size_t used, size_t requested, Thread* t);
+
+  size_t clear();
+  size_t write();
+  size_t write_epoch_transition_mspace();
+  size_t write_types();
+  size_t write_safepoint_types();
+  void write_type_set();
+  void shift_epoch();
+  void synchronize_epoch();
+  bool use_epoch_transition_mspace(const Thread* t) const;
+
+  JfrCheckpointManager(JfrChunkWriter& cw);
+  ~JfrCheckpointManager();
+
+  static JfrCheckpointManager& instance();
+  static JfrCheckpointManager* create(JfrChunkWriter& cw);
+  bool initialize();
+  static void destroy();
+
+ public:
+  void register_service_thread(const Thread* t);
+  static void write_type_set_for_unloaded_classes();
+  static void create_thread_checkpoint(JavaThread* jt);
+  static void write_thread_checkpoint(JavaThread* jt);
+
+  friend class JfrRecorder;
+  friend class JfrRecorderService;
+  friend class JfrCheckpointFlush;
+  friend class JfrCheckpointWriter;
+  friend class JfrSerializer;
+  friend class JfrStackTraceRepository;
+  template <typename, template <typename> class, typename>
+  friend class JfrMemorySpace;
+};
+
+#endif // SHARE_VM_JFR_RECORDER_CHECKPOINT_JFRCHECKPOINTMANAGER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/checkpoint/jfrCheckpointWriter.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
+#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
+#include "jfr/writers/jfrBigEndianWriter.hpp"
+
+JfrCheckpointFlush::JfrCheckpointFlush(Type* old, size_t used, size_t requested, Thread* t) :
+  _result(JfrCheckpointManager::flush(old, used, requested, t)) {}
+
+JfrCheckpointWriter::JfrCheckpointWriter(bool flushpoint, bool header, Thread* thread) :
+  JfrCheckpointWriterBase(JfrCheckpointManager::lease_buffer(thread), thread),
+  _time(JfrTicks::now()),
+  _offset(0),
+  _count(0),
+  _flushpoint(flushpoint),
+  _header(header) {
+  assert(this->is_acquired(), "invariant");
+  assert(0 == this->current_offset(), "invariant");
+  if (_header) {
+    reserve(sizeof(JfrCheckpointEntry));
+  }
+}
+
+static void write_checkpoint_header(u1* pos, jlong size, jlong time, bool flushpoint, juint type_count) {
+  assert(pos != NULL, "invariant");
+  JfrBigEndianWriter be_writer(pos, sizeof(JfrCheckpointEntry));
+  be_writer.write(size);
+  be_writer.write(time);
+  be_writer.write(JfrTicks::now().value() - time);
+  be_writer.write(flushpoint ? (juint)1 : (juint)0);
+  be_writer.write(type_count);
+  assert(be_writer.is_valid(), "invariant");
+}
+
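+// On destruction, either rewind an empty header reservation or patch the
+// reserved JfrCheckpointEntry header with the final size, duration and type
+// count before the buffer contents are committed.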
+JfrCheckpointWriter::~JfrCheckpointWriter() {
+  assert(this->is_acquired(), "invariant");
+  if (!this->is_valid() || !_header) {
+    release();
+    return;
+  }
+  if (0 == count()) {
+    assert(this->used_size() == sizeof(JfrCheckpointEntry), "invariant");
+    this->seek(_offset);
+    release();
+    return;
+  }
+  assert(_header, "invariant");
+  assert(this->is_valid(), "invariant");
+  assert(count() > 0, "invariant");
+  assert(this->used_size() > sizeof(JfrCheckpointEntry), "invariant");
+  const jlong size = this->current_offset();
+  assert(size + this->start_pos() == this->current_pos(), "invariant");
+  write_checkpoint_header(const_cast<u1*>(this->start_pos()), size, _time, is_flushpoint(), count());
+  release();
+}
+
+void JfrCheckpointWriter::set_flushpoint(bool flushpoint) {
+  _flushpoint = flushpoint;
+}
+
+bool JfrCheckpointWriter::is_flushpoint() const {
+  return _flushpoint;
+}
+
+juint JfrCheckpointWriter::count() const {
+  return _count;
+}
+
+void JfrCheckpointWriter::set_count(juint count) {
+  _count = count;
+}
+
+void JfrCheckpointWriter::release() {
+  assert(this->is_acquired(), "invariant");
+  if (!this->is_valid() || this->used_size() == 0) {
+    return;
+  }
+  assert(this->used_size() > 0, "invariant");
+  // write through to backing storage
+  this->commit();
+  assert(0 == this->current_offset(), "invariant");
+}
+
+void JfrCheckpointWriter::write_type(JfrTypeId type_id) {
+  assert(type_id < TYPES_END, "invariant");
+  write<u8>(type_id);
+  increment();
+}
+
+void JfrCheckpointWriter::write_key(u8 key) {
+  write<u8>(key);
+}
+
+void JfrCheckpointWriter::increment() {
+  ++_count;
+}
+
+void JfrCheckpointWriter::write_count(u4 nof_entries) {
+  write<u4>((u4)nof_entries);
+}
+
+void JfrCheckpointWriter::write_count(u4 nof_entries, jlong offset) {
+  write_padded_at_offset(nof_entries, offset);
+}
+
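+// Returns a pointer to (and the size of) the data written in this session. With no context,
+// the checkpoint header is patched, the writer is rewound past the header and the type
+// count is reset so a new session can start; with a context, only the data written since
+// the context was captured is returned.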
+const u1* JfrCheckpointWriter::session_data(size_t* size, const JfrCheckpointContext* ctx /* 0 */) {
+  assert(this->is_acquired(), "wrong state!");
+  if (!this->is_valid()) {
+    *size = 0;
+    return NULL;
+  }
+  if (ctx != NULL) {
+    const u1* session_start_pos = this->start_pos() + ctx->offset;
+    *size = this->current_pos() - session_start_pos;
+    return session_start_pos;
+  }
+  *size = this->used_size();
+  assert(this->start_pos() + *size == this->current_pos(), "invariant");
+  write_checkpoint_header(const_cast<u1*>(this->start_pos()), this->used_offset(), _time, is_flushpoint(), count());
+  this->seek(_offset + (_header ? sizeof(JfrCheckpointEntry) : 0));
+  set_count(0);
+  return this->start_pos();
+}
+
+const JfrCheckpointContext JfrCheckpointWriter::context() const {
+  JfrCheckpointContext ctx;
+  ctx.offset = this->current_offset();
+  ctx.count = this->count();
+  return ctx;
+}
+
+void JfrCheckpointWriter::set_context(const JfrCheckpointContext ctx) {
+  this->seek(ctx.offset);
+  set_count(ctx.count);
+}
+
+bool JfrCheckpointWriter::has_data() const {
+  return this->used_size() > sizeof(JfrCheckpointEntry);
+}
+
+JfrCheckpointBlobHandle JfrCheckpointWriter::checkpoint_blob() {
+  size_t size = 0;
+  const u1* data = session_data(&size);
+  return JfrCheckpointBlob::make(data, size);
+}
+
+JfrCheckpointBlobHandle JfrCheckpointWriter::copy(const JfrCheckpointContext* ctx /* 0 */) {
+  if (ctx == NULL) {
+    return checkpoint_blob();
+  }
+  size_t size = 0;
+  const u1* data = session_data(&size, ctx);
+  return JfrCheckpointBlob::make(data, size);
+}
+
+JfrCheckpointBlobHandle JfrCheckpointWriter::move(const JfrCheckpointContext* ctx /* 0 */) {
+  JfrCheckpointBlobHandle data = copy(ctx);
+  if (ctx != NULL) {
+    const_cast<JfrCheckpointContext*>(ctx)->count = 0;
+    set_context(*ctx);
+  }
+  return data;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/checkpoint/jfrCheckpointWriter.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_RECORDER_CHECKPOINT_JFRCHECKPOINTWRITER_HPP
+#define SHARE_VM_JFR_RECORDER_CHECKPOINT_JFRCHECKPOINTWRITER_HPP
+
+#include "jfr/recorder/checkpoint/jfrCheckpointBlob.hpp"
+#include "jfr/recorder/storage/jfrBuffer.hpp"
+#include "jfr/utilities/jfrTime.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+#include "jfr/writers/jfrEventWriterHost.inline.hpp"
+#include "jfr/writers/jfrMemoryWriterHost.inline.hpp"
+#include "jfr/writers/jfrStorageAdapter.hpp"
+
+class Thread;
+
+class JfrCheckpointFlush : public StackObj {
+ public:
+  typedef JfrBuffer Type;
+  JfrCheckpointFlush(Type* old, size_t used, size_t requested, Thread* t);
+  Type* result() { return _result; }
+ private:
+  Type* _result;
+};
+
+typedef Adapter<JfrCheckpointFlush> JfrCheckpointAdapter;
+typedef AcquireReleaseMemoryWriterHost<JfrCheckpointAdapter, StackObj > JfrTransactionalCheckpointWriter;
+typedef EventWriterHost<BigEndianEncoder, CompressedIntegerEncoder, JfrTransactionalCheckpointWriter> JfrCheckpointWriterBase;
+
+struct JfrCheckpointContext {
+  jlong offset;
+  juint count;
+};
+
+class JfrCheckpointWriter : public JfrCheckpointWriterBase {
+  friend class JfrSerializerRegistration;
+ private:
+  JfrTicks _time;
+  jlong _offset;
+  juint _count;
+  bool _flushpoint;
+  bool _header;
+
+  juint count() const;
+  void set_count(juint count);
+  void increment();
+  void set_flushpoint(bool flushpoint);
+  bool is_flushpoint() const;
+  const u1* session_data(size_t* size, const JfrCheckpointContext* ctx = NULL);
+  void release();
+
+ public:
+  JfrCheckpointWriter(bool flushpoint, bool header, Thread* thread);
+  ~JfrCheckpointWriter();
+  void write_type(JfrTypeId type_id);
+  void write_count(u4 nof_entries);
+  void write_count(u4 nof_entries, jlong offset);
+  void write_key(u8 key);
+  const JfrCheckpointContext context() const;
+  void set_context(const JfrCheckpointContext ctx);
+  bool has_data() const;
+  JfrCheckpointBlobHandle checkpoint_blob();
+  JfrCheckpointBlobHandle copy(const JfrCheckpointContext* ctx = NULL);
+  JfrCheckpointBlobHandle move(const JfrCheckpointContext* ctx = NULL);
+};
+
+#endif // SHARE_VM_JFR_RECORDER_CHECKPOINT_JFRCHECKPOINTWRITER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/checkpoint/jfrMetadataEvent.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/jni/jfrJavaSupport.hpp"
+#include "jfr/recorder/checkpoint/jfrMetadataEvent.hpp"
+#include "jfr/recorder/repository/jfrChunkWriter.hpp"
+#include "oops/klass.inline.hpp"
+#include "oops/oop.inline.hpp"
+#include "oops/typeArrayOop.hpp"
+#include "runtime/semaphore.hpp"
+#include "runtime/thread.inline.hpp"
+
+static jbyteArray _metadata_blob = NULL;
+static Semaphore metadata_mutex_semaphore(1);
+
+void JfrMetadataEvent::lock() {
+  metadata_mutex_semaphore.wait();
+}
+
+void JfrMetadataEvent::unlock() {
+  metadata_mutex_semaphore.signal();
+}
+
+static void write_metadata_blob(JfrChunkWriter& chunkwriter, jbyteArray metadata_blob) {
+  if (metadata_blob != NULL) {
+    const typeArrayOop arr = (typeArrayOop)JfrJavaSupport::resolve_non_null(metadata_blob);
+    assert(arr != NULL, "invariant");
+    const int length = arr->length();
+    Klass* const k = arr->klass();
+    assert(k != NULL && k->oop_is_array(), "invariant");
+    const TypeArrayKlass* const byte_arr_klass = TypeArrayKlass::cast(k);
+    const jbyte* const data_address = arr->byte_at_addr(0);
+    chunkwriter.write_unbuffered(data_address, length);
+  }
+}
+
+// the semaphore is assumed to be locked (it was locked at the previous safepoint)
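+// Layout: a reserved u4 size slot, the EVENT_METADATA id, a timestamp, zero duration and
+// metadata id, followed by the raw metadata blob; the size slot is back-patched at the end.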
+size_t JfrMetadataEvent::write(JfrChunkWriter& chunkwriter, jlong metadata_offset) {
+  assert(chunkwriter.is_valid(), "invariant");
+  assert(chunkwriter.current_offset() == metadata_offset, "invariant");
+  // header
+  chunkwriter.reserve(sizeof(u4));
+  chunkwriter.write<u8>(EVENT_METADATA); // ID 0
+  // time data
+  chunkwriter.write(JfrTicks::now());
+  chunkwriter.write((u8)0); // duration
+  chunkwriter.write((u8)0); // metadata id
+  write_metadata_blob(chunkwriter, _metadata_blob); // payload
+  unlock(); // open up for java to provide updated metadata
+  // fill in size of metadata descriptor event
+  const jlong size_written = chunkwriter.current_offset() - metadata_offset;
+  chunkwriter.write_padded_at_offset((u4)size_written, metadata_offset);
+  return size_written;
+}
+
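+// Called from Java with an updated metadata blob: under the metadata lock, the previous
+// global JNI handle (if any) is destroyed and a handle to the new byte array is installed.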
+void JfrMetadataEvent::update(jbyteArray metadata) {
+  JavaThread* thread = (JavaThread*)Thread::current();
+  assert(thread->is_Java_thread(), "invariant");
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(thread));
+  lock();
+  if (_metadata_blob != NULL) {
+    JfrJavaSupport::destroy_global_jni_handle(_metadata_blob);
+  }
+  const oop new_desc_oop = JfrJavaSupport::resolve_non_null(metadata);
+  _metadata_blob = new_desc_oop != NULL ? (jbyteArray)JfrJavaSupport::global_jni_handle(new_desc_oop, thread) : NULL;
+  unlock();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/checkpoint/jfrMetadataEvent.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_RECORDER_CHECKPOINT_JFRMETADATAEVENT_HPP
+#define SHARE_VM_JFR_RECORDER_CHECKPOINT_JFRMETADATAEVENT_HPP
+
+#include "jni.h"
+#include "memory/allocation.hpp"
+
+class JfrChunkWriter;
+
+//
+// Metadata is continuously updated in Java as event classes are loaded / unloaded.
+// Using update(), Java stores a binary representation back to native.
+// This provides easy access at chunk finalization and keeps the metadata readily available in the case of a fatal error.
+//
+class JfrMetadataEvent : AllStatic {
+ public:
+  static void lock();
+  static void unlock();
+  static size_t write(JfrChunkWriter& writer, jlong metadata_offset);
+  static void update(jbyteArray metadata);
+};
+
+#endif // SHARE_VM_JFR_RECORDER_CHECKPOINT_JFRMETADATAEVENT_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/checkpoint/types/jfrThreadGroup.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,414 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
+#include "jfr/recorder/checkpoint/types/jfrThreadGroup.hpp"
+#include "jfr/utilities/jfrResourceManager.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/jniHandles.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/semaphore.hpp"
+#include "utilities/growableArray.hpp"
+
+class ThreadGroupExclusiveAccess : public StackObj {
+ private:
+  static Semaphore _mutex_semaphore;
+ public:
+  ThreadGroupExclusiveAccess() { _mutex_semaphore.wait(); }
+  ~ThreadGroupExclusiveAccess() { _mutex_semaphore.signal(); }
+};
+
+Semaphore ThreadGroupExclusiveAccess::_mutex_semaphore(1);
+JfrThreadGroup* JfrThreadGroup::_instance = NULL;
+
+class JfrThreadGroupPointers : public ResourceObj {
+ private:
+  const Handle _thread_group_handle;
+  jweak _thread_group_weak_ref;
+ public:
+  JfrThreadGroupPointers(Handle thread_group_handle, jweak thread_group_weak_ref);
+  Handle thread_group_handle() const;
+  jweak thread_group_weak_ref() const;
+  oopDesc* const thread_group_oop() const;
+  jweak transfer_weak_global_handle_ownership();
+  void clear_weak_ref();
+};
+
+JfrThreadGroupPointers::JfrThreadGroupPointers(Handle thread_group_handle, jweak thread_group_weak_ref) :
+  _thread_group_handle(thread_group_handle),
+  _thread_group_weak_ref(thread_group_weak_ref) {}
+
+Handle JfrThreadGroupPointers::thread_group_handle() const {
+  return _thread_group_handle;
+}
+
+jweak JfrThreadGroupPointers::thread_group_weak_ref() const {
+  return _thread_group_weak_ref;
+}
+
+oopDesc* const JfrThreadGroupPointers::thread_group_oop() const {
+  assert(_thread_group_weak_ref == NULL ||
+         JNIHandles::resolve_non_null(_thread_group_weak_ref) == _thread_group_handle(), "invariant");
+  return _thread_group_handle();
+}
+
+jweak JfrThreadGroupPointers::transfer_weak_global_handle_ownership() {
+  jweak temp = _thread_group_weak_ref;
+  _thread_group_weak_ref = NULL;
+  return temp;
+}
+
+void JfrThreadGroupPointers::clear_weak_ref() {
+  if (NULL != _thread_group_weak_ref) {
+    JNIHandles::destroy_weak_global(_thread_group_weak_ref);
+  }
+}
+
+class JfrThreadGroupsHelper : public ResourceObj {
+ private:
+  static const int invalid_iterator_pos = -1;
+  GrowableArray<JfrThreadGroupPointers*>* _thread_group_hierarchy;
+  int _current_iterator_pos;
+
+  int populate_thread_group_hierarchy(const JavaThread* jt, Thread* current);
+  JfrThreadGroupPointers& at(int index);
+
+ public:
+  JfrThreadGroupsHelper(const JavaThread* jt, Thread* current);
+  ~JfrThreadGroupsHelper();
+  JfrThreadGroupPointers& next();
+  bool is_valid() const;
+  bool has_next() const;
+};
+
+JfrThreadGroupsHelper::JfrThreadGroupsHelper(const JavaThread* jt, Thread* current) {
+  _thread_group_hierarchy = new GrowableArray<JfrThreadGroupPointers*>(10, false, mtTracing);
+  _current_iterator_pos = populate_thread_group_hierarchy(jt, current) - 1;
+}
+
+JfrThreadGroupsHelper::~JfrThreadGroupsHelper() {
+  assert(_current_iterator_pos == invalid_iterator_pos, "invariant");
+  for (int i = 0; i < _thread_group_hierarchy->length(); ++i) {
+    _thread_group_hierarchy->at(i)->clear_weak_ref();
+  }
+}
+
+JfrThreadGroupPointers& JfrThreadGroupsHelper::at(int index) {
+  assert(_thread_group_hierarchy != NULL, "invariant");
+  assert(index > invalid_iterator_pos && index < _thread_group_hierarchy->length(), "invariant");
+  return *(_thread_group_hierarchy->at(index));
+}
+
+bool JfrThreadGroupsHelper::has_next() const {
+  return _current_iterator_pos > invalid_iterator_pos;
+}
+
+bool JfrThreadGroupsHelper::is_valid() const {
+  return (_thread_group_hierarchy != NULL && _thread_group_hierarchy->length() > 0);
+}
+
+JfrThreadGroupPointers& JfrThreadGroupsHelper::next() {
+  assert(is_valid(), "invariant");
+  return at(_current_iterator_pos--);
+}
+
+/*
+ * If not at a safepoint, we create global weak references for
+ * all reachable thread groups of this thread.
+ * If we are at a safepoint, the caller is the VMThread during
+ * JFR checkpointing. It can use naked oops, because nothing
+ * will move before the list of thread groups is cleared and
+ * the mutator threads are restarted. The thread group list is
+ * cleared later by the VMThread as one of the final steps in
+ * JFR checkpointing (not here).
+ */
+int JfrThreadGroupsHelper::populate_thread_group_hierarchy(const JavaThread* jt, Thread* current) {
+  assert(jt != NULL && jt->is_Java_thread(), "invariant");
+  assert(current != NULL, "invariant");
+  assert(_thread_group_hierarchy != NULL, "invariant");
+
+  // immediate thread group
+  Handle thread_group_handle(current, java_lang_Thread::threadGroup(jt->threadObj()));
+  if (thread_group_handle == NULL) {
+    return 0;
+  }
+
+  const bool use_weak_handles = !SafepointSynchronize::is_at_safepoint();
+  jweak thread_group_weak_ref = use_weak_handles ? JNIHandles::make_weak_global(thread_group_handle) : NULL;
+
+  JfrThreadGroupPointers* thread_group_pointers = new JfrThreadGroupPointers(thread_group_handle, thread_group_weak_ref);
+  _thread_group_hierarchy->append(thread_group_pointers);
+  // immediate parent thread group
+  oop parent_thread_group_obj = java_lang_ThreadGroup::parent(thread_group_handle());
+  Handle parent_thread_group_handle(current, parent_thread_group_obj);
+
+  // and check the parents' parents...
+  while (!(parent_thread_group_handle == NULL)) {
+    const jweak parent_group_weak_ref = use_weak_handles ? JNIHandles::make_weak_global(parent_thread_group_handle) : NULL;
+    thread_group_pointers = new JfrThreadGroupPointers(parent_thread_group_handle, parent_group_weak_ref);
+    _thread_group_hierarchy->append(thread_group_pointers);
+    parent_thread_group_obj = java_lang_ThreadGroup::parent(parent_thread_group_handle());
+    parent_thread_group_handle = Handle(current, parent_thread_group_obj);
+  }
+  return _thread_group_hierarchy->length();
+}
+
+static traceid next_id() {
+  static traceid _current_threadgroup_id = 0;
+  return ++_current_threadgroup_id;
+}
+
+class JfrThreadGroup::JfrThreadGroupEntry : public JfrCHeapObj {
+  friend class JfrThreadGroup;
+ private:
+  traceid _thread_group_id;
+  traceid _parent_group_id;
+  char* _thread_group_name; // utf8 format
+  // If an entry is created during a safepoint, the
+  // _thread_group_oop contains a direct oop to
+  // the java.lang.ThreadGroup object.
+  // If an entry is created at JavaThread exit time (not at a safepoint),
+  // _thread_group_weak_ref contains a JNI weak global handle
+  // indirection to the java.lang.ThreadGroup object.
+  // Note: we cannot use a union here since CHECK_UNHANDLED_OOPS makes oop have
+  //       a ctor which isn't allowed in a union by the SunStudio compiler
+  oop _thread_group_oop;
+  jweak _thread_group_weak_ref;
+
+  JfrThreadGroupEntry(const char* tgstr, JfrThreadGroupPointers& ptrs);
+  ~JfrThreadGroupEntry();
+
+  traceid thread_group_id() const { return _thread_group_id; }
+  void set_thread_group_id(traceid tgid) { _thread_group_id = tgid; }
+
+  const char* const thread_group_name() const { return _thread_group_name; }
+  void set_thread_group_name(const char* tgname);
+
+  traceid parent_group_id() const { return _parent_group_id; }
+  void set_parent_group_id(traceid pgid) { _parent_group_id = pgid; }
+
+  void set_thread_group(JfrThreadGroupPointers& ptrs);
+  bool is_equal(const JfrThreadGroupPointers& ptrs) const;
+  const oop thread_group() const;
+};
+
+JfrThreadGroup::JfrThreadGroupEntry::JfrThreadGroupEntry(const char* tgname, JfrThreadGroupPointers& ptrs) :
+  _thread_group_id(0),
+  _parent_group_id(0),
+  _thread_group_name(NULL),
+  _thread_group_oop(NULL),
+  _thread_group_weak_ref(NULL) {
+  set_thread_group_name(tgname);
+  set_thread_group(ptrs);
+}
+
+JfrThreadGroup::JfrThreadGroupEntry::~JfrThreadGroupEntry() {
+  if (_thread_group_name != NULL) {
+    JfrCHeapObj::free(_thread_group_name, strlen(_thread_group_name) + 1);
+  }
+  if (_thread_group_weak_ref != NULL) {
+    JNIHandles::destroy_weak_global(_thread_group_weak_ref);
+  }
+}
+
+void JfrThreadGroup::JfrThreadGroupEntry::set_thread_group_name(const char* tgname) {
+  assert(_thread_group_name == NULL, "invariant");
+  if (tgname != NULL) {
+    size_t len = strlen(tgname);
+    _thread_group_name = JfrCHeapObj::new_array<char>(len+1);
+    strncpy(_thread_group_name, tgname, len);
+    _thread_group_name[len] = '\0';
+  }
+}
+
+const oop JfrThreadGroup::JfrThreadGroupEntry::thread_group() const {
+  return _thread_group_weak_ref != NULL ? JNIHandles::resolve(_thread_group_weak_ref) : _thread_group_oop;
+}
+
+void JfrThreadGroup::JfrThreadGroupEntry::set_thread_group(JfrThreadGroupPointers& ptrs) {
+  _thread_group_weak_ref = ptrs.transfer_weak_global_handle_ownership();
+  if (_thread_group_weak_ref == NULL) {
+    _thread_group_oop = ptrs.thread_group_oop();
+    assert(_thread_group_oop != NULL, "invariant");
+  } else {
+    _thread_group_oop = NULL;
+  }
+}
+
+JfrThreadGroup::JfrThreadGroup() : _list(NULL) {
+  _list = new (ResourceObj::C_HEAP, mtTracing) GrowableArray<JfrThreadGroupEntry*>(30, true);
+}
+
+JfrThreadGroup::~JfrThreadGroup() {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  if (_list != NULL) {
+    for (int i = 0; i < _list->length(); i++) {
+      JfrThreadGroupEntry* e = _list->at(i);
+      delete e;
+    }
+    delete _list;
+  }
+}
+
+JfrThreadGroup* JfrThreadGroup::instance() {
+  return _instance;
+}
+
+void JfrThreadGroup::set_instance(JfrThreadGroup* new_instance) {
+  _instance = new_instance;
+}
+
+traceid JfrThreadGroup::thread_group_id(const JavaThread* jt, Thread* current) {
+  ResourceMark rm(current);
+  HandleMark hm(current);
+  JfrThreadGroupsHelper helper(jt, current);
+  return helper.is_valid() ? thread_group_id_internal(helper) : 0;
+}
+
+traceid JfrThreadGroup::thread_group_id(JavaThread* jt) {
+  assert(!JfrStream_lock->owned_by_self(), "holding stream lock but should not hold it here");
+  return thread_group_id(jt, jt);
+}
+
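+// Walks the hierarchy from the outermost parent down to the immediate thread group,
+// creating and numbering entries for groups not yet known; returns the id of the
+// immediate thread group.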
+traceid JfrThreadGroup::thread_group_id_internal(JfrThreadGroupsHelper& helper) {
+  ThreadGroupExclusiveAccess lock;
+  JfrThreadGroup* tg_instance = instance();
+  if (tg_instance == NULL) {
+    tg_instance = new JfrThreadGroup();
+    if (tg_instance == NULL) {
+      return 0;
+    }
+    set_instance(tg_instance);
+  }
+
+  JfrThreadGroupEntry* tge = NULL;
+  int parent_thread_group_id = 0;
+  while (helper.has_next()) {
+    JfrThreadGroupPointers& ptrs = helper.next();
+    tge = tg_instance->find_entry(ptrs);
+    if (NULL == tge) {
+      tge = tg_instance->new_entry(ptrs);
+      assert(tge != NULL, "invariant");
+      tge->set_parent_group_id(parent_thread_group_id);
+    }
+    parent_thread_group_id = tge->thread_group_id();
+  }
+  // the last entry in the hierarchy is the immediate thread group
+  return tge->thread_group_id();
+}
+
+bool JfrThreadGroup::JfrThreadGroupEntry::is_equal(const JfrThreadGroupPointers& ptrs) const {
+  return ptrs.thread_group_oop() == thread_group();
+}
+
+JfrThreadGroup::JfrThreadGroupEntry*
+JfrThreadGroup::find_entry(const JfrThreadGroupPointers& ptrs) const {
+  for (int index = 0; index < _list->length(); ++index) {
+    JfrThreadGroupEntry* curtge = _list->at(index);
+    if (curtge->is_equal(ptrs)) {
+      return curtge;
+    }
+  }
+  return (JfrThreadGroupEntry*) NULL;
+}
+
+// Assumes the caller has already checked for an existing entry via find_entry().
+JfrThreadGroup::JfrThreadGroupEntry*
+JfrThreadGroup::new_entry(JfrThreadGroupPointers& ptrs) {
+  typeArrayOop tg_name = java_lang_ThreadGroup::name(ptrs.thread_group_oop());
+  JfrThreadGroupEntry* const tge =
+    new JfrThreadGroupEntry(UNICODE::as_utf8((jchar*) tg_name->base(T_CHAR), tg_name->length()), ptrs);
+  add_entry(tge);
+  return tge;
+}
+
+int JfrThreadGroup::add_entry(JfrThreadGroupEntry* tge) {
+  assert(tge != NULL, "attempting to add a null entry!");
+  assert(0 == tge->thread_group_id(), "id must be unassigned!");
+  tge->set_thread_group_id(next_id());
+  return _list->append(tge);
+}
+
+void JfrThreadGroup::write_thread_group_entries(JfrCheckpointWriter& writer) const {
+  assert(_list != NULL && !_list->is_empty(), "should not need be here!");
+  const int number_of_tg_entries = _list->length();
+  writer.write_count(number_of_tg_entries);
+  for (int index = 0; index < number_of_tg_entries; ++index) {
+    const JfrThreadGroupEntry* const curtge = _list->at(index);
+    writer.write_key(curtge->thread_group_id());
+    writer.write(curtge->parent_group_id());
+    writer.write(curtge->thread_group_name());
+  }
+}
+
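+// Writes the entry for the given thread group id and all of its parents by following
+// the parent group links; the entry count is back-patched at count_offset, and the saved
+// context is restored if no matching entry was found.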
+void JfrThreadGroup::write_selective_thread_group(JfrCheckpointWriter* writer, traceid thread_group_id) const {
+  assert(writer != NULL, "invariant");
+  assert(_list != NULL && !_list->is_empty(), "should not need be here!");
+  const int number_of_tg_entries = _list->length();
+
+  // save context
+  const JfrCheckpointContext ctx = writer->context();
+  writer->write_type(TYPE_THREADGROUP);
+  const jlong count_offset = writer->reserve(sizeof(u4)); // Don't know how many yet
+  int number_of_entries_written = 0;
+  for (int index = number_of_tg_entries - 1; index >= 0; --index) {
+    const JfrThreadGroupEntry* const curtge = _list->at(index);
+    if (thread_group_id == curtge->thread_group_id()) {
+      writer->write_key(curtge->thread_group_id());
+      writer->write(curtge->parent_group_id());
+      writer->write(curtge->thread_group_name());
+      ++number_of_entries_written;
+      thread_group_id = curtge->parent_group_id();
+    }
+  }
+  if (number_of_entries_written == 0) {
+    // nothing to write, restore context
+    writer->set_context(ctx);
+    return;
+  }
+  assert(number_of_entries_written > 0, "invariant");
+  writer->write_count(number_of_entries_written, count_offset);
+}
+
+// Write out JfrThreadGroup instance and then delete it
+void JfrThreadGroup::serialize(JfrCheckpointWriter& writer) {
+  ThreadGroupExclusiveAccess lock;
+  JfrThreadGroup* tg_instance = instance();
+  assert(tg_instance != NULL, "invariant");
+  ResourceManager<JfrThreadGroup> tg_handle(tg_instance);
+  set_instance(NULL);
+  tg_handle->write_thread_group_entries(writer);
+}
+
+// for writing a particular thread group
+void JfrThreadGroup::serialize(JfrCheckpointWriter* writer, traceid thread_group_id) {
+  assert(writer != NULL, "invariant");
+  ThreadGroupExclusiveAccess lock;
+  JfrThreadGroup* const tg_instance = instance();
+  assert(tg_instance != NULL, "invariant");
+  tg_instance->write_selective_thread_group(writer, thread_group_id);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/checkpoint/types/jfrThreadGroup.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_RECORDER_CHECKPOINT_TYPES_JFRTHREADGROUP_HPP
+#define SHARE_VM_JFR_RECORDER_CHECKPOINT_TYPES_JFRTHREADGROUP_HPP
+
+#include "jni.h"
+#include "jfr/utilities/jfrAllocation.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+
+class JfrCheckpointWriter;
+template <typename>
+class GrowableArray;
+class JfrThreadGroupsHelper;
+class JfrThreadGroupPointers;
+
+class JfrThreadGroup : public JfrCHeapObj {
+  friend class JfrCheckpointThreadClosure;
+ private:
+  static JfrThreadGroup* _instance;
+  class JfrThreadGroupEntry;
+  GrowableArray<JfrThreadGroupEntry*>* _list;
+
+  JfrThreadGroup();
+  JfrThreadGroupEntry* find_entry(const JfrThreadGroupPointers& ptrs) const;
+  JfrThreadGroupEntry* new_entry(JfrThreadGroupPointers& ptrs);
+  int add_entry(JfrThreadGroupEntry* const tge);
+
+  void write_thread_group_entries(JfrCheckpointWriter& writer) const;
+  void write_selective_thread_group(JfrCheckpointWriter* writer, traceid thread_group_id) const;
+
+  static traceid thread_group_id_internal(JfrThreadGroupsHelper& helper);
+  static JfrThreadGroup* instance();
+  static void set_instance(JfrThreadGroup* new_instance);
+
+ public:
+  ~JfrThreadGroup();
+  static void serialize(JfrCheckpointWriter& w);
+  static void serialize(JfrCheckpointWriter* w, traceid thread_group_id);
+  static traceid thread_group_id(JavaThread* thread);
+  static traceid thread_group_id(const JavaThread* thread, Thread* current);
+};
+
+#endif // SHARE_VM_JFR_RECORDER_CHECKPOINT_TYPES_JFRTHREADGROUP_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/checkpoint/types/jfrThreadState.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,82 @@
+/*
+* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*
+*/
+
+#include "precompiled.hpp"
+#include "jfr/recorder/checkpoint/types/jfrThreadState.hpp"
+#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
+#include "jvmtifiles/jvmti.h"
+
+struct jvmti_thread_state {
+  u8 id;
+  const char* description;
+};
+
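+// Mapping from JVMTI thread state bit masks to the symbolic names serialized
+// for the thread state type.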
+static jvmti_thread_state states[] = {
+  {
+    JVMTI_JAVA_LANG_THREAD_STATE_NEW,
+    "STATE_NEW"
+  },
+  {
+    JVMTI_THREAD_STATE_TERMINATED,
+    "STATE_TERMINATED"
+  },
+  {
+    JVMTI_JAVA_LANG_THREAD_STATE_RUNNABLE,
+    "STATE_RUNNABLE"
+  },
+  {
+    (JVMTI_THREAD_STATE_ALIVE | JVMTI_THREAD_STATE_WAITING | JVMTI_THREAD_STATE_WAITING_WITH_TIMEOUT | JVMTI_THREAD_STATE_SLEEPING),
+    "STATE_SLEEPING"
+  },
+  {
+    (JVMTI_THREAD_STATE_ALIVE | JVMTI_THREAD_STATE_WAITING | JVMTI_THREAD_STATE_WAITING_INDEFINITELY | JVMTI_THREAD_STATE_IN_OBJECT_WAIT),
+    "STATE_IN_OBJECT_WAIT"
+  },
+  {
+    (JVMTI_THREAD_STATE_ALIVE | JVMTI_THREAD_STATE_WAITING | JVMTI_THREAD_STATE_WAITING_WITH_TIMEOUT | JVMTI_THREAD_STATE_IN_OBJECT_WAIT),
+    "STATE_IN_OBJECT_WAIT_TIMED"
+  },
+  {
+    (JVMTI_THREAD_STATE_ALIVE | JVMTI_THREAD_STATE_WAITING | JVMTI_THREAD_STATE_WAITING_INDEFINITELY | JVMTI_THREAD_STATE_PARKED),
+    "STATE_PARKED"
+  },
+  {
+    (JVMTI_THREAD_STATE_ALIVE | JVMTI_THREAD_STATE_WAITING | JVMTI_THREAD_STATE_WAITING_WITH_TIMEOUT | JVMTI_THREAD_STATE_PARKED),
+    "STATE_PARKED_TIMED"
+  },
+  {
+    JVMTI_JAVA_LANG_THREAD_STATE_BLOCKED,
+    "STATE_BLOCKED_ON_MONITOR_ENTER"
+  }
+};
+
+void JfrThreadState::serialize(JfrCheckpointWriter& writer) {
+  const u4 number_of_states = sizeof(states) / sizeof(jvmti_thread_state);
+  writer.write_count(number_of_states);
+  for (u4 i = 0; i < number_of_states; ++i) {
+    writer.write_key(states[i].id);
+    writer.write(states[i].description);
+  }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/checkpoint/types/jfrThreadState.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,37 @@
+/*
+* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*
+*/
+
+#ifndef SHARE_VM_JFR_RECORDER_CHECKPOINT_TYPES_JFRTHREADSTATE_HPP
+#define SHARE_VM_JFR_RECORDER_CHECKPOINT_TYPES_JFRTHREADSTATE_HPP
+
+#include "memory/allocation.hpp"
+
+class JfrCheckpointWriter;
+
+class JfrThreadState : public AllStatic {
+ public:
+  static void serialize(JfrCheckpointWriter& writer);
+};
+
+#endif // SHARE_VM_JFR_RECORDER_CHECKPOINT_TYPES_JFRTHREADSTATE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/checkpoint/types/jfrType.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,355 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/javaClasses.hpp"
+#include "code/codeBlob.hpp"
+#include "code/codeCache.hpp"
+#include "gc_interface/gcCause.hpp"
+#include "gc_interface/gcName.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "gc_implementation/shared/gcWhen.hpp"
+#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
+#include "jfr/leakprofiler/leakProfiler.hpp"
+#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
+#include "jfr/recorder/checkpoint/types/jfrType.hpp"
+#include "jfr/recorder/jfrRecorder.hpp"
+#include "jfr/recorder/checkpoint/types/jfrThreadGroup.hpp"
+#include "jfr/recorder/checkpoint/types/jfrThreadState.hpp"
+#include "jfr/recorder/checkpoint/types/jfrTypeSet.hpp"
+#include "jfr/support/jfrThreadLocal.hpp"
+#include "jfr/writers/jfrJavaEventWriter.hpp"
+#include "memory/metaspaceGCThresholdUpdater.hpp"
+#include "memory/referenceType.hpp"
+#include "memory/universe.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/osThread.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/synchronizer.hpp"
+#include "runtime/thread.inline.hpp"
+#include "runtime/vm_operations.hpp"
+
+#ifdef COMPILER2
+#include "opto/compile.hpp"
+#include "opto/node.hpp"
+#endif
+#if INCLUDE_ALL_GCS
+//#include "gc_implementation/g1/g1HeapRegionTraceType.hpp"
+#include "gc_implementation/g1/g1YCTypes.hpp"
+#endif
+
+// Requires a ResourceMark for get_thread_name/as_utf8
+class JfrCheckpointThreadClosure : public ThreadClosure {
+ private:
+  JfrCheckpointWriter& _writer;
+  JfrCheckpointContext _ctx;
+  const intptr_t _count_position;
+  Thread* const _curthread;
+  u4 _count;
+
+ public:
+  JfrCheckpointThreadClosure(JfrCheckpointWriter& writer) : _writer(writer),
+                                                            _ctx(writer.context()),
+                                                            _count_position(writer.reserve(sizeof(u4))),
+                                                            _curthread(Thread::current()),
+                                                            _count(0) {
+  }
+
+  ~JfrCheckpointThreadClosure() {
+    if (_count == 0) {
+      // restore
+      _writer.set_context(_ctx);
+      return;
+    }
+    _writer.write_count(_count, _count_position);
+  }
+
+  void do_thread(Thread* t);
+};
+
+// Requires a ResourceMark for get_thread_name/as_utf8
+void JfrCheckpointThreadClosure::do_thread(Thread* t) {
+  assert(t != NULL, "invariant");
+  assert_locked_or_safepoint(Threads_lock);
+  const JfrThreadLocal* const tl = t->jfr_thread_local();
+  assert(tl != NULL, "invariant");
+  if (tl->is_dead()) {
+    return;
+  }
+  ++_count;
+  _writer.write_key(tl->thread_id());
+  _writer.write(t->name());
+  const OSThread* const os_thread = t->osthread();
+  _writer.write<traceid>(os_thread != NULL ? os_thread->thread_id() : 0);
+  if (t->is_Java_thread()) {
+    JavaThread* const jt = (JavaThread*)t;
+    _writer.write(jt->name());
+    _writer.write(java_lang_Thread::thread_id(jt->threadObj()));
+    _writer.write(JfrThreadGroup::thread_group_id(jt, _curthread));
+    // since we are iterating threads during a safepoint, also issue notification
+    JfrJavaEventWriter::notify(jt);
+    return;
+  }
+  _writer.write((const char*)NULL); // java name
+  _writer.write((traceid)0); // java thread id
+  _writer.write((traceid)0); // java thread group
+}
+
+void JfrThreadConstantSet::serialize(JfrCheckpointWriter& writer) {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  JfrCheckpointThreadClosure tc(writer);
+  Threads::threads_do(&tc);
+}
+
+void JfrThreadGroupConstant::serialize(JfrCheckpointWriter& writer) {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  JfrThreadGroup::serialize(writer);
+}
+
+static const char* flag_value_origin_to_string(Flag::Flags origin) {
+  switch (origin) {
+    case Flag::DEFAULT: return "Default";
+    case Flag::COMMAND_LINE: return "Command line";
+    case Flag::ENVIRON_VAR: return "Environment variable";
+    case Flag::CONFIG_FILE: return "Config file";
+    case Flag::MANAGEMENT: return "Management";
+    case Flag::ERGONOMIC: return "Ergonomic";
+    case Flag::ATTACH_ON_DEMAND: return "Attach on demand";
+    case Flag::INTERNAL: return "Internal";
+    default: ShouldNotReachHere(); return "";
+  }
+}
+
+void FlagValueOriginConstant::serialize(JfrCheckpointWriter& writer) {
+  static const u4 nof_entries = Flag::LAST_VALUE_ORIGIN + 1;
+  writer.write_count(nof_entries);
+  for (u4 i = 0; i < nof_entries; ++i) {
+    writer.write_key(i);
+    writer.write(flag_value_origin_to_string((Flag::Flags)i));
+  }
+}
+
+void MonitorInflateCauseConstant::serialize(JfrCheckpointWriter& writer) {
+  // XXX no such counters. implement?
+//  static const u4 nof_entries = ObjectSynchronizer::inflate_cause_nof;
+//  writer.write_count(nof_entries);
+//  for (u4 i = 0; i < nof_entries; ++i) {
+//    writer.write_key(i);
+//    writer.write(ObjectSynchronizer::inflate_cause_name((ObjectSynchronizer::InflateCause)i));
+//  }
+}
+
+void GCCauseConstant::serialize(JfrCheckpointWriter& writer) {
+  static const u4 nof_entries = GCCause::_last_gc_cause;
+  writer.write_count(nof_entries);
+  for (u4 i = 0; i < nof_entries; ++i) {
+    writer.write_key(i);
+    writer.write(GCCause::to_string((GCCause::Cause)i));
+  }
+}
+
+void GCNameConstant::serialize(JfrCheckpointWriter& writer) {
+  static const u4 nof_entries = GCNameEndSentinel;
+  writer.write_count(nof_entries);
+  for (u4 i = 0; i < nof_entries; ++i) {
+    writer.write_key(i);
+    writer.write(GCNameHelper::to_string((GCName)i));
+  }
+}
+
+void GCWhenConstant::serialize(JfrCheckpointWriter& writer) {
+  static const u4 nof_entries = GCWhen::GCWhenEndSentinel;
+  writer.write_count(nof_entries);
+  for (u4 i = 0; i < nof_entries; ++i) {
+    writer.write_key(i);
+    writer.write(GCWhen::to_string((GCWhen::Type)i));
+  }
+}
+
+void G1HeapRegionTypeConstant::serialize(JfrCheckpointWriter& writer) {
+  // XXX TODO?
+//  static const u4 nof_entries = G1HeapRegionTraceType::G1HeapRegionTypeEndSentinel;
+//  writer.write_count(nof_entries);
+//  for (u4 i = 0; i < nof_entries; ++i) {
+//    writer.write_key(i);
+//    writer.write(G1HeapRegionTraceType::to_string((G1HeapRegionTraceType::Type)i));
+//  }
+}
+
+void GCThresholdUpdaterConstant::serialize(JfrCheckpointWriter& writer) {
+  static const u4 nof_entries = MetaspaceGCThresholdUpdater::Last;
+  writer.write_count(nof_entries);
+  for (u4 i = 0; i < nof_entries; ++i) {
+    writer.write_key(i);
+    writer.write(MetaspaceGCThresholdUpdater::to_string((MetaspaceGCThresholdUpdater::Type)i));
+  }
+}
+
+void MetadataTypeConstant::serialize(JfrCheckpointWriter& writer) {
+  static const u4 nof_entries = Metaspace::MetadataTypeCount;
+  writer.write_count(nof_entries);
+  for (u4 i = 0; i < nof_entries; ++i) {
+    writer.write_key(i);
+    writer.write(Metaspace::metadata_type_name((Metaspace::MetadataType)i));
+  }
+}
+
+void MetaspaceObjectTypeConstant::serialize(JfrCheckpointWriter& writer) {
+  static const u4 nof_entries = MetaspaceObj::_number_of_types;
+  writer.write_count(nof_entries);
+  for (u4 i = 0; i < nof_entries; ++i) {
+    writer.write_key(i);
+    writer.write(MetaspaceObj::type_name((MetaspaceObj::Type)i));
+  }
+}
+
+void G1YCTypeConstant::serialize(JfrCheckpointWriter& writer) {
+#if INCLUDE_ALL_GCS
+  static const u4 nof_entries = G1YCTypeEndSentinel;
+  writer.write_count(nof_entries);
+  for (u4 i = 0; i < nof_entries; ++i) {
+    writer.write_key(i);
+    writer.write(G1YCTypeHelper::to_string((G1YCType)i));
+  }
+#endif
+}
+
+static const char* reference_type_to_string(ReferenceType rt) {
+  switch (rt) {
+    case REF_NONE: return "None reference";
+    case REF_OTHER: return "Other reference";
+    case REF_SOFT: return "Soft reference";
+    case REF_WEAK: return "Weak reference";
+    case REF_FINAL: return "Final reference";
+    case REF_PHANTOM: return "Phantom reference";
+    default:
+      ShouldNotReachHere();
+      return NULL;
+  }
+}
+
+void ReferenceTypeConstant::serialize(JfrCheckpointWriter& writer) {
+  static const u4 nof_entries = REF_PHANTOM + 1;
+  writer.write_count(nof_entries);
+  for (u4 i = 0; i < nof_entries; ++i) {
+    writer.write_key(i);
+    writer.write(reference_type_to_string((ReferenceType)i));
+  }
+}
+
+void NarrowOopModeConstant::serialize(JfrCheckpointWriter& writer) {
+  static const u4 nof_entries = Universe::HeapBasedNarrowOop + 1;
+  writer.write_count(nof_entries);
+  for (u4 i = 0; i < nof_entries; ++i) {
+    writer.write_key(i);
+    writer.write(Universe::narrow_oop_mode_to_string((Universe::NARROW_OOP_MODE)i));
+  }
+}
+
+void CompilerPhaseTypeConstant::serialize(JfrCheckpointWriter& writer) {
+#ifdef COMPILER2
+  static const u4 nof_entries = PHASE_NUM_TYPES;
+  writer.write_count(nof_entries);
+  for (u4 i = 0; i < nof_entries; ++i) {
+    writer.write_key(i);
+    writer.write(CompilerPhaseTypeHelper::to_string((CompilerPhaseType)i));
+  }
+#endif
+}
+
+void CodeBlobTypeConstant::serialize(JfrCheckpointWriter& writer) {
+  // XXX no code blob types. need to send any stub value?
+//  static const u4 nof_entries = CodeBlobType::NumTypes;
+//  writer.write_count(nof_entries);
+//  for (u4 i = 0; i < nof_entries; ++i) {
+//    writer.write_key(i);
+//    writer.write(CodeCache::get_code_heap_name(i));
+//  }
+}
+
+void VMOperationTypeConstant::serialize(JfrCheckpointWriter& writer) {
+  static const u4 nof_entries = VM_Operation::VMOp_Terminating;
+  writer.write_count(nof_entries);
+  for (u4 i = 0; i < nof_entries; ++i) {
+    writer.write_key(i);
+    writer.write(VM_Operation::name(VM_Operation::VMOp_Type(i)));
+  }
+}
+
+class TypeSetSerialization {
+ private:
+  bool _class_unload;
+ public:
+  explicit TypeSetSerialization(bool class_unload) : _class_unload(class_unload) {}
+  void write(JfrCheckpointWriter& writer, JfrCheckpointWriter* leakp_writer) {
+    JfrTypeSet::serialize(&writer, leakp_writer, _class_unload);
+  }
+};
+
+void ClassUnloadTypeSet::serialize(JfrCheckpointWriter& writer) {
+  TypeSetSerialization type_set(true);
+  if (LeakProfiler::is_running()) {
+    JfrCheckpointWriter leakp_writer(false, true, Thread::current());
+    type_set.write(writer, &leakp_writer);
+    ObjectSampleCheckpoint::install(leakp_writer, true, true);
+    return;
+  }
+  type_set.write(writer, NULL);
+}
+
+void TypeSet::serialize(JfrCheckpointWriter& writer) {
+  TypeSetSerialization type_set(false);
+  if (LeakProfiler::is_suspended()) {
+    JfrCheckpointWriter leakp_writer(false, true, Thread::current());
+    type_set.write(writer, &leakp_writer);
+    ObjectSampleCheckpoint::install(leakp_writer, false, true);
+    return;
+  }
+  type_set.write(writer, NULL);
+}
+
+void ThreadStateConstant::serialize(JfrCheckpointWriter& writer) {
+  JfrThreadState::serialize(writer);
+}
+
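+// Writes a single entry for the current thread: JFR thread id, thread name (used for both
+// the OS and Java name fields), OS thread id, Java thread id and thread group id, followed
+// by the chain of thread groups for that id.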
+void JfrThreadConstant::serialize(JfrCheckpointWriter& writer) {
+  assert(_thread != NULL, "invariant");
+  assert(_thread == Thread::current(), "invariant");
+  assert(_thread->is_Java_thread(), "invariant");
+  assert(!_thread->jfr_thread_local()->has_thread_checkpoint(), "invariant");
+  ResourceMark rm(_thread);
+  const oop threadObj = _thread->threadObj();
+  assert(threadObj != NULL, "invariant");
+  const u8 java_lang_thread_id = java_lang_Thread::thread_id(threadObj);
+  const char* const thread_name = _thread->name();
+  const traceid thread_group_id = JfrThreadGroup::thread_group_id(_thread);
+  writer.write_count(1);
+  writer.write_key(_thread->jfr_thread_local()->thread_id());
+  writer.write(thread_name);
+  writer.write((traceid)_thread->osthread()->thread_id());
+  writer.write(thread_name);
+  writer.write(java_lang_thread_id);
+  writer.write(thread_group_id);
+  JfrThreadGroup::serialize(&writer, thread_group_id);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/checkpoint/types/jfrType.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_CHECKPOINT_TYPES_JFRTYPE_HPP
+#define SHARE_VM_JFR_CHECKPOINT_TYPES_JFRTYPE_HPP
+
+#include "jfr/metadata/jfrSerializer.hpp"
+
+class JfrThreadConstantSet : public JfrSerializer {
+ public:
+  void serialize(JfrCheckpointWriter& writer);
+};
+
+class JfrThreadGroupConstant : public JfrSerializer {
+ public:
+  void serialize(JfrCheckpointWriter& writer);
+};
+
+class ClassUnloadTypeSet : public JfrSerializer {
+ public:
+  void serialize(JfrCheckpointWriter& writer);
+};
+
+class FlagValueOriginConstant : public JfrSerializer {
+ public:
+  void serialize(JfrCheckpointWriter& writer);
+};
+
+class MonitorInflateCauseConstant : public JfrSerializer {
+ public:
+  void serialize(JfrCheckpointWriter& writer);
+};
+
+class GCCauseConstant : public JfrSerializer {
+ public:
+  void serialize(JfrCheckpointWriter& writer);
+};
+
+class GCNameConstant : public JfrSerializer {
+ public:
+  void serialize(JfrCheckpointWriter& writer);
+};
+
+class GCWhenConstant : public JfrSerializer {
+ public:
+  void serialize(JfrCheckpointWriter& writer);
+};
+
+class G1HeapRegionTypeConstant : public JfrSerializer {
+ public:
+  void serialize(JfrCheckpointWriter& writer);
+};
+
+class GCThresholdUpdaterConstant : public JfrSerializer {
+ public:
+  void serialize(JfrCheckpointWriter& writer);
+};
+
+class MetadataTypeConstant : public JfrSerializer {
+ public:
+  void serialize(JfrCheckpointWriter& writer);
+};
+
+class MetaspaceObjectTypeConstant : public JfrSerializer {
+ public:
+  void serialize(JfrCheckpointWriter& writer);
+};
+
+class G1YCTypeConstant : public JfrSerializer {
+ public:
+  void serialize(JfrCheckpointWriter& writer);
+};
+
+class ReferenceTypeConstant : public JfrSerializer {
+ public:
+  void serialize(JfrCheckpointWriter& writer);
+};
+
+class NarrowOopModeConstant : public JfrSerializer {
+ public:
+  void serialize(JfrCheckpointWriter& writer);
+};
+
+class CompilerPhaseTypeConstant : public JfrSerializer {
+ public:
+  void serialize(JfrCheckpointWriter& writer);
+};
+
+class CodeBlobTypeConstant : public JfrSerializer {
+ public:
+  void serialize(JfrCheckpointWriter& writer);
+};
+
+class VMOperationTypeConstant : public JfrSerializer {
+ public:
+  void serialize(JfrCheckpointWriter& writer);
+};
+
+class TypeSet : public JfrSerializer {
+ public:
+  void serialize(JfrCheckpointWriter& writer);
+};
+
+class ThreadStateConstant : public JfrSerializer {
+ public:
+  void serialize(JfrCheckpointWriter& writer);
+};
+
+class JfrThreadConstant : public JfrSerializer {
+ private:
+  JavaThread* _thread;
+ public:
+  JfrThreadConstant(JavaThread* jt) : _thread(jt) {}
+  void serialize(JfrCheckpointWriter& writer);
+};
+
+#endif // SHARE_VM_JFR_CHECKPOINT_TYPES_JFRTYPE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/checkpoint/types/jfrTypeManager.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/metadata/jfrSerializer.hpp"
+#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
+#include "jfr/recorder/checkpoint/types/jfrType.hpp"
+#include "jfr/recorder/checkpoint/types/jfrTypeManager.hpp"
+#include "jfr/utilities/jfrDoublyLinkedList.hpp"
+#include "jfr/utilities/jfrIterator.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/thread.inline.hpp"
+#include "utilities/exceptions.hpp"
+#include "runtime/semaphore.hpp"
+
+class JfrSerializerRegistration : public JfrCHeapObj {
+ private:
+  JfrSerializerRegistration* _next;
+  JfrSerializerRegistration* _prev;
+  JfrSerializer* _serializer;
+  mutable JfrCheckpointBlobHandle _cache;
+  JfrTypeId _id;
+  bool _permit_cache;
+
+ public:
+  JfrSerializerRegistration(JfrTypeId id, bool permit_cache, JfrSerializer* serializer) :
+    _next(NULL), _prev(NULL), _serializer(serializer), _cache(), _id(id), _permit_cache(permit_cache) {}
+
+  ~JfrSerializerRegistration() {
+    delete _serializer;
+  }
+
+  JfrSerializerRegistration* next() const {
+    return _next;
+  }
+
+  void set_next(JfrSerializerRegistration* next) {
+    _next = next;
+  }
+
+  JfrSerializerRegistration* prev() const {
+    return _prev;
+  }
+
+  void set_prev(JfrSerializerRegistration* prev) {
+    _prev = prev;
+  }
+
+  JfrTypeId id() const {
+    return _id;
+  }
+
+  void invoke(JfrCheckpointWriter& writer) const;
+};
+
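+// Writes a cached blob if one is available; otherwise serializes the type id and delegates
+// to the registered serializer, rewinding if nothing was produced and caching the result
+// when caching is permitted.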
+void JfrSerializerRegistration::invoke(JfrCheckpointWriter& writer) const {
+  if (_cache.valid()) {
+    writer.increment();
+    _cache->write(writer);
+    return;
+  }
+  const JfrCheckpointContext ctx = writer.context();
+  // serialize the type id before invoking callback
+  writer.write_type(_id);
+  const intptr_t start = writer.current_offset();
+  // invoke the serializer routine
+  _serializer->serialize(writer);
+  if (start == writer.current_offset()) {
+    // the serializer implementation did nothing, rewind to restore
+    writer.set_context(ctx);
+    return;
+  }
+  if (_permit_cache) {
+    _cache = writer.copy(&ctx);
+  }
+}
+
+class SerializerRegistrationGuard : public StackObj {
+ private:
+  static Semaphore _mutex_semaphore;
+ public:
+  SerializerRegistrationGuard() {
+    _mutex_semaphore.wait();
+  }
+  ~SerializerRegistrationGuard() {
+    _mutex_semaphore.signal();
+  }
+};
+
+Semaphore SerializerRegistrationGuard::_mutex_semaphore(1);
+
+typedef JfrDoublyLinkedList<JfrSerializerRegistration> List;
+typedef StopOnNullIterator<const List> Iterator;
+static List types;
+static List safepoint_types;
+
+void JfrTypeManager::clear() {
+  SerializerRegistrationGuard guard;
+  Iterator iter(types);
+  JfrSerializerRegistration* registration;
+  while (iter.has_next()) {
+    registration = types.remove(iter.next());
+    assert(registration != NULL, "invariant");
+    delete registration;
+  }
+  Iterator sp_type_iter(safepoint_types);
+  while (sp_type_iter.has_next()) {
+    registration = safepoint_types.remove(sp_type_iter.next());
+    assert(registration != NULL, "invariant");
+    delete registration;
+  }
+}
+
+void JfrTypeManager::write_types(JfrCheckpointWriter& writer) {
+  const Iterator iter(types);
+  while (iter.has_next()) {
+    iter.next()->invoke(writer);
+  }
+}
+
+void JfrTypeManager::write_safepoint_types(JfrCheckpointWriter& writer) {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  const Iterator iter(safepoint_types);
+  while (iter.has_next()) {
+    iter.next()->invoke(writer);
+  }
+}
+
+void JfrTypeManager::write_type_set() {
+  // can safepoint here because of PackageTable_lock
+  MutexLockerEx lock(SafepointSynchronize::is_at_safepoint() ? NULL : PackageTable_lock);
+  JfrCheckpointWriter writer(true, true, Thread::current());
+  TypeSet set;
+  set.serialize(writer);
+}
+
+void JfrTypeManager::write_type_set_for_unloaded_classes() {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  JfrCheckpointWriter writer(false, true, Thread::current());
+  ClassUnloadTypeSet class_unload_set;
+  class_unload_set.serialize(writer);
+}
+
+void JfrTypeManager::create_thread_checkpoint(JavaThread* jt) {
+  assert(jt != NULL, "invariant");
+  JfrThreadConstant type_thread(jt);
+  JfrCheckpointWriter writer(false, true, jt);
+  writer.write_type(TYPE_THREAD);
+  type_thread.serialize(writer);
+  // create and install a checkpoint blob
+  jt->jfr_thread_local()->set_thread_checkpoint(writer.checkpoint_blob());
+  assert(jt->jfr_thread_local()->has_thread_checkpoint(), "invariant");
+}
+
+void JfrTypeManager::write_thread_checkpoint(JavaThread* jt) {
+  assert(jt != NULL, "JavaThread is NULL!");
+  ResourceMark rm(jt);
+  if (jt->jfr_thread_local()->has_thread_checkpoint()) {
+    JfrCheckpointWriter writer(false, false, jt);
+    jt->jfr_thread_local()->thread_checkpoint()->write(writer);
+  } else {
+    JfrThreadConstant type_thread(jt);
+    JfrCheckpointWriter writer(false, true, jt);
+    writer.write_type(TYPE_THREAD);
+    type_thread.serialize(writer);
+  }
+}
+
+#ifdef ASSERT
+static void assert_not_registered_twice(JfrTypeId id, List& list) {
+  const Iterator iter(list);
+  while (iter.has_next()) {
+    assert(iter.next()->id() != id, "invariant");
+  }
+}
+#endif
+
+static bool register_type(JfrTypeId id, bool require_safepoint, bool permit_cache, JfrSerializer* serializer) {
+  assert(serializer != NULL, "invariant");
+  JfrSerializerRegistration* const registration = new JfrSerializerRegistration(id, permit_cache, serializer);
+  if (registration == NULL) {
+    delete serializer;
+    return false;
+  }
+  if (require_safepoint) {
+    assert(!safepoint_types.in_list(registration), "invariant");
+    DEBUG_ONLY(assert_not_registered_twice(id, safepoint_types);)
+    safepoint_types.prepend(registration);
+  } else {
+    assert(!types.in_list(registration), "invariant");
+    DEBUG_ONLY(assert_not_registered_twice(id, types);)
+    types.prepend(registration);
+  }
+  return true;
+}
+
+bool JfrTypeManager::initialize() {
+  SerializerRegistrationGuard guard;
+
+  // register non-safepointing type serialization
+  register_type(TYPE_FLAGVALUEORIGIN, false, true, new FlagValueOriginConstant());
+  register_type(TYPE_INFLATECAUSE, false, true, new MonitorInflateCauseConstant());
+  register_type(TYPE_GCCAUSE, false, true, new GCCauseConstant());
+  register_type(TYPE_GCNAME, false, true, new GCNameConstant());
+  register_type(TYPE_GCWHEN, false, true, new GCWhenConstant());
+  register_type(TYPE_G1HEAPREGIONTYPE, false, true, new G1HeapRegionTypeConstant());
+  register_type(TYPE_GCTHRESHOLDUPDATER, false, true, new GCThresholdUpdaterConstant());
+  register_type(TYPE_METADATATYPE, false, true, new MetadataTypeConstant());
+  register_type(TYPE_METASPACEOBJECTTYPE, false, true, new MetaspaceObjectTypeConstant());
+  register_type(TYPE_G1YCTYPE, false, true, new G1YCTypeConstant());
+  register_type(TYPE_REFERENCETYPE, false, true, new ReferenceTypeConstant());
+  register_type(TYPE_NARROWOOPMODE, false, true, new NarrowOopModeConstant());
+  register_type(TYPE_COMPILERPHASETYPE, false, true, new CompilerPhaseTypeConstant());
+  register_type(TYPE_CODEBLOBTYPE, false, true, new CodeBlobTypeConstant());
+  register_type(TYPE_VMOPERATIONTYPE, false, true, new VMOperationTypeConstant());
+  register_type(TYPE_THREADSTATE, false, true, new ThreadStateConstant());
+
+  // register safepointing type serialization
+  register_type(TYPE_THREADGROUP, true, false, new JfrThreadGroupConstant());
+  register_type(TYPE_THREAD, true, false, new JfrThreadConstantSet());
+  return true;
+}
+
+// implementation for the static registration function exposed in the JfrSerializer api
+bool JfrSerializer::register_serializer(JfrTypeId id, bool require_safepoint, bool permit_cache, JfrSerializer* serializer) {
+  SerializerRegistrationGuard guard;
+  return register_type(id, require_safepoint, permit_cache, serializer);
+}
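+
+// Illustrative sketch (not part of this changeset): a hypothetical subsystem-side
+// constant serializer registered through the static API above. MyStateConstant and
+// TYPE_MYSTATE are placeholders, and the write_count/write_key calls assume the
+// pattern used by the built-in constant serializers.
+//
+//   class MyStateConstant : public JfrSerializer {
+//    public:
+//     void serialize(JfrCheckpointWriter& writer) {
+//       writer.write_count(1);     // one constant in this pool
+//       writer.write_key(0);       // its key
+//       writer.write("my-state");  // its label
+//     }
+//   };
+//
+//   // registered as non-safepointing and cacheable:
+//   // JfrSerializer::register_serializer(TYPE_MYSTATE, false, true, new MyStateConstant());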
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/checkpoint/types/jfrTypeManager.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#ifndef SHARE_VM_JFR_CHECKPOINT_TYPES_JFRTYPEMANAGER_HPP
+#define SHARE_VM_JFR_CHECKPOINT_TYPES_JFRTYPEMANAGER_HPP
+
+#include "jfr/utilities/jfrAllocation.hpp"
+
+class JavaThread;
+class JfrCheckpointWriter;
+
+class JfrTypeManager : public AllStatic {
+ public:
+  static bool initialize();
+  static void clear();
+  static void write_types(JfrCheckpointWriter& writer);
+  static void write_safepoint_types(JfrCheckpointWriter& writer);
+  static void write_type_set();
+  static void write_type_set_for_unloaded_classes();
+  static void create_thread_checkpoint(JavaThread* jt);
+  static void write_thread_checkpoint(JavaThread* jt);
+};
+
+#endif // SHARE_VM_JFR_CHECKPOINT_TYPES_JFRTYPEMANAGER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/checkpoint/types/jfrTypeSet.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,901 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/classLoaderData.inline.hpp"
+#include "classfile/javaClasses.hpp"
+// XXX #include "classfile/packageEntry.hpp"
+#include "classfile/symbolTable.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "jfr/jfr.hpp"
+#include "jfr/jni/jfrGetAllEventClasses.hpp"
+#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
+#include "jfr/recorder/checkpoint/types/jfrTypeSet.hpp"
+#include "jfr/recorder/checkpoint/types/jfrTypeSetUtils.hpp"
+#include "jfr/recorder/checkpoint/types/jfrTypeSetWriter.hpp"
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
+#include "jfr/recorder/storage/jfrBuffer.hpp"
+#include "jfr/utilities/jfrHashtable.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+#include "memory/iterator.hpp"
+#include "memory/resourceArea.hpp"
+#include "oops/instanceKlass.hpp"
+#include "oops/objArrayKlass.hpp"
+#include "oops/oop.inline.hpp"
+#include "utilities/accessFlags.hpp"
+
+// incremented on each checkpoint
+static u8 checkpoint_id = 0;
+
+// creates a unique id by combining a checkpoint relative symbol id (2^24)
+// with the current checkpoint id (2^40)
+#define CREATE_SYMBOL_ID(sym_id) (((u8)((checkpoint_id << 24) | sym_id)))
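+// For example (illustrative): with checkpoint_id == 3 and a checkpoint-relative
+// sym_id == 5, CREATE_SYMBOL_ID yields (3 << 24) | 5 == 0x03000005, i.e. the
+// symbol id occupies the low 24 bits and the checkpoint id the bits above them,
+// so symbol ids from different checkpoints never collide.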
+
+typedef const Klass* KlassPtr;
+// XXX typedef const PackageEntry* PkgPtr;
+typedef const ClassLoaderData* CldPtr;
+typedef const Method* MethodPtr;
+typedef const Symbol* SymbolPtr;
+typedef const JfrSymbolId::SymbolEntry* SymbolEntryPtr;
+typedef const JfrSymbolId::CStringEntry* CStringEntryPtr;
+
+// XXX
+// static traceid package_id(KlassPtr klass) {
+//   assert(klass != NULL, "invariant");
+//   PkgPtr pkg_entry = klass->package();
+//   return pkg_entry == NULL ? 0 : TRACE_ID(pkg_entry);
+// }
+
+static traceid cld_id(CldPtr cld) {
+  assert(cld != NULL, "invariant");
+  return cld->is_anonymous() ? 0 : TRACE_ID(cld);
+}
+
+static void tag_leakp_klass_artifacts(KlassPtr k, bool class_unload) {
+  assert(k != NULL, "invariant");
+  // XXX
+  // PkgPtr pkg = k->package();
+  // if (pkg != NULL) {
+  //   tag_leakp_artifact(pkg, class_unload);
+  // }
+  CldPtr cld = k->class_loader_data();
+  assert(cld != NULL, "invariant");
+  if (!cld->is_anonymous()) {
+    tag_leakp_artifact(cld, class_unload);
+  }
+}
+
+class TagLeakpKlassArtifact {
+  bool _class_unload;
+ public:
+  TagLeakpKlassArtifact(bool class_unload) : _class_unload(class_unload) {}
+  bool operator()(KlassPtr klass) {
+    if (_class_unload) {
+      if (LEAKP_USED_THIS_EPOCH(klass)) {
+        tag_leakp_klass_artifacts(klass, _class_unload);
+      }
+    } else {
+      if (LEAKP_USED_PREV_EPOCH(klass)) {
+        tag_leakp_klass_artifacts(klass, _class_unload);
+      }
+    }
+    return true;
+  }
+};
+
+/*
+ * In C++03, functions used as template parameters must have external linkage;
+ * this restriction was removed in C++11. Change back to "static" and
+ * rename functions when C++11 becomes available.
+ *
+ * The weird naming is an effort to decrease the risk of name clashes.
+ */
+
+int write__artifact__klass(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* k) {
+  assert(writer != NULL, "invariant");
+  assert(artifacts != NULL, "invariant");
+  assert(k != NULL, "invariant");
+  KlassPtr klass = (KlassPtr)k;
+  traceid pkg_id = 0;
+  KlassPtr theklass = klass;
+  if (theklass->oop_is_objArray()) {
+    const ObjArrayKlass* obj_arr_klass = ObjArrayKlass::cast((Klass*)klass);
+    theklass = obj_arr_klass->bottom_klass();
+  }
+  if (theklass->oop_is_instance()) {
+    pkg_id = 0; // XXX package_id(theklass);
+  } else {
+    assert(theklass->oop_is_typeArray(), "invariant");
+  }
+  const traceid symbol_id = artifacts->mark(klass);
+  assert(symbol_id > 0, "need to have an address for symbol!");
+  writer->write(TRACE_ID(klass));
+  writer->write(cld_id(klass->class_loader_data()));
+  writer->write((traceid)CREATE_SYMBOL_ID(symbol_id));
+  writer->write(pkg_id);
+  writer->write((s4)klass->access_flags().get_flags());
+  return 1;
+}
+
+typedef LeakPredicate<KlassPtr> LeakKlassPredicate;
+typedef JfrPredicatedArtifactWriterImplHost<KlassPtr, LeakKlassPredicate, write__artifact__klass> LeakKlassWriterImpl;
+typedef JfrArtifactWriterHost<LeakKlassWriterImpl, TYPE_CLASS> LeakKlassWriter;
+typedef JfrArtifactWriterImplHost<KlassPtr, write__artifact__klass> KlassWriterImpl;
+typedef JfrArtifactWriterHost<KlassWriterImpl, TYPE_CLASS> KlassWriter;
+
+int write__artifact__method(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* m) {
+  assert(writer != NULL, "invariant");
+  assert(artifacts != NULL, "invariant");
+  assert(m != NULL, "invariant");
+  MethodPtr method = (MethodPtr)m;
+  const traceid method_name_symbol_id = artifacts->mark(method->name());
+  assert(method_name_symbol_id > 0, "invariant");
+  const traceid method_sig_symbol_id = artifacts->mark(method->signature());
+  assert(method_sig_symbol_id > 0, "invariant");
+  KlassPtr klass = method->method_holder();
+  assert(klass != NULL, "invariant");
+  assert(METHOD_USED_ANY_EPOCH(klass), "invariant");
+  writer->write((u8)METHOD_ID(klass, method));
+  writer->write((u8)TRACE_ID(klass));
+  writer->write((u8)CREATE_SYMBOL_ID(method_name_symbol_id));
+  writer->write((u8)CREATE_SYMBOL_ID(method_sig_symbol_id));
+  writer->write((u2)method->access_flags().get_flags());
+  writer->write(const_cast<Method*>(method)->is_hidden() ? (u1)1 : (u1)0);
+  return 1;
+}
+
+typedef JfrArtifactWriterImplHost<MethodPtr, write__artifact__method> MethodWriterImplTarget;
+typedef JfrArtifactWriterHost<MethodWriterImplTarget, TYPE_METHOD> MethodWriterImpl;
+
+// XXX
+// int write__artifact__package(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* p) {
+//   assert(writer != NULL, "invariant");
+//   assert(artifacts != NULL, "invariant");
+//   assert(p != NULL, "invariant");
+//   PkgPtr pkg = (PkgPtr)p;
+//   Symbol* const pkg_name = pkg->name();
+//   const traceid package_name_symbol_id = pkg_name != NULL ? artifacts->mark(pkg_name) : 0;
+//   assert(package_name_symbol_id > 0, "invariant");
+//   writer->write((traceid)TRACE_ID(pkg));
+//   writer->write((traceid)CREATE_SYMBOL_ID(package_name_symbol_id));
+//   writer->write((bool)pkg->is_exported());
+//   return 1;
+// }
+
+// typedef LeakPredicate<PkgPtr> LeakPackagePredicate;
+// int _compare_pkg_ptr_(PkgPtr const& lhs, PkgPtr const& rhs) { return lhs > rhs ? 1 : (lhs < rhs) ? -1 : 0; }
+// typedef UniquePredicate<PkgPtr, _compare_pkg_ptr_> PackagePredicate;
+// typedef JfrPredicatedArtifactWriterImplHost<PkgPtr, LeakPackagePredicate, write__artifact__package> LeakPackageWriterImpl;
+// typedef JfrPredicatedArtifactWriterImplHost<PkgPtr, PackagePredicate, write__artifact__package> PackageWriterImpl;
+// typedef JfrArtifactWriterHost<LeakPackageWriterImpl, TYPE_PACKAGE> LeakPackageWriter;
+// typedef JfrArtifactWriterHost<PackageWriterImpl, TYPE_PACKAGE> PackageWriter;
+
+int write__artifact__classloader(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* c) {
+  assert(c != NULL, "invariant");
+  CldPtr cld = (CldPtr)c;
+  assert(!cld->is_anonymous(), "invariant");
+  const traceid cld_id = TRACE_ID(cld);
+  // class loader type
+  const Klass* class_loader_klass = cld->class_loader() != NULL ? cld->class_loader()->klass() : NULL;
+  if (class_loader_klass == NULL) {
+    // (primordial) boot class loader
+    writer->write(cld_id); // class loader instance id
+    writer->write((traceid)0);  // class loader type id (absence of)
+    writer->write((traceid)CREATE_SYMBOL_ID(1)); // 1 maps to synthetic name -> "bootstrap"
+  } else {
+    Symbol* symbol_name = class_loader_klass->name();
+    const traceid symbol_name_id = symbol_name != NULL ? artifacts->mark(symbol_name) : 0;
+    writer->write(cld_id); // class loader instance id
+    writer->write(TRACE_ID(class_loader_klass)); // class loader type id
+    writer->write(symbol_name_id == 0 ? (traceid)0 :
+      (traceid)CREATE_SYMBOL_ID(symbol_name_id)); // class loader instance name
+  }
+  return 1;
+}
+
+typedef LeakPredicate<CldPtr> LeakCldPredicate;
+int _compare_cld_ptr_(CldPtr const& lhs, CldPtr const& rhs) { return lhs > rhs ? 1 : (lhs < rhs) ? -1 : 0; }
+typedef UniquePredicate<CldPtr, _compare_cld_ptr_> CldPredicate;
+typedef JfrPredicatedArtifactWriterImplHost<CldPtr, LeakCldPredicate, write__artifact__classloader> LeakCldWriterImpl;
+typedef JfrPredicatedArtifactWriterImplHost<CldPtr, CldPredicate, write__artifact__classloader> CldWriterImpl;
+typedef JfrArtifactWriterHost<LeakCldWriterImpl, TYPE_CLASSLOADER> LeakCldWriter;
+typedef JfrArtifactWriterHost<CldWriterImpl, TYPE_CLASSLOADER> CldWriter;
+
+typedef const JfrSymbolId::SymbolEntry* SymbolEntryPtr;
+
+static int write__artifact__symbol__entry__(JfrCheckpointWriter* writer,
+                                            SymbolEntryPtr entry) {
+  assert(writer != NULL, "invariant");
+  assert(entry != NULL, "invariant");
+  ResourceMark rm;
+  writer->write(CREATE_SYMBOL_ID(entry->id()));
+  writer->write(entry->value()->as_C_string());
+  return 1;
+}
+
+int write__artifact__symbol__entry(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* e) {
+  assert(e != NULL, "invariant");
+  return write__artifact__symbol__entry__(writer, (SymbolEntryPtr)e);
+}
+
+typedef JfrArtifactWriterImplHost<SymbolEntryPtr, write__artifact__symbol__entry> SymbolEntryWriterImpl;
+typedef JfrArtifactWriterHost<SymbolEntryWriterImpl, TYPE_SYMBOL> SymbolEntryWriter;
+
+typedef const JfrSymbolId::CStringEntry* CStringEntryPtr;
+
+static int write__artifact__cstring__entry__(JfrCheckpointWriter* writer, CStringEntryPtr entry) {
+  assert(writer != NULL, "invariant");
+  assert(entry != NULL, "invariant");
+  writer->write(CREATE_SYMBOL_ID(entry->id()));
+  writer->write(entry->value());
+  return 1;
+}
+
+int write__artifact__cstring__entry(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* e) {
+  assert(e != NULL, "invariant");
+  return write__artifact__cstring__entry__(writer, (CStringEntryPtr)e);
+}
+
+typedef JfrArtifactWriterImplHost<CStringEntryPtr, write__artifact__cstring__entry> CStringEntryWriterImpl;
+typedef JfrArtifactWriterHost<CStringEntryWriterImpl, TYPE_SYMBOL> CStringEntryWriter;
+
+int write__artifact__klass__symbol(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* k) {
+  assert(writer != NULL, "invariant");
+  assert(artifacts != NULL, "invariant");
+  assert(k != NULL, "invariant");
+  const InstanceKlass* const ik = (const InstanceKlass*)k;
+  if (ik->is_anonymous()) {
+    CStringEntryPtr entry =
+      artifacts->map_cstring(JfrSymbolId::anonymous_klass_name_hash_code(ik));
+    assert(entry != NULL, "invariant");
+    return write__artifact__cstring__entry__(writer, entry);
+  }
+
+  SymbolEntryPtr entry = artifacts->map_symbol(JfrSymbolId::regular_klass_name_hash_code(ik));
+  return write__artifact__symbol__entry__(writer, entry);
+}
+
+int _compare_traceid_(const traceid& lhs, const traceid& rhs) {
+  return lhs > rhs ? 1 : (lhs < rhs) ? -1 : 0;
+}
+
+template <template <typename> class Predicate>
+class KlassSymbolWriterImpl {
+ private:
+  JfrCheckpointWriter* _writer;
+  JfrArtifactSet* _artifacts;
+  Predicate<KlassPtr> _predicate;
+  MethodUsedPredicate<true> _method_used_predicate;
+  MethodFlagPredicate _method_flag_predicate;
+  UniquePredicate<traceid, _compare_traceid_> _unique_predicate;
+
+  int klass_symbols(KlassPtr klass);
+// XXX  int package_symbols(PkgPtr pkg);
+  int class_loader_symbols(CldPtr cld);
+  int method_symbols(KlassPtr klass);
+
+ public:
+  typedef KlassPtr Type;
+  KlassSymbolWriterImpl(JfrCheckpointWriter* writer,
+                        JfrArtifactSet* artifacts,
+                        bool class_unload) : _writer(writer),
+                                             _artifacts(artifacts),
+                                             _predicate(class_unload),
+                                             _method_used_predicate(class_unload),
+                                             _method_flag_predicate(class_unload),
+                                             _unique_predicate(class_unload) {}
+
+  int operator()(KlassPtr klass) {
+    assert(klass != NULL, "invariant");
+    int count = 0;
+    if (_predicate(klass)) {
+      count += klass_symbols(klass);
+      // XXX
+      // PkgPtr pkg = klass->package();
+      // if (pkg != NULL) {
+      //   count += package_symbols(pkg);
+      // }
+      CldPtr cld = klass->class_loader_data();
+      assert(cld != NULL, "invariant");
+      if (!cld->is_anonymous()) {
+        count += class_loader_symbols(cld);
+      }
+      if (_method_used_predicate(klass)) {
+        count += method_symbols(klass);
+      }
+    }
+    return count;
+  }
+};
+
+template <template <typename> class Predicate>
+int KlassSymbolWriterImpl<Predicate>::klass_symbols(KlassPtr klass) {
+  assert(klass != NULL, "invariant");
+  assert(_predicate(klass), "invariant");
+  const InstanceKlass* const ik = (const InstanceKlass*)klass;
+  if (ik->is_anonymous()) {
+    CStringEntryPtr entry =
+      this->_artifacts->map_cstring(JfrSymbolId::anonymous_klass_name_hash_code(ik));
+    assert(entry != NULL, "invariant");
+    return _unique_predicate(entry->id()) ? write__artifact__cstring__entry__(this->_writer, entry) : 0;
+  }
+  SymbolEntryPtr entry = this->_artifacts->map_symbol(ik->name());
+  assert(entry != NULL, "invariant");
+  return _unique_predicate(entry->id()) ? write__artifact__symbol__entry__(this->_writer, entry) : 0;
+}
+
+// XXX
+// template <template <typename> class Predicate>
+// int KlassSymbolWriterImpl<Predicate>::package_symbols(PkgPtr pkg) {
+//   assert(pkg != NULL, "invariant");
+//   SymbolPtr pkg_name = pkg->name();
+//   assert(pkg_name != NULL, "invariant");
+//   SymbolEntryPtr package_symbol = this->_artifacts->map_symbol(pkg_name);
+//   assert(package_symbol != NULL, "invariant");
+//   return _unique_predicate(package_symbol->id()) ?
+//     write__artifact__symbol__entry__(this->_writer, package_symbol) : 0;
+// }
+
+// XXX
+// template <template <typename> class Predicate>
+// int KlassSymbolWriterImpl<Predicate>::module_symbols(ModPtr module) {
+//   assert(module != NULL, "invariant");
+//   assert(module->is_named(), "invariant");
+//   int count = 0;
+//   SymbolPtr sym = module->name();
+//   SymbolEntryPtr entry = NULL;
+//   if (sym != NULL) {
+//     entry = this->_artifacts->map_symbol(sym);
+//     assert(entry != NULL, "invariant");
+//     if (_unique_predicate(entry->id())) {
+//       count += write__artifact__symbol__entry__(this->_writer, entry);
+//     }
+//   }
+//   sym = module->version();
+//   if (sym != NULL) {
+//     entry = this->_artifacts->map_symbol(sym);
+//     assert(entry != NULL, "invariant");
+//     if (_unique_predicate(entry->id())) {
+//       count += write__artifact__symbol__entry__(this->_writer, entry);
+//     }
+//   }
+//   sym = module->location();
+//   if (sym != NULL) {
+//     entry = this->_artifacts->map_symbol(sym);
+//     assert(entry != NULL, "invariant");
+//     if (_unique_predicate(entry->id())) {
+//       count += write__artifact__symbol__entry__(this->_writer, entry);
+//     }
+//   }
+//   return count;
+// }
+
+template <template <typename> class Predicate>
+int KlassSymbolWriterImpl<Predicate>::class_loader_symbols(CldPtr cld) {
+  assert(cld != NULL, "invariant");
+  assert(!cld->is_anonymous(), "invariant");
+  int count = 0;
+  // class loader type
+  const Klass* class_loader_klass = cld->class_loader() != NULL ? cld->class_loader()->klass() : NULL;
+  if (class_loader_klass == NULL) {
+    // (primordial) boot class loader
+    CStringEntryPtr entry = this->_artifacts->map_cstring(0);
+    assert(entry != NULL, "invariant");
+    assert(strncmp(entry->literal(),
+      BOOTSTRAP_LOADER_NAME,
+      BOOTSTRAP_LOADER_NAME_LEN) == 0, "invariant");
+    if (_unique_predicate(entry->id())) {
+      count += write__artifact__cstring__entry__(this->_writer, entry);
+    }
+  } else {
+    const Symbol* class_loader_name = class_loader_klass->name()/* XXX TODO cld->name()*/;
+    if (class_loader_name != NULL) {
+      SymbolEntryPtr entry = this->_artifacts->map_symbol(class_loader_name);
+      assert(entry != NULL, "invariant");
+      if (_unique_predicate(entry->id())) {
+        count += write__artifact__symbol__entry__(this->_writer, entry);
+      }
+    }
+  }
+  return count;
+}
+
+template <template <typename> class Predicate>
+int KlassSymbolWriterImpl<Predicate>::method_symbols(KlassPtr klass) {
+  assert(_predicate(klass), "invariant");
+  assert(_method_used_predicate(klass), "invariant");
+  assert(METHOD_AND_CLASS_USED_ANY_EPOCH(klass), "invariant");
+  int count = 0;
+  const InstanceKlass* const ik = InstanceKlass::cast((Klass*)klass);
+  const int len = ik->methods()->length();
+  for (int i = 0; i < len; ++i) {
+    MethodPtr method = ik->methods()->at(i);
+    if (_method_flag_predicate(method)) {
+      SymbolEntryPtr entry = this->_artifacts->map_symbol(method->name());
+      assert(entry != NULL, "invariant");
+      if (_unique_predicate(entry->id())) {
+        count += write__artifact__symbol__entry__(this->_writer, entry);
+      }
+      entry = this->_artifacts->map_symbol(method->signature());
+      assert(entry != NULL, "invariant");
+      if (_unique_predicate(entry->id())) {
+        count += write__artifact__symbol__entry__(this->_writer, entry);
+      }
+    }
+  }
+  return count;
+}
+
+typedef KlassSymbolWriterImpl<LeakPredicate> LeakKlassSymbolWriterImpl;
+typedef JfrArtifactWriterHost<LeakKlassSymbolWriterImpl, TYPE_SYMBOL> LeakKlassSymbolWriter;
+
+class ClearKlassAndMethods {
+ private:
+  ClearArtifact<KlassPtr> _clear_klass_tag_bits;
+  ClearArtifact<MethodPtr> _clear_method_flag;
+  MethodUsedPredicate<false> _method_used_predicate;
+
+ public:
+  ClearKlassAndMethods(bool class_unload) : _clear_klass_tag_bits(class_unload),
+                                            _clear_method_flag(class_unload),
+                                            _method_used_predicate(class_unload) {}
+  bool operator()(KlassPtr klass) {
+    if (_method_used_predicate(klass)) {
+      const InstanceKlass* ik = InstanceKlass::cast((Klass*)klass);
+      const int len = ik->methods()->length();
+      for (int i = 0; i < len; ++i) {
+        MethodPtr method = ik->methods()->at(i);
+        _clear_method_flag(method);
+      }
+    }
+    _clear_klass_tag_bits(klass);
+    return true;
+  }
+};
+
+typedef CompositeFunctor<KlassPtr,
+                         TagLeakpKlassArtifact,
+                         LeakKlassWriter> LeakpKlassArtifactTagging;
+
+typedef CompositeFunctor<KlassPtr,
+                         LeakpKlassArtifactTagging,
+                         KlassWriter> CompositeKlassWriter;
+
+typedef CompositeFunctor<KlassPtr,
+                         CompositeKlassWriter,
+                         KlassArtifactRegistrator> CompositeKlassWriterRegistration;
+
+typedef CompositeFunctor<KlassPtr,
+                         KlassWriter,
+                         KlassArtifactRegistrator> KlassWriterRegistration;
+
+typedef JfrArtifactCallbackHost<KlassPtr, KlassWriterRegistration> KlassCallback;
+typedef JfrArtifactCallbackHost<KlassPtr, CompositeKlassWriterRegistration> CompositeKlassCallback;
+
+/*
+ * Composite operation
+ *
+ * TagLeakpKlassArtifact ->
+ *   LeakpPredicate ->
+ *     LeakpKlassWriter ->
+ *       KlassPredicate ->
+ *         KlassWriter ->
+ *           KlassWriterRegistration
+ */
+void JfrTypeSet::write_klass_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer) {
+  assert(!_artifacts->has_klass_entries(), "invariant");
+  KlassArtifactRegistrator reg(_artifacts);
+  KlassWriter kw(writer, _artifacts, _class_unload);
+  KlassWriterRegistration kwr(&kw, &reg);
+  if (leakp_writer == NULL) {
+    KlassCallback callback(&kwr);
+    _subsystem_callback = &callback;
+    do_klasses();
+    return;
+  }
+  TagLeakpKlassArtifact tagging(_class_unload);
+  LeakKlassWriter lkw(leakp_writer, _artifacts, _class_unload);
+  LeakpKlassArtifactTagging lpkat(&tagging, &lkw);
+  CompositeKlassWriter ckw(&lpkat, &kw);
+  CompositeKlassWriterRegistration ckwr(&ckw, &reg);
+  CompositeKlassCallback callback(&ckwr);
+  _subsystem_callback = &callback;
+  do_klasses();
+}
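+
+// Note on the composition above (descriptive, not part of the original change):
+// CompositeFunctor (jfrTypeSetUtils.hpp) applies its two functors left to right
+// and short-circuits on &&, so each klass delivered through the callback flows
+// through TagLeakpKlassArtifact, LeakKlassWriter, KlassWriter and
+// KlassArtifactRegistrator in that order, each subsequent functor running only
+// if the previous one returned true, matching the pipeline sketched above.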
+
+// XXX
+// typedef CompositeFunctor<PkgPtr,
+//                          PackageWriter,
+//                          ClearArtifact<PkgPtr> > PackageWriterWithClear;
+
+// typedef CompositeFunctor<PkgPtr,
+//                          LeakPackageWriter,
+//                          PackageWriter> CompositePackageWriter;
+
+// typedef CompositeFunctor<PkgPtr,
+//                          CompositePackageWriter,
+//                          ClearArtifact<PkgPtr> > CompositePackageWriterWithClear;
+
+// class PackageFieldSelector {
+//  public:
+//   typedef PkgPtr TypePtr;
+//   static TypePtr select(KlassPtr klass) {
+//     assert(klass != NULL, "invariant");
+//     return ((InstanceKlass*)klass)->package();
+//   }
+// };
+
+// typedef KlassToFieldEnvelope<PackageFieldSelector,
+//                              PackageWriterWithClear> KlassPackageWriterWithClear;
+
+// typedef KlassToFieldEnvelope<PackageFieldSelector,
+//                              CompositePackageWriterWithClear> KlassCompositePackageWriterWithClear;
+
+// typedef JfrArtifactCallbackHost<PkgPtr, PackageWriterWithClear> PackageCallback;
+// typedef JfrArtifactCallbackHost<PkgPtr, CompositePackageWriterWithClear> CompositePackageCallback;
+
+// /*
+//  * Composite operation
+//  *
+//  * LeakpPackageWriter ->
+//  *   PackageWriter ->
+//  *     ClearArtifact<PackageEntry>
+//  *
+//  */
+// void JfrTypeSet::write_package_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer) {
+//   assert(_artifacts->has_klass_entries(), "invariant");
+//   ClearArtifact<PkgPtr> clear(_class_unload);
+//   PackageWriter pw(writer, _artifacts, _class_unload);
+//   if (leakp_writer == NULL) {
+//     PackageWriterWithClear pwwc(&pw, &clear);
+//     KlassPackageWriterWithClear kpwwc(&pwwc);
+//     _artifacts->iterate_klasses(kpwwc);
+//     PackageCallback callback(&pwwc);
+//     _subsystem_callback = &callback;
+//     do_packages();
+//     return;
+//   }
+//   LeakPackageWriter lpw(leakp_writer, _artifacts, _class_unload);
+//   CompositePackageWriter cpw(&lpw, &pw);
+//   CompositePackageWriterWithClear cpwwc(&cpw, &clear);
+//   KlassCompositePackageWriterWithClear ckpw(&cpwwc);
+//   _artifacts->iterate_klasses(ckpw);
+//   CompositePackageCallback callback(&cpwwc);
+//   _subsystem_callback = &callback;
+//   do_packages();
+// }
+
+// typedef CompositeFunctor<ModPtr,
+//                          ModuleWriter,
+//                          ClearArtifact<ModPtr> > ModuleWriterWithClear;
+
+// typedef CompositeFunctor<ModPtr,
+//                          LeakModuleWriter,
+//                          ModuleWriter> CompositeModuleWriter;
+
+// typedef CompositeFunctor<ModPtr,
+//                          CompositeModuleWriter,
+//                          ClearArtifact<ModPtr> > CompositeModuleWriterWithClear;
+
+// typedef JfrArtifactCallbackHost<ModPtr, ModuleWriterWithClear> ModuleCallback;
+// typedef JfrArtifactCallbackHost<ModPtr, CompositeModuleWriterWithClear> CompositeModuleCallback;
+
+// XXX
+// class ModuleFieldSelector {
+//  public:
+//   typedef ModPtr TypePtr;
+//   static TypePtr select(KlassPtr klass) {
+//     assert(klass != NULL, "invariant");
+//     PkgPtr pkg = klass->package();
+//     return pkg != NULL ? pkg->module() : NULL;
+//   }
+// };
+
+// typedef KlassToFieldEnvelope<ModuleFieldSelector,
+//                              ModuleWriterWithClear> KlassModuleWriterWithClear;
+
+// typedef KlassToFieldEnvelope<ModuleFieldSelector,
+//                              CompositeModuleWriterWithClear> KlassCompositeModuleWriterWithClear;
+
+typedef CompositeFunctor<CldPtr, CldWriter, ClearArtifact<CldPtr> > CldWriterWithClear;
+typedef CompositeFunctor<CldPtr, LeakCldWriter, CldWriter> CompositeCldWriter;
+typedef CompositeFunctor<CldPtr, CompositeCldWriter, ClearArtifact<CldPtr> > CompositeCldWriterWithClear;
+typedef JfrArtifactCallbackHost<CldPtr, CldWriterWithClear> CldCallback;
+typedef JfrArtifactCallbackHost<CldPtr, CompositeCldWriterWithClear> CompositeCldCallback;
+
+class CldFieldSelector {
+ public:
+  typedef CldPtr TypePtr;
+  static TypePtr select(KlassPtr klass) {
+    assert(klass != NULL, "invariant");
+    CldPtr cld = klass->class_loader_data();
+    return cld->is_anonymous() ? NULL : cld;
+  }
+};
+
+typedef KlassToFieldEnvelope<CldFieldSelector, CldWriterWithClear> KlassCldWriterWithClear;
+typedef KlassToFieldEnvelope<CldFieldSelector, CompositeCldWriterWithClear> KlassCompositeCldWriterWithClear;
+
+/*
+ * Composite operation
+ *
+ * LeakpClassLoaderWriter ->
+ *   ClassLoaderWriter ->
+ *     ClearArtifact<ClassLoaderData>
+ */
+void JfrTypeSet::write_class_loader_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer) {
+  assert(_artifacts->has_klass_entries(), "invariant");
+  ClearArtifact<CldPtr> clear(_class_unload);
+  CldWriter cldw(writer, _artifacts, _class_unload);
+  if (leakp_writer == NULL) {
+    CldWriterWithClear cldwwc(&cldw, &clear);
+    KlassCldWriterWithClear kcldwwc(&cldwwc);
+    _artifacts->iterate_klasses(kcldwwc);
+    CldCallback callback(&cldwwc);
+    _subsystem_callback = &callback;
+    do_class_loaders();
+    return;
+  }
+  LeakCldWriter lcldw(leakp_writer, _artifacts, _class_unload);
+  CompositeCldWriter ccldw(&lcldw, &cldw);
+  CompositeCldWriterWithClear ccldwwc(&ccldw, &clear);
+  KlassCompositeCldWriterWithClear kcclwwc(&ccldwwc);
+  _artifacts->iterate_klasses(kcclwwc);
+  CompositeCldCallback callback(&ccldwwc);
+  _subsystem_callback = &callback;
+  do_class_loaders();
+}
+
+template <bool predicate_bool, typename MethodFunctor>
+class MethodIteratorHost {
+ private:
+  MethodFunctor _method_functor;
+  MethodUsedPredicate<predicate_bool> _method_used_predicate;
+  MethodFlagPredicate _method_flag_predicate;
+
+ public:
+  MethodIteratorHost(JfrCheckpointWriter* writer,
+                     JfrArtifactSet* artifacts,
+                     bool class_unload,
+                     bool skip_header = false) :
+    _method_functor(writer, artifacts, class_unload, skip_header),
+    _method_used_predicate(class_unload),
+    _method_flag_predicate(class_unload) {}
+
+  bool operator()(KlassPtr klass) {
+    if (_method_used_predicate(klass)) {
+      assert(METHOD_AND_CLASS_USED_ANY_EPOCH(klass), "invariant");
+      const InstanceKlass* ik = InstanceKlass::cast((Klass*)klass);
+      const int len = ik->methods()->length();
+      for (int i = 0; i < len; ++i) {
+        MethodPtr method = ik->methods()->at(i);
+        if (_method_flag_predicate(method)) {
+          _method_functor(method);
+        }
+      }
+    }
+    return true;
+  }
+
+  int count() const { return _method_functor.count(); }
+  void add(int count) { _method_functor.add(count); }
+};
+
+typedef MethodIteratorHost<true /* leakp */, MethodWriterImpl> LeakMethodWriter;
+typedef MethodIteratorHost<false, MethodWriterImpl> MethodWriter;
+typedef CompositeFunctor<KlassPtr, LeakMethodWriter, MethodWriter> CompositeMethodWriter;
+
+/*
+ * Composite operation
+ *
+ * LeakpMethodWriter ->
+ *   MethodWriter
+ */
+void JfrTypeSet::write_method_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer) {
+  assert(_artifacts->has_klass_entries(), "invariant");
+  MethodWriter mw(writer, _artifacts, _class_unload);
+  if (leakp_writer == NULL) {
+    _artifacts->iterate_klasses(mw);
+    return;
+  }
+  LeakMethodWriter lpmw(leakp_writer, _artifacts, _class_unload);
+  CompositeMethodWriter cmw(&lpmw, &mw);
+  _artifacts->iterate_klasses(cmw);
+}
+static void write_symbols_leakp(JfrCheckpointWriter* leakp_writer, JfrArtifactSet* artifacts, bool class_unload) {
+  assert(leakp_writer != NULL, "invariant");
+  assert(artifacts != NULL, "invariant");
+  LeakKlassSymbolWriter lpksw(leakp_writer, artifacts, class_unload);
+  artifacts->iterate_klasses(lpksw);
+}
+static void write_symbols(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer, JfrArtifactSet* artifacts, bool class_unload) {
+  assert(writer != NULL, "invariant");
+  assert(artifacts != NULL, "invariant");
+  if (leakp_writer != NULL) {
+    write_symbols_leakp(leakp_writer, artifacts, class_unload);
+  }
+  // iterate all registered symbols
+  SymbolEntryWriter symbol_writer(writer, artifacts, class_unload);
+  artifacts->iterate_symbols(symbol_writer);
+  CStringEntryWriter cstring_writer(writer, artifacts, class_unload, true); // skip header
+  artifacts->iterate_cstrings(cstring_writer);
+  symbol_writer.add(cstring_writer.count());
+}
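+
+// Note (descriptive, not part of the original change): both writers above emit
+// TYPE_SYMBOL entries. The cstring writer is constructed with skip_header = true
+// and its entry count is added to the symbol writer, evidently so that the single
+// constant-pool header written by the symbol writer covers entries from both tables.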
+
+bool JfrTypeSet::_class_unload = false;
+JfrArtifactSet* JfrTypeSet::_artifacts = NULL;
+JfrArtifactClosure* JfrTypeSet::_subsystem_callback = NULL;
+
+void JfrTypeSet::write_symbol_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer) {
+  assert(writer != NULL, "invariant");
+  assert(_artifacts->has_klass_entries(), "invariant");
+  write_symbols(writer, leakp_writer, _artifacts, _class_unload);
+}
+
+void JfrTypeSet::do_unloaded_klass(Klass* klass) {
+  assert(klass != NULL, "invariant");
+  assert(_subsystem_callback != NULL, "invariant");
+  if (IS_JDK_JFR_EVENT_SUBKLASS(klass)) {
+    JfrEventClasses::increment_unloaded_event_class();
+  }
+  if (USED_THIS_EPOCH(klass)) { // includes leakp subset
+    _subsystem_callback->do_artifact(klass);
+    return;
+  }
+  if (klass->is_subclass_of(SystemDictionary::ClassLoader_klass()) || klass == SystemDictionary::Object_klass()) {
+    SET_LEAKP_USED_THIS_EPOCH(klass); // tag leakp "safe byte" for subset inclusion
+    _subsystem_callback->do_artifact(klass);
+  }
+}
+
+void JfrTypeSet::do_klass(Klass* klass) {
+  assert(klass != NULL, "invariant");
+  assert(_subsystem_callback != NULL, "invariant");
+  if (USED_PREV_EPOCH(klass)) { // includes leakp subset
+    _subsystem_callback->do_artifact(klass);
+    return;
+  }
+  if (klass->is_subclass_of(SystemDictionary::ClassLoader_klass()) || klass == SystemDictionary::Object_klass()) {
+    SET_LEAKP_USED_PREV_EPOCH(klass); // tag leakp "safe byte" for subset inclusion
+    _subsystem_callback->do_artifact(klass);
+  }
+}
+
+void JfrTypeSet::do_klasses() {
+  if (_class_unload) {
+    ClassLoaderDataGraph::classes_unloading_do(&do_unloaded_klass);
+    return;
+  }
+  ClassLoaderDataGraph::classes_do(&do_klass);
+}
+
+// XXX
+// void JfrTypeSet::do_unloaded_package(PackageEntry* entry) {
+//   assert(entry != NULL, "invariant");
+//   assert(_subsystem_callback != NULL, "invariant");
+//   if (ANY_USED_THIS_EPOCH(entry)) { // includes leakp subset
+//     _subsystem_callback->do_artifact(entry);
+//   }
+// }
+
+// void JfrTypeSet::do_package(PackageEntry* entry) {
+//   assert(_subsystem_callback != NULL, "invariant");
+//   if (ANY_USED_PREV_EPOCH(entry)) { // includes leakp subset
+//     _subsystem_callback->do_artifact(entry);
+//   }
+// }
+
+// void JfrTypeSet::do_packages() {
+//   if (_class_unload) {
+//     ClassLoaderDataGraph::packages_unloading_do(&do_unloaded_package);
+//     return;
+//   }
+//   ClassLoaderDataGraph::packages_do(&do_package);
+// }
+
+void JfrTypeSet::do_unloaded_class_loader_data(ClassLoaderData* cld) {
+  assert(_subsystem_callback != NULL, "invariant");
+  if (ANY_USED_THIS_EPOCH(cld)) { // includes leakp subset
+    _subsystem_callback->do_artifact(cld);
+  }
+}
+
+void JfrTypeSet::do_class_loader_data(ClassLoaderData* cld) {
+  assert(_subsystem_callback != NULL, "invariant");
+  if (ANY_USED_PREV_EPOCH(cld)) { // includes leakp subset
+    _subsystem_callback->do_artifact(cld);
+  }
+}
+
+class CLDCallback : public CLDClosure {
+ private:
+  bool _class_unload;
+ public:
+  CLDCallback(bool class_unload) : _class_unload(class_unload) {}
+  void do_cld(ClassLoaderData* cld) {
+    assert(cld != NULL, "invariant");
+    if (cld->is_anonymous()) {
+      return;
+    }
+    if (_class_unload) {
+      JfrTypeSet::do_unloaded_class_loader_data(cld);
+      return;
+    }
+    JfrTypeSet::do_class_loader_data(cld);
+  }
+};
+
+void JfrTypeSet::do_class_loaders() {
+  CLDCallback cld_cb(_class_unload);
+  if (_class_unload) {
+    ClassLoaderDataGraph::cld_unloading_do(&cld_cb);
+    return;
+  }
+  ClassLoaderDataGraph::cld_do(&cld_cb);
+}
+
+static void clear_artifacts(JfrArtifactSet* artifacts,
+                            bool class_unload) {
+  assert(artifacts != NULL, "invariant");
+  assert(artifacts->has_klass_entries(), "invariant");
+
+  // untag
+  ClearKlassAndMethods clear(class_unload);
+  artifacts->iterate_klasses(clear);
+  artifacts->clear();
+}
+
+/**
+ * Write all "tagged" (in-use) constant artifacts and their dependencies.
+ */
+void JfrTypeSet::serialize(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer, bool class_unload) {
+  assert(writer != NULL, "invariant");
+  ResourceMark rm;
+  // initialization begin
+  _class_unload = class_unload;
+  ++checkpoint_id;
+  if (_artifacts == NULL) {
+    _artifacts = new JfrArtifactSet(class_unload);
+    _subsystem_callback = NULL;
+  } else {
+    _artifacts->initialize(class_unload);
+    _subsystem_callback = NULL;
+  }
+  assert(_artifacts != NULL, "invariant");
+  assert(!_artifacts->has_klass_entries(), "invariant");
+  assert(_subsystem_callback == NULL, "invariant");
+  // initialization complete
+
+  // write order is important because an individual write step
+  // might tag an artifact to be written in a subsequent step
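+  // (for example, writing a klass registers it with _artifacts and marks its
+  // name symbol; the class loader, method and symbol steps below iterate the
+  // registered klasses and the symbols marked along the way)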
+  write_klass_constants(writer, leakp_writer);
+  if (_artifacts->has_klass_entries()) {
+// XXX    write_package_constants(writer, leakp_writer);
+    write_class_loader_constants(writer, leakp_writer);
+    write_method_constants(writer, leakp_writer);
+    write_symbol_constants(writer, leakp_writer);
+    clear_artifacts(_artifacts, class_unload);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/checkpoint/types/jfrTypeSet.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_RECORDER_CHECKPOINT_TYPES_JFRTYPESET_HPP
+#define SHARE_VM_JFR_RECORDER_CHECKPOINT_TYPES_JFRTYPESET_HPP
+
+#include "jfr/utilities/jfrAllocation.hpp"
+
+class ClassLoaderData;
+class JfrArtifactClosure;
+class JfrArtifactSet;
+class JfrCheckpointWriter;
+class Klass;
+
+// XXX class PackageEntry;
+
+class JfrTypeSet : AllStatic {
+  friend class CLDCallback;
+  friend class JfrTypeManager;
+  friend class TypeSetSerialization;
+ private:
+  static JfrArtifactSet* _artifacts;
+  static JfrArtifactClosure* _subsystem_callback;
+  static bool _class_unload;
+
+  static void do_klass(Klass* k);
+  static void do_unloaded_klass(Klass* k);
+  static void do_klasses();
+
+  // XXX
+  // static void do_package(PackageEntry* entry);
+  // static void do_unloaded_package(PackageEntry* entry);
+  // static void do_packages();
+
+  static void do_class_loader_data(ClassLoaderData* cld);
+  static void do_unloaded_class_loader_data(ClassLoaderData* cld);
+  static void do_class_loaders();
+
+  static void write_klass_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer);
+// XXX  static void write_package_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer);
+  static void write_class_loader_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer);
+  static void write_method_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer);
+  static void write_symbol_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer);
+  static void serialize(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer, bool class_unload);
+};
+
+#endif // SHARE_VM_JFR_RECORDER_CHECKPOINT_TYPES_JFRTYPESET_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/checkpoint/types/jfrTypeSetUtils.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,271 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/recorder/checkpoint/types/jfrTypeSetUtils.hpp"
+#include "oops/instanceKlass.hpp"
+#include "oops/oop.inline.hpp"
+#include "oops/symbol.hpp"
+
+JfrSymbolId::JfrSymbolId() : _symbol_id_counter(0), _sym_table(new SymbolTable(this)), _cstring_table(new CStringTable(this)) {
+  assert(_sym_table != NULL, "invariant");
+  assert(_cstring_table != NULL, "invariant");
+  initialize();
+}
+
+void JfrSymbolId::initialize() {
+  clear();
+  assert(_symbol_id_counter == 0, "invariant");
+}
+
+void JfrSymbolId::clear() {
+  assert(_sym_table != NULL, "invariant");
+  if (_sym_table->has_entries()) {
+    _sym_table->clear_entries();
+  }
+  assert(!_sym_table->has_entries(), "invariant");
+
+  assert(_cstring_table != NULL, "invariant");
+  if (_cstring_table->has_entries()) {
+    _cstring_table->clear_entries();
+  }
+  assert(!_cstring_table->has_entries(), "invariant");
+  _symbol_id_counter = 0;
+}
+
+JfrSymbolId::~JfrSymbolId() {
+  delete _sym_table;
+  delete _cstring_table;
+}
+
+traceid JfrSymbolId::mark_anonymous_klass_name(const Klass* k) {
+  assert(k != NULL, "invariant");
+  assert(k->oop_is_instance(), "invariant");
+  assert(is_anonymous_klass(k), "invariant");
+
+  uintptr_t anonymous_symbol_hash_code = 0;
+  const char* const anonymous_symbol =
+    create_anonymous_klass_symbol((const InstanceKlass*)k, anonymous_symbol_hash_code);
+
+  if (anonymous_symbol == NULL) {
+    return 0;
+  }
+
+  assert(anonymous_symbol_hash_code != 0, "invariant");
+  traceid symbol_id = mark(anonymous_symbol, anonymous_symbol_hash_code);
+  assert(mark(anonymous_symbol, anonymous_symbol_hash_code) == symbol_id, "invariant");
+  return symbol_id;
+}
+
+const JfrSymbolId::SymbolEntry* JfrSymbolId::map_symbol(const Symbol* symbol) const {
+  return _sym_table->lookup_only(symbol, (uintptr_t)const_cast<Symbol*>(symbol)->identity_hash());
+}
+
+const JfrSymbolId::SymbolEntry* JfrSymbolId::map_symbol(uintptr_t hash) const {
+  return _sym_table->lookup_only(NULL, hash);
+}
+
+const JfrSymbolId::CStringEntry* JfrSymbolId::map_cstring(uintptr_t hash) const {
+  return _cstring_table->lookup_only(NULL, hash);
+}
+
+void JfrSymbolId::assign_id(SymbolEntry* entry) {
+  assert(entry != NULL, "invariant");
+  assert(entry->id() == 0, "invariant");
+  entry->set_id(++_symbol_id_counter);
+}
+
+bool JfrSymbolId::equals(const Symbol* query, uintptr_t hash, const SymbolEntry* entry) {
+  // query might be NULL
+  assert(entry != NULL, "invariant");
+  assert(entry->hash() == hash, "invariant");
+  return true;
+}
+
+void JfrSymbolId::assign_id(CStringEntry* entry) {
+  assert(entry != NULL, "invariant");
+  assert(entry->id() == 0, "invariant");
+  entry->set_id(++_symbol_id_counter);
+}
+
+bool JfrSymbolId::equals(const char* query, uintptr_t hash, const CStringEntry* entry) {
+  // query might be NULL
+  assert(entry != NULL, "invariant");
+  assert(entry->hash() == hash, "invariant");
+  return true;
+}
+
+traceid JfrSymbolId::mark(const Klass* k) {
+  assert(k != NULL, "invariant");
+  traceid symbol_id = 0;
+  if (is_anonymous_klass(k)) {
+    symbol_id = mark_anonymous_klass_name(k);
+  }
+  if (0 == symbol_id) {
+    const Symbol* const sym = k->name();
+    if (sym != NULL) {
+      symbol_id = mark(sym);
+    }
+  }
+  assert(symbol_id > 0, "a symbol handler must mark the symbol for writing");
+  return symbol_id;
+}
+
+traceid JfrSymbolId::mark(const Symbol* symbol) {
+  assert(symbol != NULL, "invariant");
+  return mark(symbol, (uintptr_t)const_cast<Symbol*>(symbol)->identity_hash());
+}
+
+traceid JfrSymbolId::mark(const Symbol* data, uintptr_t hash) {
+  assert(data != NULL, "invariant");
+  assert(_sym_table != NULL, "invariant");
+  return _sym_table->id(data, hash);
+}
+
+traceid JfrSymbolId::mark(const char* str, uintptr_t hash) {
+  assert(str != NULL, "invariant");
+  return _cstring_table->id(str, hash);
+}
+
+bool JfrSymbolId::is_anonymous_klass(const Klass* k) {
+  assert(k != NULL, "invariant");
+  return k->oop_is_instance() && ((const InstanceKlass*)k)->is_anonymous();
+}
+
+/*
+ * The symbol for a jsr292 anonymous class is its external name with the
+ * identity hash code appended after a slash, e.g.:
+ *   java.lang.invoke.LambdaForm$BMH/22626602
+ *
+ * The caller needs a ResourceMark.
+ */
+
+uintptr_t JfrSymbolId::anonymous_klass_name_hash_code(const InstanceKlass* ik) {
+  assert(ik != NULL, "invariant");
+  assert(ik->is_anonymous(), "invariant");
+  const oop mirror = ik->java_mirror();
+  assert(mirror != NULL, "invariant");
+  return (uintptr_t)mirror->identity_hash();
+}
+
+const char* JfrSymbolId::create_anonymous_klass_symbol(const InstanceKlass* ik, uintptr_t& hashcode) {
+  assert(ik != NULL, "invariant");
+  assert(ik->is_anonymous(), "invariant");
+  assert(0 == hashcode, "invariant");
+  char* anonymous_symbol = NULL;
+  const oop mirror = ik->java_mirror();
+  assert(mirror != NULL, "invariant");
+  char hash_buf[40];
+  hashcode = anonymous_klass_name_hash_code(ik);
+  sprintf(hash_buf, "/" UINTX_FORMAT, hashcode);
+  const size_t hash_len = strlen(hash_buf);
+  const size_t result_len = ik->name()->utf8_length();
+  anonymous_symbol = NEW_RESOURCE_ARRAY(char, result_len + hash_len + 1);
+  ik->name()->as_klass_external_name(anonymous_symbol, (int)result_len + 1);
+  assert(strlen(anonymous_symbol) == result_len, "invariant");
+  strcpy(anonymous_symbol + result_len, hash_buf);
+  assert(strlen(anonymous_symbol) == result_len + hash_len, "invariant");
+  return anonymous_symbol;
+}
+
+uintptr_t JfrSymbolId::regular_klass_name_hash_code(const Klass* k) {
+  assert(k != NULL, "invariant");
+  const Symbol* const sym = k->name();
+  assert(sym != NULL, "invariant");
+  return (uintptr_t)const_cast<Symbol*>(sym)->identity_hash();
+}
+
+JfrArtifactSet::JfrArtifactSet(bool class_unload) : _symbol_id(new JfrSymbolId()),
+                                                    _klass_list(NULL),
+                                                    _class_unload(class_unload) {
+  initialize(class_unload);
+  assert(_klass_list != NULL, "invariant");
+}
+
+static const size_t initial_class_list_size = 200;
+void JfrArtifactSet::initialize(bool class_unload) {
+  assert(_symbol_id != NULL, "invariant");
+  _symbol_id->initialize();
+  assert(!_symbol_id->has_entries(), "invariant");
+  _symbol_id->mark(BOOTSTRAP_LOADER_NAME, 0); // pre-load the synthetic bootstrap loader name
+  _class_unload = class_unload;
+  // resource allocation
+  _klass_list = new GrowableArray<const Klass*>(initial_class_list_size, false, mtTracing);
+}
+
+JfrArtifactSet::~JfrArtifactSet() {
+  clear();
+}
+
+void JfrArtifactSet::clear() {
+  _symbol_id->clear();
+  // _klass_list will be cleared by a ResourceMark
+}
+
+traceid JfrArtifactSet::mark_anonymous_klass_name(const Klass* klass) {
+  return _symbol_id->mark_anonymous_klass_name(klass);
+}
+
+traceid JfrArtifactSet::mark(const Symbol* sym, uintptr_t hash) {
+  return _symbol_id->mark(sym, hash);
+}
+
+traceid JfrArtifactSet::mark(const Klass* klass) {
+  return _symbol_id->mark(klass);
+}
+
+traceid JfrArtifactSet::mark(const Symbol* symbol) {
+  return _symbol_id->mark(symbol);
+}
+
+traceid JfrArtifactSet::mark(const char* const str, uintptr_t hash) {
+  return _symbol_id->mark(str, hash);
+}
+
+const JfrSymbolId::SymbolEntry* JfrArtifactSet::map_symbol(const Symbol* symbol) const {
+  return _symbol_id->map_symbol(symbol);
+}
+
+const JfrSymbolId::SymbolEntry* JfrArtifactSet::map_symbol(uintptr_t hash) const {
+  return _symbol_id->map_symbol(hash);
+}
+
+const JfrSymbolId::CStringEntry* JfrArtifactSet::map_cstring(uintptr_t hash) const {
+  return _symbol_id->map_cstring(hash);
+}
+
+bool JfrArtifactSet::has_klass_entries() const {
+  return _klass_list->is_nonempty();
+}
+
+int JfrArtifactSet::entries() const {
+  return _klass_list->length();
+}
+
+void JfrArtifactSet::register_klass(const Klass* k) {
+  assert(k != NULL, "invariant");
+  assert(_klass_list != NULL, "invariant");
+  assert(_klass_list->find(k) == -1, "invariant");
+  _klass_list->append(k);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/checkpoint/types/jfrTypeSetUtils.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,381 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_RECORDER_CHECKPOINT_TYPES_JFRTYPESETUTILS_HPP
+#define SHARE_VM_JFR_RECORDER_CHECKPOINT_TYPES_JFRTYPESETUTILS_HPP
+
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
+#include "jfr/utilities/jfrAllocation.hpp"
+#include "jfr/utilities/jfrHashtable.hpp"
+#include "oops/klass.hpp"
+#include "oops/method.hpp"
+#include "utilities/growableArray.hpp"
+
+// XXX is it correct?
+// external name (synthetic) for the primordial "bootstrap" class loader instance
+#define BOOTSTRAP_LOADER_NAME "<bootloader>" // XXX bootstrap
+#define BOOTSTRAP_LOADER_NAME_LEN 9
+
+// Composite callback/functor building block
+template <typename T, typename Func1, typename Func2>
+class CompositeFunctor {
+ private:
+  Func1* _f;
+  Func2* _g;
+ public:
+  CompositeFunctor(Func1* f, Func2* g) : _f(f), _g(g) {
+    assert(f != NULL, "invariant");
+    assert(g != NULL, "invariant");
+  }
+  bool operator()(T const& value) {
+    return (*_f)(value) && (*_g)(value);
+  }
+};
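As a minimal sketch (illustrative only; class_unload stands for the usual bool flag passed through this code), two of the functors defined later in this header can be chained so the second runs only for values the first accepts:

    // evaluate the leak predicate first and clear leakp tag bits only on a match
    LeakPredicate<const Klass*> predicate(class_unload);
    LeakpClearArtifact<const Klass*> clear(class_unload);
    CompositeFunctor<const Klass*,
                     LeakPredicate<const Klass*>,
                     LeakpClearArtifact<const Klass*> > composite(&predicate, &clear);
    // composite(klass) evaluates predicate(klass) && clear(klass), short-circuiting on false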
+
+class JfrArtifactClosure {
+ public:
+  virtual void do_artifact(const void* artifact) = 0;
+};
+
+template <typename T, typename Callback>
+class JfrArtifactCallbackHost : public JfrArtifactClosure {
+ private:
+  Callback* _callback;
+ public:
+  JfrArtifactCallbackHost(Callback* callback) : _callback(callback) {}
+  void do_artifact(const void* artifact) {
+    (*_callback)(reinterpret_cast<T const&>(artifact));
+  }
+};
+
+template <typename FieldSelector, typename Letter>
+class KlassToFieldEnvelope {
+  Letter* _letter;
+ public:
+  KlassToFieldEnvelope(Letter* letter) : _letter(letter) {}
+  bool operator()(const Klass* klass) {
+    typename FieldSelector::TypePtr t = FieldSelector::select(klass);
+    return t != NULL ? (*_letter)(t) : true;
+  }
+};
+
+template <typename T>
+void tag_leakp_artifact(T const& value, bool class_unload) {
+  assert(value != NULL, "invariant");
+  if (class_unload) {
+    SET_LEAKP_USED_THIS_EPOCH(value);
+    assert(LEAKP_USED_THIS_EPOCH(value), "invariant");
+  } else {
+    SET_LEAKP_USED_PREV_EPOCH(value);
+    assert(LEAKP_USED_PREV_EPOCH(value), "invariant");
+  }
+}
+
+template <typename T>
+class LeakpClearArtifact {
+  bool _class_unload;
+ public:
+  LeakpClearArtifact(bool class_unload) : _class_unload(class_unload) {}
+  bool operator()(T const& value) {
+    if (_class_unload) {
+      if (LEAKP_USED_THIS_EPOCH(value)) {
+        LEAKP_UNUSE_THIS_EPOCH(value);
+      }
+    } else {
+      if (LEAKP_USED_PREV_EPOCH(value)) {
+        LEAKP_UNUSE_PREV_EPOCH(value);
+      }
+    }
+    return true;
+  }
+};
+
+template <typename T>
+class ClearArtifact {
+  bool _class_unload;
+ public:
+  ClearArtifact(bool class_unload) : _class_unload(class_unload) {}
+  bool operator()(T const& value) {
+    if (_class_unload) {
+      if (LEAKP_USED_THIS_EPOCH(value)) {
+        LEAKP_UNUSE_THIS_EPOCH(value);
+      }
+      if (USED_THIS_EPOCH(value)) {
+        UNUSE_THIS_EPOCH(value);
+      }
+      if (METHOD_USED_THIS_EPOCH(value)) {
+        UNUSE_METHOD_THIS_EPOCH(value);
+      }
+    } else {
+      if (LEAKP_USED_PREV_EPOCH(value)) {
+        LEAKP_UNUSE_PREV_EPOCH(value);
+      }
+      if (USED_PREV_EPOCH(value)) {
+        UNUSE_PREV_EPOCH(value);
+      }
+      if (METHOD_USED_PREV_EPOCH(value)) {
+        UNUSE_METHOD_PREV_EPOCH(value);
+      }
+    }
+    return true;
+  }
+};
+
+template <>
+class ClearArtifact<const Method*> {
+  bool _class_unload;
+ public:
+  ClearArtifact(bool class_unload) : _class_unload(class_unload) {}
+  bool operator()(const Method* method) {
+    if (_class_unload) {
+      if (METHOD_FLAG_USED_THIS_EPOCH(method)) {
+        CLEAR_METHOD_FLAG_USED_THIS_EPOCH(method);
+      }
+    } else {
+      if (METHOD_FLAG_USED_PREV_EPOCH(method)) {
+        CLEAR_METHOD_FLAG_USED_PREV_EPOCH(method);
+      }
+    }
+    return true;
+  }
+};
+
+template <typename T>
+class LeakPredicate {
+  bool _class_unload;
+ public:
+  LeakPredicate(bool class_unload) : _class_unload(class_unload) {}
+  bool operator()(T const& value) {
+    return _class_unload ? LEAKP_USED_THIS_EPOCH(value) : LEAKP_USED_PREV_EPOCH(value);
+  }
+};
+
+template <typename T>
+class UsedPredicate {
+  bool _class_unload;
+ public:
+  UsedPredicate(bool class_unload) : _class_unload(class_unload) {}
+  bool operator()(T const& value) {
+    return _class_unload ? USED_THIS_EPOCH(value) : USED_PREV_EPOCH(value);
+  }
+};
+
+template <typename T, int compare(const T&, const T&)>
+class UniquePredicate {
+ private:
+  GrowableArray<T> _seen;
+ public:
+  UniquePredicate(bool) : _seen() {}
+  bool operator()(T const& value) {
+    bool not_unique;
+    _seen.template find_sorted<T, compare>(value, not_unique);
+    if (not_unique) {
+      return false;
+    }
+    _seen.template insert_sorted<compare>(value);
+    return true;
+  }
+};
+
+class MethodFlagPredicate {
+  bool _class_unload;
+ public:
+  MethodFlagPredicate(bool class_unload) : _class_unload(class_unload) {}
+  bool operator()(const Method* method) {
+    return _class_unload ? METHOD_FLAG_USED_THIS_EPOCH(method) : METHOD_FLAG_USED_PREV_EPOCH(method);
+  }
+};
+
+template <bool leakp>
+class MethodUsedPredicate {
+  bool _class_unload;
+ public:
+  MethodUsedPredicate(bool class_unload) : _class_unload(class_unload) {}
+  bool operator()(const Klass* klass) {
+    assert(ANY_USED(klass), "invariant");
+    if (_class_unload) {
+      return leakp ? LEAKP_METHOD_USED_THIS_EPOCH(klass) : METHOD_USED_THIS_EPOCH(klass);
+    }
+    return leakp ? LEAKP_METHOD_USED_PREV_EPOCH(klass) : METHOD_USED_PREV_EPOCH(klass);
+  }
+};
+
+class JfrSymbolId : public JfrCHeapObj {
+  template <typename, typename, template<typename, typename> class, typename, size_t>
+  friend class HashTableHost;
+  typedef HashTableHost<const Symbol*, traceid, Entry, JfrSymbolId> SymbolTable;
+  typedef HashTableHost<const char*, traceid, Entry, JfrSymbolId> CStringTable;
+ public:
+  typedef SymbolTable::HashEntry SymbolEntry;
+  typedef CStringTable::HashEntry CStringEntry;
+ private:
+  SymbolTable* _sym_table;
+  CStringTable* _cstring_table;
+  traceid _symbol_id_counter;
+
+  // hashtable(s) callbacks
+  void assign_id(SymbolEntry* entry);
+  bool equals(const Symbol* query, uintptr_t hash, const SymbolEntry* entry);
+  void assign_id(CStringEntry* entry);
+  bool equals(const char* query, uintptr_t hash, const CStringEntry* entry);
+
+ public:
+  static bool is_anonymous_klass(const Klass* k);
+  static const char* create_anonymous_klass_symbol(const InstanceKlass* ik, uintptr_t& hashcode);
+  static uintptr_t anonymous_klass_name_hash_code(const InstanceKlass* ik);
+  static uintptr_t regular_klass_name_hash_code(const Klass* k);
+
+  JfrSymbolId();
+  ~JfrSymbolId();
+
+  void initialize();
+  void clear();
+
+  traceid mark_anonymous_klass_name(const Klass* k);
+  traceid mark(const Symbol* sym, uintptr_t hash);
+  traceid mark(const Klass* k);
+  traceid mark(const Symbol* symbol);
+  traceid mark(const char* str, uintptr_t hash);
+
+  const SymbolEntry* map_symbol(const Symbol* symbol) const;
+  const SymbolEntry* map_symbol(uintptr_t hash) const;
+  const CStringEntry* map_cstring(uintptr_t hash) const;
+
+  template <typename T>
+  void symbol(T& functor, const Klass* k) {
+    if (is_anonymous_klass(k)) {
+      return;
+    }
+    functor(map_symbol(regular_klass_name_hash_code(k)));
+  }
+
+  template <typename T>
+  void symbol(T& functor, const Method* method) {
+    assert(method != NULL, "invariant");
+    functor(map_symbol((uintptr_t)method->name()->identity_hash()));
+    functor(map_symbol((uintptr_t)method->signature()->identity_hash()));
+  }
+
+  template <typename T>
+  void cstring(T& functor, const Klass* k) {
+    if (!is_anonymous_klass(k)) {
+      return;
+    }
+    functor(map_cstring(anonymous_klass_name_hash_code((const InstanceKlass*)k)));
+  }
+
+  template <typename T>
+  void iterate_symbols(T& functor) {
+    _sym_table->iterate_entry(functor);
+  }
+
+  template <typename T>
+  void iterate_cstrings(T& functor) {
+    _cstring_table->iterate_entry(functor);
+  }
+
+  bool has_entries() const { return has_symbol_entries() || has_cstring_entries(); }
+  bool has_symbol_entries() const { return _sym_table->has_entries(); }
+  bool has_cstring_entries() const { return _cstring_table->has_entries(); }
+};
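A rough sketch of the intended round trip, assuming an already initialized JfrSymbolId* (symbol_id) and a resolved Symbol* (name):

    // intern the symbol; the returned traceid is what the checkpoint serialization refers to
    const traceid id = symbol_id->mark(name);
    // later, during serialization, the interned entry can be looked up again by identity hash
    const JfrSymbolId::SymbolEntry* entry =
      symbol_id->map_symbol((uintptr_t)name->identity_hash());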
+
+/**
+ * When processing a set of artifacts, there will be a need
+ * to track transitive dependencies originating with each artifact.
+ * These might or might not be explicitly "tagged" at that point.
+ * With the introduction of "epochs" to allow for concurrent tagging,
+ * we attempt to avoid "tagging" an artifact to indicate its use in a
+ * previous epoch. This is mainly to reduce the risk of data races.
+ * Instead, JfrArtifactSet is used to track transitive dependencies
+ * during the write process itself.
+ *
+ * It can also provide opportunities for caching, as the ideal is
+ * to reduce the number of iterations necessary for locating artifacts
+ * in the respective VM subsystems.
+ */
+class JfrArtifactSet : public JfrCHeapObj {
+ private:
+  JfrSymbolId* _symbol_id;
+  GrowableArray<const Klass*>* _klass_list;
+  bool _class_unload;
+
+ public:
+  JfrArtifactSet(bool class_unload);
+  ~JfrArtifactSet();
+
+  // caller needs ResourceMark
+  void initialize(bool class_unload);
+  void clear();
+
+  traceid mark(const Symbol* sym, uintptr_t hash);
+  traceid mark(const Klass* klass);
+  traceid mark(const Symbol* symbol);
+  traceid mark(const char* const str, uintptr_t hash);
+  traceid mark_anonymous_klass_name(const Klass* klass);
+
+  const JfrSymbolId::SymbolEntry* map_symbol(const Symbol* symbol) const;
+  const JfrSymbolId::SymbolEntry* map_symbol(uintptr_t hash) const;
+  const JfrSymbolId::CStringEntry* map_cstring(uintptr_t hash) const;
+
+  bool has_klass_entries() const;
+  int entries() const;
+  void register_klass(const Klass* k);
+
+  template <typename Functor>
+  void iterate_klasses(Functor& functor) const {
+    for (int i = 0; i < _klass_list->length(); ++i) {
+      if (!functor(_klass_list->at(i))) {
+        break;
+      }
+    }
+  }
+
+  template <typename T>
+  void iterate_symbols(T& functor) {
+    _symbol_id->iterate_symbols(functor);
+  }
+
+  template <typename T>
+  void iterate_cstrings(T& functor) {
+    _symbol_id->iterate_cstrings(functor);
+  }
+};
+
+class KlassArtifactRegistrator {
+ private:
+  JfrArtifactSet* _artifacts;
+ public:
+  KlassArtifactRegistrator(JfrArtifactSet* artifacts) :
+    _artifacts(artifacts) {
+    assert(_artifacts != NULL, "invariant");
+  }
+
+  bool operator()(const Klass* klass) {
+    assert(klass != NULL, "invariant");
+    _artifacts->register_klass(klass);
+    return true;
+  }
+};
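A hypothetical driver, just to show the shape of the collaboration (artifacts is an assumed JfrArtifactSet* and k an assumed Klass*; the real iteration lives in the type set implementation):

    KlassArtifactRegistrator registrator(artifacts);
    registrator(k);                                   // appends k to the artifact set's klass list
    assert(artifacts->has_klass_entries(), "expected at least one registered klass");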
+
+#endif // SHARE_VM_JFR_RECORDER_CHECKPOINT_TYPES_JFRTYPESETUTILS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/checkpoint/types/jfrTypeSetWriter.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_CHECKPOINT_TYPES_JFRTYPESETWRITER_HPP
+#define SHARE_VM_JFR_CHECKPOINT_TYPES_JFRTYPESETWRITER_HPP
+
+#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+#include "memory/allocation.hpp"
+
+template <typename WriterImpl, u4 ID>
+class JfrArtifactWriterHost : public StackObj {
+ private:
+  WriterImpl _impl;
+  JfrCheckpointWriter* _writer;
+  JfrCheckpointContext _ctx;
+  jlong _count_offset;
+  int _count;
+  bool _skip_header;
+ public:
+  JfrArtifactWriterHost(JfrCheckpointWriter* writer,
+                        JfrArtifactSet* artifacts,
+                        bool class_unload,
+                        bool skip_header = false) : _impl(writer, artifacts, class_unload),
+                                                    _writer(writer),
+                                                    _ctx(writer->context()),
+                                                    _count(0),
+                                                    _skip_header(skip_header) {
+    assert(_writer != NULL, "invariant");
+    if (!_skip_header) {
+      _writer->write_type((JfrTypeId)ID);
+      _count_offset = _writer->reserve(sizeof(u4)); // Don't know how many yet
+    }
+  }
+
+  ~JfrArtifactWriterHost() {
+    if (_count == 0) {
+      // nothing written, restore context for rewind
+      _writer->set_context(_ctx);
+      return;
+    }
+    assert(_count > 0, "invariant");
+    if (!_skip_header) {
+      _writer->write_count(_count, _count_offset);
+    }
+  }
+
+  bool operator()(typename WriterImpl::Type const & value) {
+    this->_count += _impl(value);
+    return true;
+  }
+
+  int count() const   { return _count; }
+  void add(int count) { _count += count; }
+};
+
+typedef int(*artifact_write_operation)(JfrCheckpointWriter*, JfrArtifactSet*, const void*);
+
+template <typename T, artifact_write_operation op>
+class JfrArtifactWriterImplHost {
+ private:
+  JfrCheckpointWriter* _writer;
+  JfrArtifactSet* _artifacts;
+  bool _class_unload;
+ public:
+  typedef T Type;
+  JfrArtifactWriterImplHost(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, bool class_unload) :
+    _writer(writer), _artifacts(artifacts), _class_unload(class_unload) {}
+  int operator()(T const& value) {
+    return op(this->_writer, this->_artifacts, value);
+  }
+};
+
+template <typename T, typename Predicate, artifact_write_operation op>
+class JfrPredicatedArtifactWriterImplHost : public JfrArtifactWriterImplHost<T, op> {
+ private:
+  Predicate _predicate;
+  typedef JfrArtifactWriterImplHost<T, op> Parent;
+ public:
+  JfrPredicatedArtifactWriterImplHost(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, bool class_unload) :
+    Parent(writer, artifacts, class_unload), _predicate(class_unload) {}
+  int operator()(T const& value) {
+    return _predicate(value) ? Parent::operator()(value) : 0;
+  }
+};
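These templates are intended to be composed into concrete writers by the type set code; a hypothetical composition (write_klass and TYPE_CLASS are illustrative names, not taken from this patch, and jfrTypeSetUtils.hpp is assumed to be included for UsedPredicate and JfrArtifactSet) might look like:

    int write_klass(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* k); // assumed defined elsewhere
    typedef JfrPredicatedArtifactWriterImplHost<const Klass*, UsedPredicate<const Klass*>, write_klass> KlassWriterImpl;
    typedef JfrArtifactWriterHost<KlassWriterImpl, TYPE_CLASS> KlassWriter;
    // KlassWriter writer(&checkpoint_writer, artifacts, class_unload);
    // artifacts->iterate_klasses(writer);  // the host writes the type header and element count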
+
+#endif // SHARE_VM_JFR_CHECKPOINT_TYPES_JFRTYPESETWRITER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/checkpoint/types/traceid/jfrTraceId.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/classLoaderData.inline.hpp"
+#include "classfile/symbolTable.hpp"
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+#include "oops/arrayKlass.hpp"
+#include "oops/klass.inline.hpp"
+#include "oops/instanceKlass.hpp"
+#include "oops/method.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/atomic.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
+#include "runtime/vm_version.hpp"
+#include "runtime/jniHandles.hpp"
+#include "runtime/thread.inline.hpp"
+#include "utilities/debug.hpp"
+
+// returns the updated value
+static traceid atomic_inc(traceid volatile* const dest) {
+  return (traceid) atomic_add_jlong(1, (jlong volatile*) dest);
+}
+
+static traceid next_class_id() {
+  static volatile traceid class_id_counter = MaxJfrEventId + 100;
+  return atomic_inc(&class_id_counter) << TRACE_ID_SHIFT;
+}
+
+static traceid next_thread_id() {
+#ifdef _LP64
+  static volatile traceid thread_id_counter = 0;
+  return atomic_inc(&thread_id_counter);
+#else
+  static volatile jint thread_id_counter = 0;
+  assert(thread_id_counter >= 0 && thread_id_counter < INT_MAX, "thread id counter has overflowed");
+  Atomic::inc(&thread_id_counter);
+  return (traceid) thread_id_counter;
+#endif
+}
+
+// XXX
+// static traceid next_package_id() {
+//   static volatile traceid package_id_counter = 1;
+//   return atomic_inc(&package_id_counter) << TRACE_ID_SHIFT;
+// }
+
+static traceid next_class_loader_data_id() {
+  static volatile traceid cld_id_counter = 1;
+  return atomic_inc(&cld_id_counter) << TRACE_ID_SHIFT;
+}
+
+static bool found_jdk_jfr_event_klass = false;
+
+static void check_klass(const Klass* klass) {
+  assert(klass != NULL, "invariant");
+  if (found_jdk_jfr_event_klass) {
+    return;
+  }
+  static const Symbol* jdk_jfr_event_sym = NULL;
+  if (jdk_jfr_event_sym == NULL) {
+    // setup when loading the first TypeArrayKlass (Universe::genesis) hence single threaded invariant
+    jdk_jfr_event_sym = SymbolTable::new_permanent_symbol("jdk/jfr/Event", Thread::current());
+  }
+  assert(jdk_jfr_event_sym != NULL, "invariant");
+  if (jdk_jfr_event_sym == klass->name()/* XXX && klass->class_loader() == NULL*/) {
+    found_jdk_jfr_event_klass = true;
+    JfrTraceId::tag_as_jdk_jfr_event(klass);
+  }
+}
+
+void JfrTraceId::assign(const Klass* klass) {
+  assert(klass != NULL, "invariant");
+  klass->set_trace_id(next_class_id());
+  check_klass(klass);
+  const Klass* const super = klass->super();
+  if (super == NULL) {
+    return;
+  }
+  if (IS_EVENT_KLASS(super)) {
+    tag_as_jdk_jfr_event_sub(klass);
+  }
+}
+
+// XXX
+// void JfrTraceId::assign(const PackageEntry* package) {
+//   assert(package != NULL, "invariant");
+//   package->set_trace_id(next_package_id());
+// }
+
+void JfrTraceId::assign(const ClassLoaderData* cld) {
+  assert(cld != NULL, "invariant");
+  if (cld->is_anonymous()) {
+    cld->set_trace_id(0);
+    return;
+  }
+  cld->set_trace_id(next_class_loader_data_id());
+}
+
+traceid JfrTraceId::assign_thread_id() {
+  return next_thread_id();
+}
+
+// used by CDS / APPCDS as part of "remove_unshareable_info"
+void JfrTraceId::remove(const Klass* k) {
+  assert(k != NULL, "invariant");
+  // Mask off and store the event flags.
+  // This mechanism will retain the event specific flags
+  // in the archive, allowing for event flag restoration
+  // when renewing the traceid on klass revival.
+  k->set_trace_id(EVENT_FLAGS_MASK(k));
+}
+
+// used by CDS / APPCDS as part of "restore_unshareable_info"
+void JfrTraceId::restore(const Klass* k) {
+  assert(k != NULL, "invariant");
+  if (IS_JDK_JFR_EVENT_KLASS(k)) {
+    found_jdk_jfr_event_klass = true;
+  }
+  const traceid event_flags = k->trace_id();
+  // get a fresh traceid and restore the original event flags
+  k->set_trace_id(next_class_id() | event_flags);
+}
+
+traceid JfrTraceId::get(jclass jc) {
+  assert(jc != NULL, "invariant");
+  assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_vm, "invariant");
+  const oop my_oop = JNIHandles::resolve(jc);
+  assert(my_oop != NULL, "invariant");
+  return get(java_lang_Class::as_Klass(my_oop));
+}
+
+traceid JfrTraceId::use(jclass jc, bool leakp /* false */) {
+  assert(jc != NULL, "invariant");
+  assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_vm, "invariant");
+  const oop my_oop = JNIHandles::resolve(jc);
+  assert(my_oop != NULL, "invariant");
+  return use(java_lang_Class::as_Klass(my_oop), leakp);
+}
+
+bool JfrTraceId::in_visible_set(const jclass jc) {
+  assert(jc != NULL, "invariant");
+  assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_vm, "invariant");
+  const oop mirror = JNIHandles::resolve(jc);
+  assert(mirror != NULL, "invariant");
+  return in_visible_set(java_lang_Class::as_Klass(mirror));
+}
+
+bool JfrTraceId::in_jdk_jfr_event_hierarchy(const jclass jc) {
+  assert(jc != NULL, "invariant");
+  const oop mirror = JNIHandles::resolve(jc);
+  assert(mirror != NULL, "invariant");
+  return in_jdk_jfr_event_hierarchy(java_lang_Class::as_Klass(mirror));
+}
+
+bool JfrTraceId::is_jdk_jfr_event_sub(const jclass jc) {
+  assert(jc != NULL, "invariant");
+  const oop mirror = JNIHandles::resolve(jc);
+  assert(mirror != NULL, "invariant");
+  return is_jdk_jfr_event_sub(java_lang_Class::as_Klass(mirror));
+}
+
+bool JfrTraceId::is_jdk_jfr_event(const jclass jc) {
+  assert(jc != NULL, "invariant");
+  const oop mirror = JNIHandles::resolve(jc);
+  assert(mirror != NULL, "invariant");
+  return is_jdk_jfr_event(java_lang_Class::as_Klass(mirror));
+}
+
+bool JfrTraceId::is_event_host(const jclass jc) {
+  assert(jc != NULL, "invariant");
+  const oop mirror = JNIHandles::resolve(jc);
+  assert(mirror != NULL, "invariant");
+  return is_event_host(java_lang_Class::as_Klass(mirror));
+}
+
+void JfrTraceId::tag_as_jdk_jfr_event_sub(const jclass jc) {
+  assert(jc != NULL, "invariant");
+  const oop mirror = JNIHandles::resolve(jc);
+  assert(mirror != NULL, "invariant");
+  const Klass* const k = java_lang_Class::as_Klass(mirror);
+  tag_as_jdk_jfr_event_sub(k);
+  assert(IS_JDK_JFR_EVENT_SUBKLASS(k), "invariant");
+}
+
+void JfrTraceId::tag_as_event_host(const jclass jc) {
+  assert(jc != NULL, "invariant");
+  const oop mirror = JNIHandles::resolve(jc);
+  assert(mirror != NULL, "invariant");
+  const Klass* const k = java_lang_Class::as_Klass(mirror);
+  tag_as_event_host(k);
+  assert(IS_EVENT_HOST_KLASS(k), "invariant");
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/checkpoint/types/traceid/jfrTraceId.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_CHECKPOINT_TYPES_TRACEID_JFRTRACEID_HPP
+#define SHARE_VM_JFR_CHECKPOINT_TYPES_TRACEID_JFRTRACEID_HPP
+
+#include "jni.h"
+#include "jfr/utilities/jfrTypes.hpp"
+#include "memory/allocation.hpp"
+
+class ClassLoaderData;
+class Klass;
+class Method;
+// XXX class PackageEntry;
+class Thread;
+
+/*
+ * JfrTraceId is a means of tagging, i.e. marking, specific instances as being actively in-use.
+ * The most common situation is a committed event that has a field referring to a specific instance.
+ * There then exists a relation between an event (field) and an artifact of some kind.
+ * We track this relation during runtime using the JfrTraceId mechanism in order to reify it into the chunk
+ * where the event is finally written.
+ *
+ * It is the event commit mechanism that tags instances as in-use. The tag routines return the untagged traceid
+ * as a mapping key, and the commit mechanism writes the key into the event field.
+ * Consequently, the mechanism is opaque and not something a user needs to know about.
+ * Indeed, the API promotes using well-known JVM concepts directly in events, such as having a Klass* as an event field.
+ *
+ * Tagging allows for many-to-one mappings of constants, lazy evaluation / collection of tags during chunk rotation
+ * and concurrency (by using an epoch relative tagging scheme).
+ *
+ * JfrTraceId(s) have been added to support tagging instances of classes such as:
+ *
+ *   Klass (includes Method)
+ *   ClassLoaderData
+ *   XXX PackageEntry
+ *
+ * These classes have been extended to include a _traceid field (64-bits).
+ *
+ * Each instance is uniquely identified by a type-relative monotonic counter that is unique over the VM lifecycle.
+ * "Tagging an instance" essentially means to set contextually determined (by epoch) marker bits in the _traceid field.
+ * The set of constants associated with a tagged instance is determined by a constant type definition,
+ * and these constants are then serialized in an upcoming checkpoint event for the relevant chunk.
+ *
+ * Note that a "tagging" is relative to a chunk. Having serialized the tagged instance, the tag bits are reset (for that epoch).
+ * As mentioned previously, the returned traceid is always the untagged value.
+ *
+ * We also use the _traceid field in Klass to quickly identify (bit check) if a newly loaded klass is of type jdk.jfr.Event.
+ * (see jfr/instrumentation/jfrEventClassTransformer.cpp)
+ *
+ *
+ * _traceid bit layout and description planned to go here
+ *
+ *
+ */
+
+class JfrTraceId : public AllStatic {
+ public:
+  static void assign(const Klass* klass);
+  // XXX static void assign(const PackageEntry* package);
+  static void assign(const ClassLoaderData* cld);
+  static traceid assign_thread_id();
+
+  static traceid get(const Klass* klass);
+  static traceid get(jclass jc);
+  static traceid get(const Thread* thread);
+
+  // tag construct as used, returns pre-tagged traceid
+  static traceid use(const Klass* klass, bool leakp = false);
+  static traceid use(jclass jc, bool leakp = false);
+  static traceid use(const Method* method, bool leakp = false);
+  // XXX static traceid use(const PackageEntry* package, bool leakp = false);
+  static traceid use(const ClassLoaderData* cld, bool leakp = false);
+
+  static void remove(const Klass* klass);
+  static void restore(const Klass* klass);
+
+  // set of event classes made visible to java
+  static bool in_visible_set(const Klass* k);
+  static bool in_visible_set(const jclass jc);
+
+  // jdk.jfr.Event
+  static bool is_jdk_jfr_event(const Klass* k);
+  static bool is_jdk_jfr_event(const jclass jc);
+  static void tag_as_jdk_jfr_event(const Klass* k);
+
+  // jdk.jfr.Event subklasses
+  static bool is_jdk_jfr_event_sub(const Klass* k);
+  static bool is_jdk_jfr_event_sub(const jclass jc);
+  static void tag_as_jdk_jfr_event_sub(const Klass* k);
+  static void tag_as_jdk_jfr_event_sub(const jclass jc);
+
+  static bool in_jdk_jfr_event_hierarchy(const Klass* k);
+  static bool in_jdk_jfr_event_hierarchy(const jclass jc);
+
+  // klasses that host an event
+  static bool is_event_host(const Klass* k);
+  static bool is_event_host(const jclass jc);
+  static void tag_as_event_host(const Klass* k);
+  static void tag_as_event_host(const jclass jc);
+};
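A sketch of the typical call pattern when an event field refers to a Klass (hypothetical helper; the real call sites are generated by the event writing machinery):

    static traceid write_klass_field(const Klass* klass) {
      // tags the klass as in-use for the current epoch and returns the
      // untagged, shifted id that is stored in the event field
      return JfrTraceId::use(klass);
    }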
+
+#endif // SHARE_VM_JFR_CHECKPOINT_TYPES_TRACEID_JFRTRACEID_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_CHECKPOINT_TYPES_TRACEID_JFRTYPEIDS_INLINE_HPP
+#define SHARE_VM_JFR_CHECKPOINT_TYPES_TRACEID_JFRTYPEIDS_INLINE_HPP
+
+#include "classfile/classLoaderData.hpp"
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdMacros.hpp"
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.hpp"
+#include "oops/arrayKlass.hpp"
+#include "oops/klass.hpp"
+#include "oops/instanceKlass.hpp"
+#include "oops/method.hpp"
+#include "runtime/thread.inline.hpp"
+#include "utilities/debug.hpp"
+
+template <typename T>
+inline traceid set_used_and_get(const T* type, bool leakp) {
+  assert(type != NULL, "invariant");
+  if (leakp) {
+    SET_LEAKP_USED_THIS_EPOCH(type);
+    assert(LEAKP_USED_THIS_EPOCH(type), "invariant");
+  }
+  SET_USED_THIS_EPOCH(type);
+  assert(USED_THIS_EPOCH(type), "invariant");
+  return TRACE_ID_MASKED_PTR(type);
+}
+
+template <typename T>
+inline traceid set_used_and_get_shifted(const T* type, bool leakp) {
+  assert(type != NULL, "invariant");
+  return set_used_and_get(type, leakp) >> TRACE_ID_SHIFT;
+}
+
+inline traceid JfrTraceId::get(const Klass* klass) {
+  assert(klass != NULL, "invariant");
+  return TRACE_ID(klass);
+}
+
+inline traceid JfrTraceId::get(const Thread* t) {
+  assert(t != NULL, "invariant");
+  return TRACE_ID_RAW(t->jfr_thread_local());
+}
+
+inline traceid JfrTraceId::use(const Klass* klass, bool leakp /* false */) {
+  assert(klass != NULL, "invariant");
+  return set_used_and_get_shifted(klass, leakp);
+}
+
+inline traceid JfrTraceId::use(const Method* method, bool leakp /* false */) {
+  assert(method != NULL, "invariant");
+  SET_METHOD_FLAG_USED_THIS_EPOCH(method);
+  const Klass* const klass = method->method_holder();
+  assert(klass != NULL, "invariant");
+  if (leakp) {
+    SET_LEAKP_USED_THIS_EPOCH(klass);
+    assert(LEAKP_USED_THIS_EPOCH(klass), "invariant");
+  }
+  SET_METHOD_AND_CLASS_USED_THIS_EPOCH(klass);
+  assert(METHOD_AND_CLASS_USED_THIS_EPOCH(klass), "invariant");
+  return (METHOD_ID(klass, method));
+}
+
+// XXX
+//inline traceid JfrTraceId::use(const PackageEntry* package, bool leakp /* false */) {
+//  assert(package != NULL, "invariant");
+//  return set_used_and_get_shifted(package, leakp);
+//}
+
+inline traceid JfrTraceId::use(const ClassLoaderData* cld, bool leakp /* false */) {
+  assert(cld != NULL, "invariant");
+  return cld->is_anonymous() ? 0 : set_used_and_get_shifted(cld, leakp);
+}
+
+inline bool JfrTraceId::in_visible_set(const Klass* klass) {
+  assert(klass != NULL, "invariant");
+  assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_vm, "invariant");
+  return (IS_JDK_JFR_EVENT_SUBKLASS(klass) && !klass->is_abstract()) || IS_EVENT_HOST_KLASS(klass);
+}
+
+inline bool JfrTraceId::is_jdk_jfr_event(const Klass* k) {
+  assert(k != NULL, "invariant");
+  return IS_JDK_JFR_EVENT_KLASS(k);
+}
+
+inline void JfrTraceId::tag_as_jdk_jfr_event(const Klass* klass) {
+  assert(klass != NULL, "invariant");
+  assert(IS_NOT_AN_EVENT_KLASS(klass), "invariant");
+  SET_TAG(klass, JDK_JFR_EVENT_KLASS);
+  assert(IS_JDK_JFR_EVENT_KLASS(klass), "invariant");
+  assert(IS_NOT_AN_EVENT_SUB_KLASS(klass), "invariant");
+}
+
+inline bool JfrTraceId::is_jdk_jfr_event_sub(const Klass* k) {
+  assert(k != NULL, "invariant");
+  return IS_JDK_JFR_EVENT_SUBKLASS(k);
+}
+
+inline void JfrTraceId::tag_as_jdk_jfr_event_sub(const Klass* k) {
+  assert(k != NULL, "invariant");
+  if (IS_NOT_AN_EVENT_KLASS(k)) {
+    SET_TAG(k, JDK_JFR_EVENT_SUBKLASS);
+  }
+  assert(IS_JDK_JFR_EVENT_SUBKLASS(k), "invariant");
+}
+
+inline bool JfrTraceId::in_jdk_jfr_event_hierarchy(const Klass* klass) {
+  assert(klass != NULL, "invariant");
+  if (is_jdk_jfr_event(klass)) {
+    return true;
+  }
+  const Klass* const super = klass->super();
+  return super != NULL ? IS_EVENT_KLASS(super) : false;
+}
+
+inline bool JfrTraceId::is_event_host(const Klass* k) {
+  assert(k != NULL, "invariant");
+  return IS_EVENT_HOST_KLASS(k);
+}
+
+inline void JfrTraceId::tag_as_event_host(const Klass* k) {
+  assert(k != NULL, "invariant");
+  SET_TAG(k, EVENT_HOST_KLASS);
+  assert(IS_EVENT_HOST_KLASS(k), "invariant");
+}
+
+#endif // SHARE_VM_JFR_CHECKPOINT_TYPES_TRACEID_JFRTYPEIDS_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.inline.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDBITS_INLINE_HPP
+#define SHARE_VM_JFR_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDBITS_INLINE_HPP
+
+#include "jfr/utilities/jfrTypes.hpp"
+#include "runtime/atomic.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
+#include "utilities/macros.hpp"
+
+#ifdef VM_LITTLE_ENDIAN
+static const int low_offset = 0;
+static const int leakp_offset = low_offset + 1;
+#else
+static const int low_offset = 7;
+static const int leakp_offset = low_offset - 1;
+#endif
+
+inline void set_bits(jbyte bits, jbyte* const dest) {
+  assert(dest != NULL, "invariant");
+  const jbyte current = OrderAccess::load_acquire(dest);
+  if (bits != (current & bits)) {
+    *dest |= bits;
+  }
+}
+
+inline void set_mask(jbyte mask, jbyte* const dest) {
+  assert(dest != NULL, "invariant");
+  const jbyte current = OrderAccess::load_acquire(dest);
+  if (mask != (current & mask)) {
+    *dest &= mask;
+  }
+}
+
+inline void set_bits_cas(jbyte bits, jbyte* const dest) {
+  assert(dest != NULL, "invariant");
+  do {
+    const jbyte current = OrderAccess::load_acquire(dest);
+    if (bits == (current & bits)) {
+      return;
+    }
+    const jbyte new_value = current | bits;
+    if (Atomic::cmpxchg(new_value, dest, current) == current) {
+      return;
+    }
+  } while (true);
+}
+
+inline void clear_bits_cas(jbyte bits, jbyte* const dest) {
+  assert(dest != NULL, "invariant");
+  do {
+    const jbyte current = OrderAccess::load_acquire(dest);
+    if (bits != (current & bits)) {
+      return;
+    }
+    const jbyte new_value = current ^ bits;
+    if (Atomic::cmpxchg(new_value, dest, current) == current) {
+      return;
+    }
+  } while (true);
+}
+
+inline void set_traceid_bits(jbyte bits, traceid* dest) {
+  set_bits(bits, ((jbyte*)dest) + low_offset);
+}
+
+inline void set_traceid_bits_cas(jbyte bits, traceid* dest) {
+  set_bits_cas(bits, ((jbyte*)dest) + low_offset);
+}
+
+inline void set_traceid_mask(jbyte mask, traceid* dest) {
+  set_mask(mask, ((jbyte*)dest) + low_offset);
+}
+
+inline void set_leakp_traceid_bits(jbyte bits, traceid* dest) {
+  set_bits(bits, ((jbyte*)dest) + leakp_offset);
+}
+
+inline void set_leakp_traceid_bits_cas(jbyte bits, traceid* dest) {
+  set_bits_cas(bits, ((jbyte*)dest) + leakp_offset);
+}
+
+inline void set_leakp_traceid_mask(jbyte mask, traceid* dest) {
+  set_mask(mask, ((jbyte*)dest) + leakp_offset);
+}
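A worked example of the byte addressing on a little-endian platform: the plain setters touch only the least significant byte of the 64-bit traceid, while the leakp variants touch the adjacent byte, i.e. the same flag viewed at a shift of 8 in the full word.

    traceid id = 0;
    set_traceid_bits(0x01, &id);        // id == 0x0000000000000001
    set_leakp_traceid_bits(0x01, &id);  // id == 0x0000000000000101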
+
+#endif // SHARE_VM_JFR_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDBITS_INLINE_HPP
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/orderAccess.hpp"
+
+// Alternating epochs on each rotation allow for concurrent tagging.
+// The regular epoch shift happens only during a safepoint.
+// The fence is only needed for the emergency dump case, which happens outside of a safepoint.
+bool JfrTraceIdEpoch::_epoch_state = false;
+void JfrTraceIdEpoch::shift_epoch() {
+  _epoch_state = !_epoch_state;
+  if (!SafepointSynchronize::is_at_safepoint()) {
+    OrderAccess::fence();
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDEPOCH_HPP
+#define SHARE_VM_JFR_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDEPOCH_HPP
+
+#include "memory/allocation.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+
+#define USED_BIT 1
+#define METHOD_USED_BIT (USED_BIT << 2)
+#define EPOCH_1_SHIFT 0
+#define EPOCH_2_SHIFT 1
+#define LEAKP_SHIFT 8
+
+#define USED_EPOCH_1_BIT (USED_BIT << EPOCH_1_SHIFT)
+#define USED_EPOCH_2_BIT (USED_BIT << EPOCH_2_SHIFT)
+#define LEAKP_USED_EPOCH_1_BIT (USED_EPOCH_1_BIT << LEAKP_SHIFT)
+#define LEAKP_USED_EPOCH_2_BIT (USED_EPOCH_2_BIT << LEAKP_SHIFT)
+#define METHOD_USED_EPOCH_1_BIT (METHOD_USED_BIT << EPOCH_1_SHIFT)
+#define METHOD_USED_EPOCH_2_BIT (METHOD_USED_BIT << EPOCH_2_SHIFT)
+#define METHOD_AND_CLASS_IN_USE_BITS (METHOD_USED_BIT | USED_BIT)
+#define METHOD_AND_CLASS_IN_USE_EPOCH_1_BITS (METHOD_AND_CLASS_IN_USE_BITS << EPOCH_1_SHIFT)
+#define METHOD_AND_CLASS_IN_USE_EPOCH_2_BITS (METHOD_AND_CLASS_IN_USE_BITS << EPOCH_2_SHIFT)
+
+class JfrTraceIdEpoch : AllStatic {
+  friend class JfrCheckpointManager;
+ private:
+  static bool _epoch_state;
+  static void shift_epoch();
+
+ public:
+  static u1 epoch() {
+    return _epoch_state;
+  }
+
+  static jlong epoch_address() {
+    return (jlong)&_epoch_state;
+  }
+
+  static u1 current() {
+    return _epoch_state ? (u1)1 : (u1)0;
+  }
+
+  static u1 previous() {
+    return _epoch_state ? (u1)0 : (u1)1;
+  }
+
+  static traceid in_use_this_epoch_bit() {
+    return _epoch_state ? USED_EPOCH_2_BIT : USED_EPOCH_1_BIT;
+  }
+
+  static traceid in_use_prev_epoch_bit() {
+    return _epoch_state ? USED_EPOCH_1_BIT : USED_EPOCH_2_BIT;
+  }
+
+  static traceid leakp_in_use_this_epoch_bit() {
+    return _epoch_state ? LEAKP_USED_EPOCH_2_BIT : LEAKP_USED_EPOCH_1_BIT;
+  }
+
+  static traceid leakp_in_use_prev_epoch_bit() {
+    return _epoch_state ? LEAKP_USED_EPOCH_1_BIT : LEAKP_USED_EPOCH_2_BIT;
+  }
+
+  static traceid method_in_use_this_epoch_bit() {
+    return _epoch_state ? METHOD_USED_EPOCH_2_BIT : METHOD_USED_EPOCH_1_BIT;
+  }
+
+  static traceid method_in_use_prev_epoch_bit() {
+    return _epoch_state ? METHOD_USED_EPOCH_1_BIT : METHOD_USED_EPOCH_2_BIT;
+  }
+
+  static traceid method_and_class_in_use_this_epoch_bits() {
+    return _epoch_state ? METHOD_AND_CLASS_IN_USE_EPOCH_2_BITS : METHOD_AND_CLASS_IN_USE_EPOCH_1_BITS;
+  }
+
+  static traceid method_and_class_in_use_prev_epoch_bits() {
+    return _epoch_state ? METHOD_AND_CLASS_IN_USE_EPOCH_1_BITS : METHOD_AND_CLASS_IN_USE_EPOCH_2_BITS;
+  }
+};
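To make the epoch-relative selection concrete, with the initial state (_epoch_state == false) the "this epoch" accessors resolve to the EPOCH_1 bits and the "prev epoch" accessors to the EPOCH_2 bits; shift_epoch() swaps the two, which is what allows one epoch to be tagged while the other is serialized and cleared.

    assert(JfrTraceIdEpoch::in_use_this_epoch_bit() == USED_EPOCH_1_BIT, "epoch 1 is current initially");
    assert(JfrTraceIdEpoch::in_use_prev_epoch_bit() == USED_EPOCH_2_BIT, "epoch 2 is previous initially");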
+
+#endif // SHARE_VM_JFR_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDEPOCH_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/checkpoint/types/traceid/jfrTraceIdMacros.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDMACROS_HPP
+#define SHARE_VM_JFR_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDMACROS_HPP
+
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.inline.hpp"
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp"
+#include "jfr/support/jfrKlassExtension.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+/**
+ *
+ * If a traceid is used, depending on epoch, either the first or the second bit is tagged.
+ * If a class member (method) is used, either the third or fourth bit is tagged.
+ * Which bit to set is a function of the epoch. This allows for concurrent tagging.
+ *
+ * The LeakProfiler subsystem gets its own byte and uses the same tagging scheme, but shifted up by 8.
+ *
+ * Individual methods are also tagged, using the TraceFlag field
+ * (see jfr/support/jfrTraceIdExtension.hpp for details).
+ *
+ */
+
+// these are defined in jfr/support/jfrKlassExtension.hpp
+//
+// #define JDK_JFR_EVENT_SUBKLASS  16
+// #define JDK_JFR_EVENT_KLASS     32
+// #define EVENT_HOST_KLASS        64
+
+#define IS_JDK_JFR_EVENT_SUBKLASS(ptr) (((ptr)->trace_id() & (JDK_JFR_EVENT_SUBKLASS)) != 0)
+
+#define ANY_USED_BITS (USED_EPOCH_2_BIT         | \
+                       USED_EPOCH_1_BIT         | \
+                       METHOD_USED_EPOCH_2_BIT  | \
+                       METHOD_USED_EPOCH_1_BIT  | \
+                       LEAKP_USED_EPOCH_2_BIT   | \
+                       LEAKP_USED_EPOCH_1_BIT)
+
+#define TRACE_ID_META_BITS (EVENT_HOST_KLASS | JDK_JFR_EVENT_KLASS | JDK_JFR_EVENT_SUBKLASS | ANY_USED_BITS)
+
+#define ANY_EVENT                       (EVENT_HOST_KLASS | JDK_JFR_EVENT_KLASS | JDK_JFR_EVENT_SUBKLASS)
+#define IS_JDK_JFR_EVENT_KLASS(ptr)     (((ptr)->trace_id() & JDK_JFR_EVENT_KLASS) != 0)
+#define IS_EVENT_HOST_KLASS(ptr)        (((ptr)->trace_id() & EVENT_HOST_KLASS) != 0)
+#define IS_NOT_AN_EVENT_KLASS(ptr)      (!IS_EVENT_KLASS(ptr))
+#define IS_NOT_AN_EVENT_SUB_KLASS(ptr)  (!IS_JDK_JFR_EVENT_SUBKLASS(ptr))
+#define IS_NOT_JDK_JFR_EVENT_KLASS(ptr) (!IS_JDK_JFR_EVENT_KLASS(ptr))
+#define EVENT_FLAGS_MASK(ptr)           ((ptr)->trace_id() & ANY_EVENT)
+#define UNEVENT(ptr)                    ((ptr)->set_trace_id(((ptr)->trace_id()) & ~ANY_EVENT))
+
+#define TRACE_ID_SHIFT 16
+
+#define TRACE_ID_MASKED(id)             (id & ~TRACE_ID_META_BITS)
+#define TRACE_ID_VALUE(id)              (TRACE_ID_MASKED(id) >> TRACE_ID_SHIFT)
+#define TRACE_ID_MASKED_PTR(ptr)        (TRACE_ID_MASKED((ptr)->trace_id()))
+#define TRACE_ID_RAW(ptr)               ((ptr)->trace_id())
+#define TRACE_ID(ptr)                   (TRACE_ID_MASKED_PTR(ptr) >> TRACE_ID_SHIFT)
+#define METHOD_ID(kls, meth)            (TRACE_ID_MASKED_PTR(kls) | (meth)->method_idnum())
+#define SET_TAG(ptr, tag)               (set_traceid_bits(tag, (ptr)->trace_id_addr()))
+#define SET_LEAKP_TAG(ptr, tag)         (set_leakp_traceid_bits(tag, (ptr)->trace_id_addr()))
+#define SET_TAG_CAS(ptr, tag)           (set_traceid_bits_cas(tag, (ptr)->trace_id_addr()))
+#define SET_LEAKP_TAG_CAS(ptr, tag)     (set_leakp_traceid_bits_cas(tag, (ptr)->trace_id_addr()))
+
+#define IN_USE_THIS_EPOCH_BIT           (JfrTraceIdEpoch::in_use_this_epoch_bit())
+#define IN_USE_PREV_EPOCH_BIT           (JfrTraceIdEpoch::in_use_prev_epoch_bit())
+#define LEAKP_IN_USE_THIS_EPOCH_BIT     (JfrTraceIdEpoch::leakp_in_use_this_epoch_bit())
+#define LEAKP_IN_USE_PREV_EPOCH_BIT     (JfrTraceIdEpoch::leakp_in_use_prev_epoch_bit())
+
+#define METHOD_IN_USE_THIS_EPOCH_BIT    (JfrTraceIdEpoch::method_in_use_this_epoch_bit())
+#define METHOD_IN_USE_PREV_EPOCH_BIT    (JfrTraceIdEpoch::method_in_use_prev_epoch_bit())
+#define METHOD_AND_CLASS_IN_USE_THIS_EPOCH_BITS (JfrTraceIdEpoch::method_and_class_in_use_this_epoch_bits())
+#define METHOD_AND_CLASS_IN_USE_PREV_EPOCH_BITS (JfrTraceIdEpoch::method_and_class_in_use_prev_epoch_bits())
+
+#define UNUSE_THIS_EPOCH_MASK           (~(IN_USE_THIS_EPOCH_BIT))
+#define UNUSE_PREV_EPOCH_MASK           (~(IN_USE_PREV_EPOCH_BIT))
+#define LEAKP_UNUSE_THIS_EPOCH_MASK     UNUSE_THIS_EPOCH_MASK
+#define LEAKP_UNUSE_PREV_EPOCH_MASK     UNUSE_PREV_EPOCH_MASK
+
+#define UNUSE_METHOD_THIS_EPOCH_MASK    (~(METHOD_IN_USE_THIS_EPOCH_BIT))
+#define UNUSE_METHOD_PREV_EPOCH_MASK    (~(METHOD_IN_USE_PREV_EPOCH_BIT))
+#define LEAKP_UNUSE_METHOD_THIS_EPOCH_MASK UNUSE_METHOD_THIS_EPOCH_MASK
+#define LEAKP_UNUSE_METHOD_PREV_EPOCH_MASK UNUSE_METHOD_PREV_EPOCH_MASK
+
+#define UNUSE_METHOD_AND_CLASS_THIS_EPOCH_MASK (~(METHOD_IN_USE_THIS_EPOCH_BIT | IN_USE_THIS_EPOCH_BIT))
+#define UNUSE_METHOD_AND_CLASS_PREV_EPOCH_MASK (~(METHOD_IN_USE_PREV_EPOCH_BIT | IN_USE_PREV_EPOCH_BIT))
+
+#define SET_USED_THIS_EPOCH(ptr)        (SET_TAG(ptr, IN_USE_THIS_EPOCH_BIT))
+#define SET_USED_PREV_EPOCH(ptr)        (SET_TAG_CAS(ptr, IN_USE_PREV_EPOCH_BIT))
+#define SET_LEAKP_USED_THIS_EPOCH(ptr)  (SET_LEAKP_TAG(ptr, IN_USE_THIS_EPOCH_BIT))
+#define SET_LEAKP_USED_PREV_EPOCH(ptr)  (SET_LEAKP_TAG(ptr, IN_USE_PREV_EPOCH_BIT))
+#define SET_METHOD_AND_CLASS_USED_THIS_EPOCH(kls) (SET_TAG(kls, METHOD_AND_CLASS_IN_USE_THIS_EPOCH_BITS))
+
+#define USED_THIS_EPOCH(ptr)            (((ptr)->trace_id() & IN_USE_THIS_EPOCH_BIT) != 0)
+#define NOT_USED_THIS_EPOCH(ptr)        (!USED_THIS_EPOCH(ptr))
+#define USED_PREV_EPOCH(ptr)            (((ptr)->trace_id() & IN_USE_PREV_EPOCH_BIT) != 0)
+#define NOT_USED_PREV_EPOCH(ptr)        (!USED_PREV_EPOCH(ptr))
+#define USED_ANY_EPOCH(ptr)             (((ptr)->trace_id() & (USED_EPOCH_2_BIT | USED_EPOCH_1_BIT)) != 0)
+#define NOT_USED_ANY_EPOCH(ptr)         (!USED_ANY_EPOCH(ptr))
+
+#define LEAKP_USED_THIS_EPOCH(ptr)      (((ptr)->trace_id() & LEAKP_IN_USE_THIS_EPOCH_BIT) != 0)
+#define LEAKP_NOT_USED_THIS_EPOCH(ptr)  (!LEAKP_USED_THIS_EPOCH(ptr))
+#define LEAKP_USED_PREV_EPOCH(ptr)      (((ptr)->trace_id() & LEAKP_IN_USE_PREV_EPOCH_BIT) != 0)
+#define LEAKP_NOT_USED_PREV_EPOCH(ptr)  (!LEAKP_USED_PREV_EPOCH(ptr))
+#define LEAKP_USED_ANY_EPOCH(ptr)       (((ptr)->trace_id() & (LEAKP_USED_EPOCH_2_BIT | LEAKP_USED_EPOCH_1_BIT)) != 0)
+#define LEAKP_NOT_USED_ANY_EPOCH(ptr)   (!LEAKP_USED_ANY_EPOCH(ptr))
+
+#define ANY_USED_THIS_EPOCH(ptr)        (((ptr)->trace_id() & (LEAKP_IN_USE_THIS_EPOCH_BIT | IN_USE_THIS_EPOCH_BIT)) != 0)
+#define ANY_NOT_USED_THIS_EPOCH(ptr)    (!ANY_USED_THIS_EPOCH(ptr))
+#define ANY_USED_PREV_EPOCH(ptr)        (((ptr)->trace_id() & (LEAKP_IN_USE_PREV_EPOCH_BIT | IN_USE_PREV_EPOCH_BIT)) != 0)
+#define ANY_NOT_USED_PREV_EPOCH(ptr)    (!ANY_USED_PREV_EPOCH(ptr))
+
+#define METHOD_USED_THIS_EPOCH(kls)     (((kls)->trace_id() & METHOD_IN_USE_THIS_EPOCH_BIT) != 0)
+#define METHOD_NOT_USED_THIS_EPOCH(kls) (!METHOD_USED_THIS_EPOCH(kls))
+#define METHOD_USED_PREV_EPOCH(kls)     (((kls)->trace_id() & METHOD_IN_USE_PREV_EPOCH_BIT) != 0)
+#define METHOD_NOT_USED_PREV_EPOCH(kls) (!METHOD_USED_PREV_EPOCH(kls))
+#define METHOD_USED_ANY_EPOCH(kls)      (((kls)->trace_id() & (METHOD_IN_USE_PREV_EPOCH_BIT | METHOD_IN_USE_THIS_EPOCH_BIT)) != 0)
+
+#define METHOD_NOT_USED_ANY_EPOCH(kls)  (!METHOD_USED_ANY_EPOCH(kls))
+
+#define METHOD_AND_CLASS_USED_THIS_EPOCH(kls) ((((kls)->trace_id() & METHOD_AND_CLASS_IN_USE_THIS_EPOCH_BITS) == \
+                                                                     METHOD_AND_CLASS_IN_USE_THIS_EPOCH_BITS) != 0)
+
+#define METHOD_AND_CLASS_USED_PREV_EPOCH(kls) ((((kls)->trace_id() & METHOD_AND_CLASS_IN_USE_PREV_EPOCH_BITS) == \
+                                                                     METHOD_AND_CLASS_IN_USE_PREV_EPOCH_BITS) != 0)
+
+#define METHOD_AND_CLASS_USED_ANY_EPOCH(kls)     ((METHOD_USED_ANY_EPOCH(kls) && USED_ANY_EPOCH(kls)) != 0)
+#define METHOD_AND_CLASS_NOT_USED_ANY_EPOCH(kls) (!METHOD_AND_CLASS_USED_ANY_EPOCH(kls))
+
+#define LEAKP_METHOD_IN_USE_THIS_EPOCH  (LEAKP_IN_USE_THIS_EPOCH_BIT | METHOD_IN_USE_THIS_EPOCH_BIT)
+#define LEAKP_METHOD_IN_USE_PREV_EPOCH  (LEAKP_IN_USE_PREV_EPOCH_BIT | METHOD_IN_USE_PREV_EPOCH_BIT)
+#define LEAKP_METHOD_USED_THIS_EPOCH(ptr)  ((((ptr)->trace_id() & LEAKP_METHOD_IN_USE_THIS_EPOCH) == \
+                                                                  LEAKP_METHOD_IN_USE_THIS_EPOCH) != 0)
+#define LEAKP_METHOD_NOT_USED_THIS_EPOCH(kls) (!LEAKP_METHOD_USED_THIS_EPOCH(kls))
+#define LEAKP_METHOD_USED_PREV_EPOCH(ptr)  ((((ptr)->trace_id() & LEAKP_METHOD_IN_USE_PREV_EPOCH) == \
+                                                                  LEAKP_METHOD_IN_USE_PREV_EPOCH) != 0)
+#define LEAKP_METHOD_NOT_USED_PREV_EPOCH(kls) (!LEAKP_METHOD_USED_PREV_EPOCH(kls))
+
+#define UNUSE_THIS_EPOCH(ptr)           (set_traceid_mask(UNUSE_THIS_EPOCH_MASK, (ptr)->trace_id_addr()))
+#define UNUSE_PREV_EPOCH(ptr)           (set_traceid_mask(UNUSE_PREV_EPOCH_MASK, (ptr)->trace_id_addr()))
+#define UNUSE_METHOD_THIS_EPOCH(kls)    (set_traceid_mask(UNUSE_METHOD_THIS_EPOCH_MASK, (kls)->trace_id_addr()))
+#define UNUSE_METHOD_PREV_EPOCH(kls)    (set_traceid_mask(UNUSE_METHOD_PREV_EPOCH_MASK, (kls)->trace_id_addr()))
+
+#define LEAKP_UNUSE_THIS_EPOCH(ptr)     (set_leakp_traceid_mask(UNUSE_THIS_EPOCH_MASK, (ptr)->trace_id_addr()))
+#define LEAKP_UNUSE_PREV_EPOCH(ptr)     (set_leakp_traceid_mask(UNUSE_PREV_EPOCH_MASK, (ptr)->trace_id_addr()))
+#define LEAKP_UNUSE_METHOD_THIS_EPOCH(kls) (set_leakp_traceid_mask(UNUSE_METHOD_THIS_EPOCH_MASK, (kls)->trace_id_addr()))
+#define LEAKP_UNUSE_METHOD_PREV_EPOCH(kls) (set_leakp_traceid_mask(UNUSE_METHOD_PREV_EPOCH_MASK, (kls)->trace_id_addr()))
+
+#define ANY_USED(ptr)                   (((ptr)->trace_id() & ANY_USED_BITS) != 0)
+#define ANY_NOT_USED(ptr)               (!ANY_USED(ptr))
+
+#define UNUSE_METHOD_AND_CLASS_THIS_EPOCH(kls) (set_traceid_mask(UNUSE_METHOD_AND_CLASS_THIS_EPOCH_MASK, (kls)->trace_id_addr()))
+#define LEAKP_UNUSE_METHOD_AND_CLASS_THIS_EPOCH(kls) (set_leakp_traceid_mask(UNUSE_METHOD_AND_CLASS_THIS_EPOCH_MASK, (kls)->trace_id_addr()))
+#define UNUSE_METHOD_AND_CLASS_PREV_EPOCH(kls) (set_traceid_mask(UNUSE_METHOD_AND_CLASS_PREV_EPOCH_MASK, (kls)->trace_id_addr()))
+#define LEAKP_UNUSE_METHODS_AND_CLASS_PREV_EPOCH(kls) (set_leakp_traceid_mask(UNUSE_METHOD_AND_CLASS_PREV_EPOCH_MASK, (kls)->trace_id_addr()))
+
+#define METHOD_FLAG_USED_THIS_EPOCH(m)       ((m)->is_trace_flag_set((jbyte)JfrTraceIdEpoch::in_use_this_epoch_bit()))
+#define METHOD_FLAG_NOT_USED_THIS_EPOCH(m)   (!METHOD_FLAG_USED_THIS_EPOCH(m))
+#define SET_METHOD_FLAG_USED_THIS_EPOCH(m)   ((m)->set_trace_flag((jbyte)JfrTraceIdEpoch::in_use_this_epoch_bit()))
+#define METHOD_FLAG_USED_PREV_EPOCH(m)       ((m)->is_trace_flag_set((jbyte)JfrTraceIdEpoch::in_use_prev_epoch_bit()))
+#define METHOD_FLAG_NOT_USED_PREV_EPOCH(m)   (!METHOD_FLAG_USED_PREV_EPOCH(m))
+#define METHOD_FLAG_USED_ANY_EPOCH(m)        ((METHOD_FLAG_USED_THIS_EPOCH(m) || METHOD_FLAG_USED_PREV_EPOCH(m)) != 0)
+#define METHOD_FLAG_NOT_USED_ANY_EPOCH(m)    ((METHOD_FLAG_NOT_USED_THIS_EPOCH(m) && METHOD_FLAG_NOT_USED_PREV_EPOCH(m)) != 0)
+#define CLEAR_METHOD_FLAG_USED_THIS_EPOCH(m) (clear_bits_cas((jbyte)JfrTraceIdEpoch::in_use_this_epoch_bit(), (m)->trace_flags_addr()))
+#define CLEAR_METHOD_FLAG_USED_PREV_EPOCH(m) (clear_bits_cas((jbyte)JfrTraceIdEpoch::in_use_prev_epoch_bit(), (m)->trace_flags_addr()))
+
+#endif // SHARE_VM_JFR_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDMACROS_HPP
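A worked example of the resulting id layout, combining these macros with next_class_id() in jfrTraceId.cpp (the concrete numbers are hypothetical): class ids are allocated pre-shifted by TRACE_ID_SHIFT, so the low 16 bits of a Klass' _traceid are left free for the tag bits, and METHOD_ID packs the method ordinal into those low bits of the masked value.

    // klass counter value 0x2A, method idnum 7:
    //   klass->trace_id()      == (0x2A << 16) | <tag bits>
    //   TRACE_ID(klass)        == 0x2A            // tag bits masked off, then shifted down
    //   METHOD_ID(klass, meth) == (0x2A << 16) | 7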
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/jfrEventSetting.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/recorder/jfrEventSetting.inline.hpp"
+
+JfrNativeSettings JfrEventSetting::_jvm_event_settings;
+
+bool JfrEventSetting::set_threshold(jlong id, jlong threshold_ticks) {
+  JfrEventId event_id = (JfrEventId)id;
+  assert(bounds_check_event(event_id), "invariant");
+  setting(event_id).threshold_ticks = threshold_ticks;
+  return true;
+}
+
+bool JfrEventSetting::set_cutoff(jlong id, jlong cutoff_ticks) {
+  JfrEventId event_id = (JfrEventId)id;
+  assert(bounds_check_event(event_id), "invariant");
+  setting(event_id).cutoff_ticks = cutoff_ticks;
+  return true;
+}
+
+void JfrEventSetting::set_stacktrace(jlong id, bool enabled) {
+  JfrEventId event_id = (JfrEventId)id;
+  assert(bounds_check_event(event_id), "invariant");
+  setting(event_id).stacktrace = enabled;
+}
+
+void JfrEventSetting::set_enabled(jlong id, bool enabled) {
+  JfrEventId event_id = (JfrEventId)id;
+  assert(bounds_check_event(event_id), "invariant");
+  setting(event_id).enabled = enabled;
+}
+
+#ifdef ASSERT
+bool JfrEventSetting::bounds_check_event(jlong id) {
+  if ((unsigned)id < NUM_RESERVED_EVENTS || (unsigned)id >= MaxJfrEventId) {
+    return false;
+  }
+  return true;
+}
+#endif // ASSERT
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/jfrEventSetting.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_RECORDER_JFREVENTSETTING_HPP
+#define SHARE_VM_JFR_RECORDER_JFREVENTSETTING_HPP
+
+#include "jni.h"
+#include "jfr/utilities/jfrAllocation.hpp"
+#include "jfrfiles/jfrEventControl.hpp"
+
+//
+// Native event settings as an associative array using the event id as key.
+//
+class JfrEventSetting : AllStatic {
+ private:
+  static JfrNativeSettings _jvm_event_settings;
+  static jfrNativeEventSetting& setting(JfrEventId event_id);
+
+ public:
+  static void set_enabled(jlong event_id, bool enabled);
+  static bool is_enabled(JfrEventId event_id);
+  static void set_stacktrace(jlong event_id, bool enabled);
+  static bool has_stacktrace(JfrEventId event_id);
+  static bool set_threshold(jlong event_id, jlong threshold_ticks);
+  static jlong threshold(JfrEventId event_id);
+  static bool set_cutoff(jlong event_id, jlong cutoff_ticks);
+  static jlong cutoff(JfrEventId event_id);
+  DEBUG_ONLY(static bool bounds_check_event(jlong id);)
+};
+
+#endif //  SHARE_VM_JFR_RECORDER_JFREVENTSETTING_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/jfrEventSetting.inline.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_RECORDER_JFREVENTSETTING_INLINE_HPP
+#define SHARE_VM_JFR_RECORDER_JFREVENTSETTING_INLINE_HPP
+
+#include "jfr/recorder/jfrEventSetting.hpp"
+
+inline jfrNativeEventSetting& JfrEventSetting::setting(JfrEventId event_id) {
+  return _jvm_event_settings.bits[event_id];
+}
+
+inline bool JfrEventSetting::is_enabled(JfrEventId event_id) {
+  return 0 != setting(event_id).enabled;
+}
+
+inline bool JfrEventSetting::has_stacktrace(JfrEventId event_id) {
+  return 0 != setting(event_id).stacktrace;
+}
+
+inline jlong JfrEventSetting::threshold(JfrEventId event_id) {
+  return setting(event_id).threshold_ticks;
+}
+
+inline jlong JfrEventSetting::cutoff(JfrEventId event_id) {
+  return setting(event_id).cutoff_ticks;
+}
+
+#endif // SHARE_VM_JFR_RECORDER_JFREVENTSETTING_INLINE_HPP
+
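The three files above give event emission sites a constant-time settings lookup keyed by JfrEventId. As a minimal sketch of how the accessors combine at a call site (the helper name, its parameters and the threshold handling are illustrative assumptions, not part of this changeset):

// Illustrative only: consult the native settings table before emitting an event.
#include "jfr/recorder/jfrEventSetting.inline.hpp"

static bool should_emit(JfrEventId event_id, jlong duration_ticks) {
  if (!JfrEventSetting::is_enabled(event_id)) {
    return false;                                        // event is switched off
  }
  if (duration_ticks < JfrEventSetting::threshold(event_id)) {
    return false;                                        // below the configured threshold
  }
  return true;  // a caller may additionally consult has_stacktrace(event_id)
}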
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/jfrRecorder.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,409 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/dcmd/jfrDcmds.hpp"
+#include "jfr/instrumentation/jfrJvmtiAgent.hpp"
+#include "jfr/jni/jfrJavaSupport.hpp"
+#include "jfr/periodic/jfrOSInterface.hpp"
+#include "jfr/periodic/sampling/jfrThreadSampler.hpp"
+#include "jfr/recorder/jfrRecorder.hpp"
+#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
+#include "jfr/recorder/repository/jfrRepository.hpp"
+#include "jfr/recorder/service/jfrOptionSet.hpp"
+#include "jfr/recorder/service/jfrPostBox.hpp"
+#include "jfr/recorder/service/jfrRecorderService.hpp"
+#include "jfr/recorder/service/jfrRecorderThread.hpp"
+#include "jfr/recorder/storage/jfrStorage.hpp"
+#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
+#include "jfr/recorder/stringpool/jfrStringPool.hpp"
+#include "jfr/utilities/jfrTime.hpp"
+#include "jfr/writers/jfrJavaEventWriter.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/globals.hpp"
+#include "utilities/growableArray.hpp"
+
+bool JfrRecorder::_shutting_down = false;
+
+static bool is_disabled_on_command_line() {
+  static const size_t length = strlen("FlightRecorder");
+  static Flag* const flight_recorder_flag = Flag::find_flag("FlightRecorder", length);
+  assert(flight_recorder_flag != NULL, "invariant");
+  return flight_recorder_flag->is_command_line() ? !FlightRecorder : false;
+}
+
+bool JfrRecorder::is_disabled() {
+  return is_disabled_on_command_line();
+}
+
+static bool set_flight_recorder_flag(bool flag_value) {
+  CommandLineFlags::boolAtPut((char*)"FlightRecorder", &flag_value, Flag::MANAGEMENT);
+  return FlightRecorder;
+}
+
+static bool _enabled = false;
+
+static bool enable() {
+  assert(!_enabled, "invariant");
+  _enabled = set_flight_recorder_flag(true);
+  return _enabled;
+}
+
+bool JfrRecorder::is_enabled() {
+  return _enabled;
+}
+
+bool JfrRecorder::on_vm_init() {
+  if (!is_disabled()) {
+    if (FlightRecorder || StartFlightRecording != NULL) {
+      enable();
+    }
+  }
+  // fast time initialization
+  return JfrTime::initialize();
+}
+
+static GrowableArray<JfrStartFlightRecordingDCmd*>* dcmd_recordings_array = NULL;
+
+static void release_recordings() {
+  if (dcmd_recordings_array != NULL) {
+    const int length = dcmd_recordings_array->length();
+    for (int i = 0; i < length; ++i) {
+      delete dcmd_recordings_array->at(i);
+    }
+    delete dcmd_recordings_array;
+    dcmd_recordings_array = NULL;
+  }
+}
+
+static void teardown_startup_support() {
+  release_recordings();
+  JfrOptionSet::release_startup_recording_options();
+}
+
+// Parsing options here to detect errors as soon as possible
+static bool parse_recording_options(const char* options, JfrStartFlightRecordingDCmd* dcmd_recording, TRAPS) {
+  assert(options != NULL, "invariant");
+  assert(dcmd_recording != NULL, "invariant");
+  CmdLine cmdline(options, strlen(options), true);
+  dcmd_recording->parse(&cmdline, ',', THREAD);
+  if (HAS_PENDING_EXCEPTION) {
+    java_lang_Throwable::print(PENDING_EXCEPTION, tty);
+    CLEAR_PENDING_EXCEPTION;
+    return false;
+  }
+  return true;
+}
+
+static bool validate_recording_options(TRAPS) {
+  const GrowableArray<const char*>* options = JfrOptionSet::startup_recording_options();
+  if (options == NULL) {
+    return true;
+  }
+  const int length = options->length();
+  assert(length >= 1, "invariant");
+  assert(dcmd_recordings_array == NULL, "invariant");
+  dcmd_recordings_array = new (ResourceObj::C_HEAP, mtTracing)GrowableArray<JfrStartFlightRecordingDCmd*>(length, true, mtTracing);
+  assert(dcmd_recordings_array != NULL, "invariant");
+  for (int i = 0; i < length; ++i) {
+    JfrStartFlightRecordingDCmd* const dcmd_recording = new(ResourceObj::C_HEAP, mtTracing) JfrStartFlightRecordingDCmd(tty, true);
+    assert(dcmd_recording != NULL, "invariant");
+    dcmd_recordings_array->append(dcmd_recording);
+    if (!parse_recording_options(options->at(i), dcmd_recording, THREAD)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+static bool launch_recording(JfrStartFlightRecordingDCmd* dcmd_recording, TRAPS) {
+  assert(dcmd_recording != NULL, "invariant");
+  if (LogJFR && Verbose) tty->print_cr("Starting a recording");
+  dcmd_recording->execute(DCmd_Source_Internal, THREAD);
+  if (HAS_PENDING_EXCEPTION) {
+    if (LogJFR) tty->print_cr("Exception while starting a recording");
+    CLEAR_PENDING_EXCEPTION;
+    return false;
+  }
+  if (LogJFR && Verbose) tty->print_cr("Finished starting a recording");
+  return true;
+}
+
+static bool launch_recordings(TRAPS) {
+  bool result = true;
+  if (dcmd_recordings_array != NULL) {
+    const int length = dcmd_recordings_array->length();
+    assert(length >= 1, "invariant");
+    for (int i = 0; i < length; ++i) {
+      if (!launch_recording(dcmd_recordings_array->at(i), THREAD)) {
+        result = false;
+        break;
+      }
+    }
+  }
+  teardown_startup_support();
+  return result;
+}
+
+static bool is_cds_dump_requested() {
+  // we will not be able to launch recordings if a cds dump is being requested
+  if (DumpSharedSpaces && (JfrOptionSet::startup_recording_options() != NULL)) {
+    warning("JFR will be disabled during CDS dumping");
+    teardown_startup_support();
+    return true;
+  }
+  return false;
+}
+
+bool JfrRecorder::on_vm_start() {
+  if (is_cds_dump_requested()) {
+    return true;
+  }
+  Thread* const thread = Thread::current();
+  if (!JfrJavaEventWriter::has_required_classes(thread)) {
+    // assume this is a compact profile or jfr.jar is missing for some reason;
+    // skip further initialization.
+    return true;
+  }
+  if (!JfrOptionSet::initialize(thread)) {
+    return false;
+  }
+  if (!register_jfr_dcmds()) {
+    return false;
+  }
+
+  if (!validate_recording_options(thread)) {
+    return false;
+  }
+  if (!JfrJavaEventWriter::initialize()) {
+    return false;
+  }
+  if (!JfrOptionSet::configure(thread)) {
+    return false;
+  }
+
+  if (!is_enabled()) {
+    return true;
+  }
+
+  return launch_recordings(thread);
+}
+
+static bool _created = false;
+
+//
+// Main entry point for starting Jfr functionality.
+// Non-protected initializations assume single-threaded setup.
+//
+bool JfrRecorder::create(bool simulate_failure) {
+  assert(!is_disabled(), "invariant");
+  assert(!is_created(), "invariant");
+  if (!is_enabled()) {
+    enable();
+  }
+  if (!create_components() || simulate_failure) {
+    destroy_components();
+    return false;
+  }
+  if (!create_recorder_thread()) {
+    destroy_components();
+    return false;
+  }
+  _created = true;
+  return true;
+}
+
+bool JfrRecorder::is_created() {
+  return _created;
+}
+
+bool JfrRecorder::create_components() {
+  ResourceMark rm;
+  HandleMark hm;
+
+  if (!create_jvmti_agent()) {
+    return false;
+  }
+  if (!create_post_box()) {
+    return false;
+  }
+  if (!create_chunk_repository()) {
+    return false;
+  }
+  if (!create_storage()) {
+    return false;
+  }
+  if (!create_checkpoint_manager()) {
+    return false;
+  }
+  if (!create_stacktrace_repository()) {
+    return false;
+  }
+  if (!create_os_interface()) {
+    return false;
+  }
+  if (!create_stringpool()) {
+    return false;
+  }
+  if (!create_thread_sampling()) {
+    return false;
+  }
+  return true;
+}
+
+// subsystems
+static JfrJvmtiAgent* _jvmti_agent = NULL;
+static JfrPostBox* _post_box = NULL;
+static JfrStorage* _storage = NULL;
+static JfrCheckpointManager* _checkpoint_manager = NULL;
+static JfrRepository* _repository = NULL;
+static JfrStackTraceRepository* _stack_trace_repository;
+static JfrStringPool* _stringpool = NULL;
+static JfrOSInterface* _os_interface = NULL;
+static JfrThreadSampling* _thread_sampling = NULL;
+
+bool JfrRecorder::create_jvmti_agent() {
+  return JfrOptionSet::allow_retransforms() ? JfrJvmtiAgent::create() : true;
+}
+
+bool JfrRecorder::create_post_box() {
+  assert(_post_box == NULL, "invariant");
+  _post_box = JfrPostBox::create();
+  return _post_box != NULL;
+}
+
+bool JfrRecorder::create_chunk_repository() {
+  assert(_repository == NULL, "invariant");
+  assert(_post_box != NULL, "invariant");
+  _repository = JfrRepository::create(*_post_box);
+  return _repository != NULL && _repository->initialize();
+}
+
+bool JfrRecorder::create_os_interface() {
+  assert(_os_interface == NULL, "invariant");
+  _os_interface = JfrOSInterface::create();
+  return _os_interface != NULL && _os_interface->initialize();
+}
+
+bool JfrRecorder::create_storage() {
+  assert(_repository != NULL, "invariant");
+  assert(_post_box != NULL, "invariant");
+  _storage = JfrStorage::create(_repository->chunkwriter(), *_post_box);
+  return _storage != NULL && _storage->initialize();
+}
+
+bool JfrRecorder::create_checkpoint_manager() {
+  assert(_checkpoint_manager == NULL, "invariant");
+  assert(_repository != NULL, "invariant");
+  _checkpoint_manager = JfrCheckpointManager::create(_repository->chunkwriter());
+  return _checkpoint_manager != NULL && _checkpoint_manager->initialize();
+}
+
+bool JfrRecorder::create_stacktrace_repository() {
+  assert(_stack_trace_repository == NULL, "invariant");
+  _stack_trace_repository = JfrStackTraceRepository::create();
+  return _stack_trace_repository != NULL && _stack_trace_repository->initialize();
+}
+
+bool JfrRecorder::create_stringpool() {
+  assert(_stringpool == NULL, "invariant");
+  assert(_repository != NULL, "invariant");
+  _stringpool = JfrStringPool::create(_repository->chunkwriter());
+  return _stringpool != NULL && _stringpool->initialize();
+}
+
+bool JfrRecorder::create_thread_sampling() {
+  assert(_thread_sampling == NULL, "invariant");
+  _thread_sampling = JfrThreadSampling::create();
+  return _thread_sampling != NULL;
+}
+
+void JfrRecorder::destroy_components() {
+  JfrJvmtiAgent::destroy();
+  if (_post_box != NULL) {
+    JfrPostBox::destroy();
+    _post_box = NULL;
+  }
+  if (_repository != NULL) {
+    JfrRepository::destroy();
+    _repository = NULL;
+  }
+  if (_storage != NULL) {
+    JfrStorage::destroy();
+    _storage = NULL;
+  }
+  if (_checkpoint_manager != NULL) {
+    JfrCheckpointManager::destroy();
+    _checkpoint_manager = NULL;
+  }
+  if (_stack_trace_repository != NULL) {
+    JfrStackTraceRepository::destroy();
+    _stack_trace_repository = NULL;
+  }
+  if (_stringpool != NULL) {
+    JfrStringPool::destroy();
+    _stringpool = NULL;
+  }
+  if (_os_interface != NULL) {
+    JfrOSInterface::destroy();
+    _os_interface = NULL;
+  }
+  if (_thread_sampling != NULL) {
+    JfrThreadSampling::destroy();
+    _thread_sampling = NULL;
+  }
+}
+
+bool JfrRecorder::create_recorder_thread() {
+  return JfrRecorderThread::start(_checkpoint_manager, _post_box, Thread::current());
+}
+
+void JfrRecorder::destroy() {
+  assert(is_created(), "invariant");
+  _post_box->post(MSG_SHUTDOWN);
+  JfrJvmtiAgent::destroy();
+}
+
+void JfrRecorder::on_recorder_thread_exit() {
+  assert(!is_recording(), "invariant");
+  // intent is to destroy the recorder instance and components,
+  // but need sensitive coordination not yet in place
+  //
+  // destroy_components();
+  //
+  if (LogJFR) tty->print_cr("Recorder thread STOPPED");
+}
+
+void JfrRecorder::start_recording() {
+  _post_box->post(MSG_START);
+}
+
+bool JfrRecorder::is_recording() {
+  return JfrRecorderService::is_recording();
+}
+
+void JfrRecorder::stop_recording() {
+  _post_box->post(MSG_STOP);
+}
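To summarize the lifecycle implemented above: on_vm_init() and on_vm_start() are private hooks reachable only through the Jfr friend class during VM startup, while creation and recording control go through the public surface. A hedged sketch of that public surface follows; the wrapper function is hypothetical, and the real call sites (diagnostic commands and the Java-side JVM class) live elsewhere in this changeset.

// Illustrative only: exercising the public JfrRecorder API declared in jfrRecorder.hpp.
#include "jfr/recorder/jfrRecorder.hpp"

static bool start_default_recording() {
  if (JfrRecorder::is_disabled()) {
    return false;                          // FlightRecorder disabled on the command line
  }
  if (!JfrRecorder::is_created()) {
    if (!JfrRecorder::create(false /* simulate_failure */)) {
      return false;                        // component or recorder-thread creation failed
    }
  }
  JfrRecorder::start_recording();          // posts MSG_START to the recorder thread
  return true;
}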
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/jfrRecorder.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_RECORDER_JFRRECORDER_HPP
+#define SHARE_VM_JFR_RECORDER_JFRRECORDER_HPP
+
+#include "jfr/utilities/jfrAllocation.hpp"
+
+class JavaThread;
+class Thread;
+
+//
+// Represents the singleton instance of Flight Recorder.
+// Lifecycle management of recorder components.
+//
+class JfrRecorder : public JfrCHeapObj {
+  friend class Jfr;
+  friend void recorderthread_entry(JavaThread*, Thread*);
+ private:
+  static bool _shutting_down;
+
+  static bool create_checkpoint_manager();
+  static bool create_chunk_repository();
+  static bool create_jvmti_agent();
+  static bool create_os_interface();
+  static bool create_post_box();
+  static bool create_recorder_thread();
+  static bool create_stacktrace_repository();
+  static bool create_storage();
+  static bool create_stringpool();
+  static bool create_thread_sampling();
+  static bool create_components();
+  static void destroy_components();
+  static void on_recorder_thread_exit();
+  static bool on_vm_start();
+  static bool on_vm_init();
+
+ public:
+  static bool is_enabled();
+  static bool is_disabled();
+  static bool create(bool simulate_failure);
+  static bool is_created();
+  static void destroy();
+  static void start_recording();
+  static bool is_recording();
+  static void stop_recording();
+  static bool is_shutting_down() { return _shutting_down; }
+  static void set_is_shutting_down() { _shutting_down = true; }
+};
+
+#endif // SHARE_VM_JFR_RECORDER_JFRRECORDER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/repository/jfrChunkSizeNotifier.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/jni/jfrJavaSupport.hpp"
+#include "jfr/recorder/repository/jfrChunkWriter.hpp"
+#include "jfr/recorder/repository/jfrChunkSizeNotifier.hpp"
+
+size_t JfrChunkSizeNotifier::_chunk_size_threshold = 0;
+
+void JfrChunkSizeNotifier::set_chunk_size_threshold(size_t bytes) {
+  _chunk_size_threshold = bytes;
+}
+
+size_t JfrChunkSizeNotifier::chunk_size_threshold() {
+  return _chunk_size_threshold;
+}
+
+static jobject new_chunk_monitor = NULL;
+
+// lazy install
+static jobject get_new_chunk_monitor(Thread* thread) {
+  static bool initialized = false;
+  if (initialized) {
+    assert(new_chunk_monitor != NULL, "invariant");
+    return new_chunk_monitor;
+  }
+  assert(new_chunk_monitor == NULL, "invariant");
+  // read static field
+  HandleMark hm(thread);
+  static const char klass[] = "jdk/jfr/internal/JVM";
+  static const char field[] = "FILE_DELTA_CHANGE";
+  static const char signature[] = "Ljava/lang/Object;";
+  JavaValue result(T_OBJECT);
+  JfrJavaArguments field_args(&result, klass, field, signature, thread);
+  JfrJavaSupport::get_field_global_ref(&field_args, thread);
+  new_chunk_monitor = result.get_jobject();
+  initialized = new_chunk_monitor != NULL;
+  return new_chunk_monitor;
+}
+
+void JfrChunkSizeNotifier::notify() {
+  Thread* const thread = Thread::current();
+  JfrJavaSupport::notify_all(get_new_chunk_monitor(thread), thread);
+}
+
+void JfrChunkSizeNotifier::release_monitor() {
+  if (new_chunk_monitor != NULL) {
+    JfrJavaSupport::destroy_global_jni_handle(new_chunk_monitor);
+    new_chunk_monitor = NULL;
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/repository/jfrChunkSizeNotifier.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_RECORDER_REPOSITORY_JFRRCHUNKSIZENOTIFIER_HPP
+#define SHARE_VM_JFR_RECORDER_REPOSITORY_JFRRCHUNKSIZENOTIFIER_HPP
+
+#include "memory/allocation.hpp"
+
+//
+// Responsible for notifying when the size of the current chunk exceeds a set threshold.
+// This is the means to initiate a chunk rotation based on the amount of data written.
+//
+class JfrChunkSizeNotifier : AllStatic {
+  friend class JfrRecorder;
+ private:
+  static size_t _chunk_size_threshold;
+  static void release_monitor();
+ public:
+  static void set_chunk_size_threshold(size_t bytes);
+  static size_t chunk_size_threshold();
+  static void notify();
+};
+
+#endif // SHARE_VM_JFR_RECORDER_REPOSITORY_JFRRCHUNKSIZENOTIFIER_HPP
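As a hedged sketch of the size check this notifier enables (the helper and its call site are assumptions for illustration; in this changeset the actual check is performed by the recorder service when flushing data to the chunk):

// Illustrative sketch, not the actual call site: rotate when the bytes written
// to the current chunk exceed the configured threshold.
#include "jfr/recorder/repository/jfrChunkSizeNotifier.hpp"
#include "jfr/recorder/repository/jfrChunkWriter.hpp"

static void check_for_rotation(const JfrChunkWriter& writer) {
  const size_t threshold = JfrChunkSizeNotifier::chunk_size_threshold();
  if (threshold != 0 && (size_t)writer.size_written() > threshold) {
    JfrChunkSizeNotifier::notify();   // wakes Java-side waiters on JVM.FILE_DELTA_CHANGE
  }
}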
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/repository/jfrChunkState.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/dcmd/jfrDcmds.hpp"
+#include "jfr/recorder/jfrRecorder.hpp"
+#include "jfr/recorder/repository/jfrChunkState.hpp"
+#include "jfr/recorder/repository/jfrChunkWriter.hpp"
+#include "jfr/utilities/jfrTime.hpp"
+#include "jfr/utilities/jfrTimeConverter.hpp"
+#include "runtime/os.hpp"
+#include "runtime/thread.inline.hpp"
+
+JfrChunkState::JfrChunkState() :
+  _path(NULL),
+  _start_ticks(0),
+  _start_nanos(0),
+  _previous_start_ticks(0),
+  _previous_start_nanos(0),
+  _previous_checkpoint_offset(0) {}
+
+JfrChunkState::~JfrChunkState() {
+  reset();
+}
+
+void JfrChunkState::reset() {
+  if (_path != NULL) {
+    JfrCHeapObj::free(_path, strlen(_path) + 1);
+    _path = NULL;
+  }
+  set_previous_checkpoint_offset(0);
+}
+
+void JfrChunkState::set_previous_checkpoint_offset(jlong offset) {
+  _previous_checkpoint_offset = offset;
+}
+
+jlong JfrChunkState::previous_checkpoint_offset() const {
+  return _previous_checkpoint_offset;
+}
+
+jlong JfrChunkState::previous_start_ticks() const {
+  return _previous_start_ticks;
+}
+
+jlong JfrChunkState::previous_start_nanos() const {
+  return _previous_start_nanos;
+}
+
+void JfrChunkState::update_start_ticks() {
+  _start_ticks = JfrTicks::now();
+}
+
+void JfrChunkState::update_start_nanos() {
+  _start_nanos = (jlong)(os::javaTimeMillis() * JfrTimeConverter::NANOS_PER_MILLISEC);
+}
+
+void JfrChunkState::save_current_and_update_start_ticks() {
+  _previous_start_ticks = _start_ticks;
+  update_start_ticks();
+}
+
+void JfrChunkState::save_current_and_update_start_nanos() {
+  _previous_start_nanos = _start_nanos;
+  update_start_nanos();
+}
+
+void JfrChunkState::update_time_to_now() {
+  save_current_and_update_start_nanos();
+  save_current_and_update_start_ticks();
+}
+
+jlong JfrChunkState::last_chunk_duration() const {
+  return _start_nanos - _previous_start_nanos;
+}
+
+static char* copy_path(const char* path) {
+  assert(path != NULL, "invariant");
+  const size_t path_len = strlen(path);
+  char* new_path = JfrCHeapObj::new_array<char>(path_len + 1);
+  strncpy(new_path, path, path_len);
+  new_path[path_len] = '\0';
+  return new_path;
+}
+
+void JfrChunkState::set_path(const char* path) {
+  assert(JfrStream_lock->owned_by_self(), "invariant");
+  if (_path != NULL) {
+    JfrCHeapObj::free(_path, strlen(_path) + 1);
+    _path = NULL;
+  }
+  if (path != NULL) {
+    _path = copy_path(path);
+  }
+}
+
+const char* JfrChunkState::path() const {
+  return _path;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/repository/jfrChunkState.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_RECORDER_REPOSITORY_JFRRCHUNKSTATE_HPP
+#define SHARE_VM_JFR_RECORDER_REPOSITORY_JFRRCHUNKSTATE_HPP
+
+#include "jni.h"
+#include "jfr/utilities/jfrAllocation.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+
+class JfrChunkState : public JfrCHeapObj {
+  friend class JfrChunkWriter;
+ private:
+  char* _path;
+  jlong _start_ticks;
+  jlong _start_nanos;
+  jlong _previous_start_ticks;
+  jlong _previous_start_nanos;
+  jlong _previous_checkpoint_offset;
+
+  void update_start_ticks();
+  void update_start_nanos();
+  void save_current_and_update_start_ticks();
+  void save_current_and_update_start_nanos();
+
+  JfrChunkState();
+  ~JfrChunkState();
+  void reset();
+  jlong previous_checkpoint_offset() const;
+  void set_previous_checkpoint_offset(jlong offset);
+  jlong previous_start_ticks() const;
+  jlong previous_start_nanos() const;
+  jlong last_chunk_duration() const;
+  void update_time_to_now();
+  void set_path(const char* path);
+  const char* path() const;
+};
+
+#endif // SHARE_VM_JFR_RECORDER_REPOSITORY_JFRRCHUNKSTATE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/repository/jfrChunkWriter.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/recorder/repository/jfrChunkState.hpp"
+#include "jfr/recorder/repository/jfrChunkWriter.hpp"
+#include "jfr/recorder/service/jfrOptionSet.hpp"
+#include "jfr/utilities/jfrTime.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/os.hpp"
+#include "runtime/os.hpp"
+
+const u2 JFR_VERSION_MAJOR = 2;
+const u2 JFR_VERSION_MINOR = 0;
+
+static const size_t MAGIC_LEN = 4;
+static const size_t FILEHEADER_SLOT_SIZE = 8;
+static const size_t CHUNK_SIZE_OFFSET = 8;
+
+JfrChunkWriter::JfrChunkWriter() : JfrChunkWriterBase(NULL), _chunkstate(NULL) {}
+
+bool JfrChunkWriter::initialize() {
+  assert(_chunkstate == NULL, "invariant");
+  _chunkstate = new JfrChunkState();
+  return _chunkstate != NULL;
+}
+
+static fio_fd open_existing(const char* path) {
+  return os::open(path, O_RDWR, S_IREAD | S_IWRITE);
+}
+
+static fio_fd open_chunk(const char* path) {
+  assert(JfrStream_lock->owned_by_self(), "invariant");
+  return path != NULL ? open_existing(path) : invalid_fd;
+}
+
+bool JfrChunkWriter::open() {
+  assert(_chunkstate != NULL, "invariant");
+  JfrChunkWriterBase::reset(open_chunk(_chunkstate->path()));
+  const bool is_open = this->has_valid_fd();
+  if (is_open) {
+    this->bytes("FLR", MAGIC_LEN);
+    this->be_write((u2)JFR_VERSION_MAJOR);
+    this->be_write((u2)JFR_VERSION_MINOR);
+    this->reserve(6 * FILEHEADER_SLOT_SIZE);
+    // u8 chunk_size
+    // u8 initial checkpoint offset
+    // u8 metadata section offset
+    // u8 chunk start nanos
+    // u8 chunk duration nanos
+    // u8 chunk start ticks
+    this->be_write(JfrTime::frequency());
+    // chunk capabilities, CompressedIntegers etc
+    this->be_write((u4)JfrOptionSet::compressed_integers() ? 1 : 0);
+    _chunkstate->reset();
+  }
+  return is_open;
+}
+
+size_t JfrChunkWriter::close(intptr_t metadata_offset) {
+  write_header(metadata_offset);
+  this->flush();
+  this->close_fd();
+  return size_written();
+}
+
+void JfrChunkWriter::write_header(intptr_t metadata_offset) {
+  assert(this->is_valid(), "invariant");
+  // Chunk size
+  this->write_be_at_offset((jlong)size_written(), CHUNK_SIZE_OFFSET);
+  // initial checkpoint event offset
+  this->write_be_at_offset(_chunkstate->previous_checkpoint_offset(), CHUNK_SIZE_OFFSET + (1 * FILEHEADER_SLOT_SIZE));
+  // metadata event offset
+  this->write_be_at_offset((jlong)metadata_offset, CHUNK_SIZE_OFFSET + (2 * FILEHEADER_SLOT_SIZE));
+  // start of chunk in nanos since epoch
+  this->write_be_at_offset(_chunkstate->previous_start_nanos(), CHUNK_SIZE_OFFSET + (3 * FILEHEADER_SLOT_SIZE));
+  // duration of chunk in nanos
+  this->write_be_at_offset(_chunkstate->last_chunk_duration(), CHUNK_SIZE_OFFSET + (4 * FILEHEADER_SLOT_SIZE));
+  // start of chunk in ticks
+  this->write_be_at_offset(_chunkstate->previous_start_ticks(), CHUNK_SIZE_OFFSET + (5 * FILEHEADER_SLOT_SIZE));
+}
+
+void JfrChunkWriter::set_chunk_path(const char* chunk_path) {
+  _chunkstate->set_path(chunk_path);
+}
+
+intptr_t JfrChunkWriter::size_written() const {
+  return this->is_valid() ? this->current_offset() : 0;
+}
+
+intptr_t JfrChunkWriter::previous_checkpoint_offset() const {
+  return _chunkstate->previous_checkpoint_offset();
+}
+
+void JfrChunkWriter::set_previous_checkpoint_offset(intptr_t offset) {
+  _chunkstate->set_previous_checkpoint_offset(offset);
+}
+
+void JfrChunkWriter::time_stamp_chunk_now() {
+  _chunkstate->update_time_to_now();
+}
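Since open() and write_header() lay down the chunk file header implicitly through a sequence of writes and offset patches, the resulting layout is summarized below. The constants are derived from MAGIC_LEN, FILEHEADER_SLOT_SIZE and CHUNK_SIZE_OFFSET above; the names are illustrative only and are not used by the writer itself.

// Byte offsets within the chunk file header, as written by open() and
// patched by write_header(). Multi-byte fields are big-endian.
static const size_t MAGIC_OFFSET          = 0;   // "FLR\0", 4 bytes
static const size_t VERSION_MAJOR_OFFSET  = 4;   // u2, JFR_VERSION_MAJOR (2)
static const size_t VERSION_MINOR_OFFSET  = 6;   // u2, JFR_VERSION_MINOR (0)
static const size_t SIZE_OFFSET           = 8;   // u8, total chunk size, patched at close()
static const size_t CHECKPOINT_OFFSET     = 16;  // u8, offset of initial checkpoint event
static const size_t METADATA_OFFSET       = 24;  // u8, offset of metadata event
static const size_t START_NANOS_OFFSET    = 32;  // u8, chunk start, nanos since epoch
static const size_t DURATION_NANOS_OFFSET = 40;  // u8, chunk duration in nanos
static const size_t START_TICKS_OFFSET    = 48;  // u8, chunk start in ticks
static const size_t TICKS_PER_SEC_OFFSET  = 56;  // u8, JfrTime::frequency()
static const size_t CAPABILITIES_OFFSET   = 64;  // u4, 1 if compressed integers are used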
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/repository/jfrChunkWriter.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_RECORDER_REPOSITORY_JFRCHUNKWRITER_HPP
+#define SHARE_VM_JFR_RECORDER_REPOSITORY_JFRCHUNKWRITER_HPP
+
+#include "jfr/writers/jfrStorageAdapter.hpp"
+#include "jfr/writers/jfrStreamWriterHost.inline.hpp"
+#include "jfr/writers/jfrWriterHost.inline.hpp"
+
+typedef MallocAdapter<M> JfrStreamBuffer; // 1 mb buffered writes
+typedef StreamWriterHost<JfrStreamBuffer, JfrCHeapObj> JfrBufferedStreamWriter;
+typedef WriterHost<BigEndianEncoder, CompressedIntegerEncoder, JfrBufferedStreamWriter> JfrChunkWriterBase;
+
+class JfrChunkState;
+
+class JfrChunkWriter : public JfrChunkWriterBase {
+  friend class JfrRepository;
+ private:
+  JfrChunkState* _chunkstate;
+
+  bool open();
+  size_t close(intptr_t metadata_offset);
+  void write_header(intptr_t metadata_offset);
+  void set_chunk_path(const char* chunk_path);
+
+ public:
+  JfrChunkWriter();
+  bool initialize();
+  intptr_t size_written() const;
+  intptr_t previous_checkpoint_offset() const;
+  void set_previous_checkpoint_offset(intptr_t offset);
+  void time_stamp_chunk_now();
+};
+
+#endif // SHARE_VM_JFR_RECORDER_REPOSITORY_JFRCHUNKWRITER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/repository/jfrEmergencyDump.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/jfrEvents.hpp"
+#include "jfr/leakprofiler/leakProfiler.hpp"
+#include "jfr/recorder/repository/jfrEmergencyDump.hpp"
+#include "jfr/recorder/service/jfrPostBox.hpp"
+#include "jfr/recorder/service/jfrRecorderService.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/handles.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/thread.hpp"
+
+/*
+* We are just about to exit the VM, so we will be very aggressive
+* at this point in order to increase overall success of dumping jfr data:
+*
+* 1. if the thread state is not "_thread_in_vm", we will quickly transition
+*    it to "_thread_in_vm".
+* 2. the nesting state for both resource and handle areas is unknown,
+*    so we allocate new fresh arenas, discarding the old ones.
+* 3. if the thread is the owner of some critical lock(s), unlock them.
+*
+* If we end up deadlocking while attempting to dump jfr data,
+* we rely on the WatcherThread task "is_error_reported()"
+* to exit the VM after a hard-coded timeout.
+* This "safety net" somewhat explains the aggressiveness in this attempt.
+*
+*/
+static void prepare_for_emergency_dump(Thread* thread) {
+  if (thread->is_Java_thread()) {
+    ((JavaThread*)thread)->set_thread_state(_thread_in_vm);
+  }
+
+#ifdef ASSERT
+  Monitor* owned_lock = thread->owned_locks();
+  while (owned_lock != NULL) {
+    Monitor* next = owned_lock->next();
+    owned_lock->unlock();
+    owned_lock = next;
+  }
+#endif // ASSERT
+
+  if (Threads_lock->owned_by_self()) {
+    Threads_lock->unlock();
+  }
+
+  // XXX (Module_lock -> PackageTable_lock)
+  if (PackageTable_lock->owned_by_self()) {
+    PackageTable_lock->unlock();
+  }
+
+  if (Heap_lock->owned_by_self()) {
+    Heap_lock->unlock();
+  }
+
+  if (Safepoint_lock->owned_by_self()) {
+    Safepoint_lock->unlock();
+  }
+
+  if (VMOperationQueue_lock->owned_by_self()) {
+    VMOperationQueue_lock->unlock();
+  }
+
+  if (VMOperationRequest_lock->owned_by_self()) {
+    VMOperationRequest_lock->unlock();
+  }
+
+  if (Service_lock->owned_by_self()) {
+    Service_lock->unlock();
+  }
+
+  if (CodeCache_lock->owned_by_self()) {
+    CodeCache_lock->unlock();
+  }
+
+  if (PeriodicTask_lock->owned_by_self()) {
+    PeriodicTask_lock->unlock();
+  }
+
+  if (JfrMsg_lock->owned_by_self()) {
+    JfrMsg_lock->unlock();
+  }
+
+  if (JfrBuffer_lock->owned_by_self()) {
+    JfrBuffer_lock->unlock();
+  }
+
+  if (JfrStream_lock->owned_by_self()) {
+    JfrStream_lock->unlock();
+  }
+
+  if (JfrStacktrace_lock->owned_by_self()) {
+    JfrStacktrace_lock->unlock();
+  }
+}
+
+static volatile int jfr_shutdown_lock = 0;
+
+static bool guard_reentrancy() {
+  return Atomic::cmpxchg(1, &jfr_shutdown_lock, 0) == 0;
+}
+
+void JfrEmergencyDump::on_vm_shutdown(bool exception_handler) {
+  if (!guard_reentrancy()) {
+    return;
+  }
+  // function made non-reentrant
+  Thread* thread = Thread::current();
+  if (exception_handler) {
+    // we are crashing
+    if (thread->is_Watcher_thread()) {
+      // The Watcher thread runs the periodic thread sampling task.
+      // If it has crashed, it is likely that another thread is
+      // left in a suspended state. This would mean the system
+      // will not be able to ever move to a safepoint. We try
+      // to avoid issuing safepoint operations when attempting
+      // an emergency dump, but a safepoint might be already pending.
+      return;
+    }
+    prepare_for_emergency_dump(thread);
+  }
+  EventDumpReason event;
+  if (event.should_commit()) {
+    event.set_reason(exception_handler ? "Crash" : "Out of Memory");
+    event.set_recordingId(-1);
+    event.commit();
+  }
+  if (!exception_handler) {
+    // OOM
+    LeakProfiler::emit_events(max_jlong, false);
+  }
+  const int messages = MSGBIT(MSG_VM_ERROR);
+  ResourceMark rm(thread);
+  HandleMark hm(thread);
+  JfrRecorderService service;
+  service.rotate(messages);
+}
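guard_reentrancy() above is a one-shot gate: the first thread to flip jfr_shutdown_lock from 0 to 1 performs the dump, and every later or concurrent caller returns immediately. A self-contained mirror of the same compare-and-swap pattern follows, using std::atomic instead of the VM's Atomic class purely so it compiles outside the HotSpot tree.

// Standalone illustration of the one-shot guard used by on_vm_shutdown().
#include <atomic>

static std::atomic<int> dump_gate(0);

static bool enter_once() {
  int expected = 0;
  // true for exactly one caller; all others see the flag already set
  return dump_gate.compare_exchange_strong(expected, 1);
}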
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/repository/jfrEmergencyDump.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_RECORDER_INTERNAL_JFREMERGENCY_HPP
+#define SHARE_VM_JFR_RECORDER_INTERNAL_JFREMERGENCY_HPP
+
+#include "memory/allocation.hpp"
+
+//
+// Responsible for creating an hs_err<pid>.jfr file in exceptional shutdown situations (crash, OOM)
+//
+class JfrEmergencyDump : AllStatic {
+ public:
+  static void on_vm_shutdown(bool exception_handler);
+};
+
+#endif // SHARE_VM_JFR_RECORDER_INTERNAL_JFREMERGENCY_HPP
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/repository/jfrRepository.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,476 @@
+/*
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/jfr.hpp"
+#include "jfr/jni/jfrJavaSupport.hpp"
+#include "jfr/recorder/jfrRecorder.hpp"
+#include "jfr/recorder/repository/jfrChunkState.hpp"
+#include "jfr/recorder/repository/jfrChunkWriter.hpp"
+#include "jfr/recorder/repository/jfrRepository.hpp"
+#include "jfr/recorder/service/jfrPostBox.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/mutex.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/os.hpp"
+#include "runtime/thread.inline.hpp"
+
+static JfrRepository* _instance = NULL;
+
+JfrRepository& JfrRepository::instance() {
+  return *_instance;
+}
+
+static JfrChunkWriter* _chunkwriter = NULL;
+
+static bool initialize_chunkwriter() {
+  assert(_chunkwriter == NULL, "invariant");
+  _chunkwriter = new JfrChunkWriter();
+  return _chunkwriter != NULL && _chunkwriter->initialize();
+}
+
+JfrChunkWriter& JfrRepository::chunkwriter() {
+  return *_chunkwriter;
+}
+
+JfrRepository::JfrRepository(JfrPostBox& post_box) : _path(NULL), _post_box(post_box) {}
+
+bool JfrRepository::initialize() {
+  return initialize_chunkwriter();
+}
+
+JfrRepository::~JfrRepository() {
+  if (_path != NULL) {
+    JfrCHeapObj::free(_path, strlen(_path) + 1);
+    _path = NULL;
+  }
+
+  if (_chunkwriter != NULL) {
+    delete _chunkwriter;
+    _chunkwriter = NULL;
+  }
+}
+
+JfrRepository* JfrRepository::create(JfrPostBox& post_box) {
+  assert(_instance == NULL, "invariant");
+  _instance = new JfrRepository(post_box);
+  return _instance;
+}
+
+void JfrRepository::destroy() {
+  assert(_instance != NULL, "invariant");
+  delete _instance;
+  _instance = NULL;
+}
+
+static const char vm_error_filename_fmt[] = "hs_err_pid%p.jfr";
+static const char vm_oom_filename_fmt[] = "hs_oom_pid%p.jfr";
+static const char vm_soe_filename_fmt[] = "hs_soe_pid%p.jfr";
+static const char chunk_file_jfr_ext[] = ".jfr";
+static const size_t iso8601_len = 19; // "YYYY-MM-DDTHH:MM:SS"
+
+static fio_fd open_exclusivly(const char* path) {
+  return os::open(path, O_CREAT | O_WRONLY, S_IREAD | S_IWRITE);
+}
+
+static fio_fd open_existing(const char* path) {
+  return os::open(path, O_RDWR, S_IREAD | S_IWRITE);
+}
+
+static int file_sort(const char** const file1, const char** file2) {
+  assert(NULL != *file1 && NULL != *file2, "invariant");
+  int cmp = strncmp(*file1, *file2, iso8601_len);
+  if (0 == cmp) {
+    const char* const dot1 = strchr(*file1, '.');
+    assert(NULL != dot1, "invariant");
+    const char* const dot2 = strchr(*file2, '.');
+    assert(NULL != dot2, "invariant");
+    ptrdiff_t file1_len = dot1 - *file1;
+    ptrdiff_t file2_len = dot2 - *file2;
+    if (file1_len < file2_len) {
+      return -1;
+    }
+    if (file1_len > file2_len) {
+      return 1;
+    }
+    assert(file1_len == file2_len, "invariant");
+    cmp = strncmp(*file1, *file2, file1_len);
+  }
+  assert(cmp != 0, "invariant");
+  return cmp;
+}
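file_sort() above orders repository entries by their 19-character timestamp prefix, breaks ties by the length of the name up to the first '.', and only then falls back to a lexicographic compare. Below is a standalone mirror of that ordering with hypothetical chunk file names; the exact naming scheme is produced by the Java side and is assumed here for illustration.

// Standalone mirror of the comparison performed by file_sort().
#include <cstring>
#include <cstdio>

static int chunk_order(const char* a, const char* b) {
  int cmp = strncmp(a, b, 19);                       // 1) compare the timestamp prefix
  if (cmp != 0) return cmp;
  const size_t la = (size_t)(strchr(a, '.') - a);    // 2) shorter stem sorts first
  const size_t lb = (size_t)(strchr(b, '.') - b);
  if (la != lb) return la < lb ? -1 : 1;
  return strncmp(a, b, la);                          // 3) final lexicographic tie-break
}

int main() {
  // Earlier timestamp first, then the shorter stem among equal timestamps:
  printf("%d\n", chunk_order("2019_08_12_18_30_40.jfr", "2019_08_12_18_30_41.jfr"));   // < 0
  printf("%d\n", chunk_order("2019_08_12_18_30_40_1.jfr", "2019_08_12_18_30_40.jfr")); // > 0
  return 0;
}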
+
+static void iso8601_to_date_time(char* iso8601_str) {
+  assert(iso8601_str != NULL, "invariant");
+  assert(strlen(iso8601_str) == iso8601_len, "invariant");
+  // "YYYY-MM-DDTHH:MM:SS"
+  for (size_t i = 0; i < iso8601_len; ++i) {
+    switch(iso8601_str[i]) {
+      case 'T' :
+      case '-' :
+      case ':' :
+        iso8601_str[i] = '_';
+        break;
+    }
+  }
+  // "YYYY_MM_DD_HH_MM_SS"
+}
+
+static void date_time(char* buffer, size_t buffer_len) {
+  assert(buffer != NULL, "invariant");
+  assert(buffer_len >= iso8601_len, "buffer too small");
+  os::iso8601_time(buffer, buffer_len);
+  assert(strlen(buffer) >= iso8601_len + 1, "invariant");
+  // "YYYY-MM-DDTHH:MM:SS"
+  buffer[iso8601_len] = '\0';
+  iso8601_to_date_time(buffer);
+}
+
+static jlong file_size(fio_fd fd) {
+  assert(fd != invalid_fd, "invariant");
+  const jlong current_offset = os::current_file_offset(fd);
+  const jlong size = os::lseek(fd, 0, SEEK_END);
+  os::seek_to_file_offset(fd, current_offset);
+  return size;
+}
+
+class RepositoryIterator : public StackObj {
+ private:
+  const char* const _repo;
+  const size_t _repository_len;
+  GrowableArray<const char*>* _files;
+  const char* const fully_qualified(const char* entry) const;
+  mutable int _iterator;
+
+ public:
+  RepositoryIterator(const char* repository, size_t repository_len);
+  ~RepositoryIterator() {}
+  debug_only(void print_repository_files() const;)
+  const char* const filter(const char* entry) const;
+  bool has_next() const;
+  const char* const next() const;
+};
+
+const char* const RepositoryIterator::fully_qualified(const char* entry) const {
+  assert(NULL != entry, "invariant");
+  char* file_path_entry = NULL;
+  // only use files that have content, not placeholders
+  const char* const file_separator = os::file_separator();
+  if (NULL != file_separator) {
+    const size_t entry_len = strlen(entry);
+    const size_t file_separator_length = strlen(file_separator);
+    const size_t file_path_entry_length = _repository_len + file_separator_length + entry_len;
+    file_path_entry = NEW_RESOURCE_ARRAY_RETURN_NULL(char, file_path_entry_length + 1);
+    if (NULL == file_path_entry) {
+      return NULL;
+    }
+    int position = 0;
+    position += jio_snprintf(&file_path_entry[position], _repository_len + 1, "%s", _repo);
+    position += jio_snprintf(&file_path_entry[position], file_separator_length + 1, "%s", os::file_separator());
+    position += jio_snprintf(&file_path_entry[position], entry_len + 1, "%s", entry);
+    file_path_entry[position] = '\0';
+    assert((size_t)position == file_path_entry_length, "invariant");
+    assert(strlen(file_path_entry) == (size_t)position, "invariant");
+  }
+  return file_path_entry;
+}
+
+const char* const RepositoryIterator::filter(const char* entry) const {
+  if (entry == NULL) {
+    return NULL;
+  }
+  const size_t entry_len = strlen(entry);
+  if (entry_len <= 2) {
+    // for "." and ".."
+    return NULL;
+  }
+  char* entry_name = NEW_RESOURCE_ARRAY_RETURN_NULL(char, entry_len + 1);
+  if (entry_name == NULL) {
+    return NULL;
+  }
+  strncpy(entry_name, entry, entry_len);
+  entry_name[entry_len] = '\0';
+  const char* const fully_qualified_path_entry = fully_qualified(entry_name);
+  if (NULL == fully_qualified_path_entry) {
+    return NULL;
+  }
+  const fio_fd entry_fd = open_existing(fully_qualified_path_entry);
+  if (invalid_fd == entry_fd) {
+    return NULL;
+  }
+  const jlong entry_size = file_size(entry_fd);
+  os::close(entry_fd);
+  if (0 == entry_size) {
+    return NULL;
+  }
+  return entry_name;
+}
+
+RepositoryIterator::RepositoryIterator(const char* repository, size_t repository_len) :
+  _repo(repository),
+  _repository_len(repository_len),
+  _files(NULL),
+  _iterator(0) {
+  if (NULL != _repo) {
+    assert(strlen(_repo) == _repository_len, "invariant");
+    _files = new GrowableArray<const char*>(10);
+    DIR* dirp = os::opendir(_repo);
+    if (dirp == NULL) {
+      if (true) tty->print_cr("Unable to open repository %s", _repo);
+      return;
+    }
+    struct dirent* dentry;
+    while ((dentry = os::readdir(dirp)) != NULL) {
+      const char* const entry_path = filter(dentry->d_name);
+      if (NULL != entry_path) {
+        _files->append(entry_path);
+      }
+    }
+    os::closedir(dirp);
+    if (_files->length() > 1) {
+      _files->sort(file_sort);
+    }
+  }
+}
+
+#ifdef ASSERT
+void RepositoryIterator::print_repository_files() const {
+  while (has_next()) {
+    if (true) tty->print_cr( "%s", next());
+  }
+}
+#endif
+bool RepositoryIterator::has_next() const {
+  return (_files != NULL && _iterator < _files->length());
+}
+
+const char* const RepositoryIterator::next() const {
+  return _iterator >= _files->length() ? NULL : fully_qualified(_files->at(_iterator++));
+}
+
+static void write_emergency_file(fio_fd emergency_fd, const RepositoryIterator& iterator) {
+  assert(emergency_fd != invalid_fd, "invariant");
+  const size_t size_of_file_copy_block = 1 * M; // 1 mb
+  jbyte* const file_copy_block = NEW_RESOURCE_ARRAY_RETURN_NULL(jbyte, size_of_file_copy_block);
+  if (file_copy_block == NULL) {
+    return;
+  }
+  jlong bytes_written_total = 0;
+  while (iterator.has_next()) {
+    fio_fd current_fd = invalid_fd;
+    const char* const fqn = iterator.next();
+    if (fqn != NULL) {
+      current_fd = open_existing(fqn);
+      if (current_fd != invalid_fd) {
+        const jlong current_filesize = file_size(current_fd);
+        assert(current_filesize > 0, "invariant");
+        jlong bytes_read = 0;
+        jlong bytes_written = 0;
+        while (bytes_read < current_filesize) {
+          bytes_read += (jlong)os::read_at(current_fd, file_copy_block, size_of_file_copy_block, bytes_read);
+          assert(bytes_read - bytes_written <= (jlong)size_of_file_copy_block, "invariant");
+          bytes_written += (jlong)os::write(emergency_fd, file_copy_block, bytes_read - bytes_written);
+          assert(bytes_read == bytes_written, "invariant");
+        }
+        os::close(current_fd);
+        bytes_written_total += bytes_written;
+      }
+    }
+  }
+}
+
+static const char* create_emergency_dump_path() {
+  assert(JfrStream_lock->owned_by_self(), "invariant");
+  char* buffer = NEW_RESOURCE_ARRAY_RETURN_NULL(char, O_BUFLEN);
+  if (NULL == buffer) {
+    return NULL;
+  }
+  const char* const cwd = os::get_current_directory(buffer, O_BUFLEN);
+  if (NULL == cwd) {
+    return NULL;
+  }
+  size_t pos = strlen(cwd);
+  const int fsep_len = jio_snprintf(&buffer[pos], O_BUFLEN - pos, "%s", os::file_separator());
+  const char* filename_fmt = NULL;
+  // fetch specific error cause
+  switch (JfrJavaSupport::cause()) {
+    case JfrJavaSupport::OUT_OF_MEMORY:
+      filename_fmt = vm_oom_filename_fmt;
+      break;
+    case JfrJavaSupport::STACK_OVERFLOW:
+      filename_fmt = vm_soe_filename_fmt;
+      break;
+    default:
+      filename_fmt = vm_error_filename_fmt;
+  }
+  char* emergency_dump_path = NULL;
+  pos += fsep_len;
+  if (Arguments::copy_expand_pid(filename_fmt, strlen(filename_fmt), &buffer[pos], O_BUFLEN - pos)) {
+    const size_t emergency_filename_length = strlen(buffer);
+    emergency_dump_path = NEW_RESOURCE_ARRAY_RETURN_NULL(char, emergency_filename_length + 1);
+    if (NULL == emergency_dump_path) {
+      return NULL;
+    }
+    strncpy(emergency_dump_path, buffer, emergency_filename_length);
+    emergency_dump_path[emergency_filename_length] = '\0';
+  }
+  return emergency_dump_path;
+}
+
+// Caller needs ResourceMark
+static const char* create_emergency_chunk_path(const char* repository_base, size_t repository_len) {
+  assert(repository_base != NULL, "invariant");
+  assert(JfrStream_lock->owned_by_self(), "invariant");
+  // date time
+  char date_time_buffer[32] = {0};
+  date_time(date_time_buffer, sizeof(date_time_buffer));
+  size_t date_time_len = strlen(date_time_buffer);
+  size_t chunkname_max_len = repository_len               // repository_base
+                             + 1                          // "/"
+                             + date_time_len              // date_time
+                             + strlen(chunk_file_jfr_ext) // .jfr
+                             + 1;
+  char* chunk_path = NEW_RESOURCE_ARRAY_RETURN_NULL(char, chunkname_max_len);
+  if (chunk_path == NULL) {
+    return NULL;
+  }
+  // append the individual substrings
+  jio_snprintf(chunk_path, chunkname_max_len, "%s%s%s%s", repository_base, os::file_separator(), date_time_buffer, chunk_file_jfr_ext);
+  return chunk_path;
+}
+
+static fio_fd emergency_dump_file() {
+  assert(JfrStream_lock->owned_by_self(), "invariant");
+  ResourceMark rm;
+  const char* const emergency_dump_path = create_emergency_dump_path();
+  if (emergency_dump_path == NULL) {
+    return invalid_fd;
+  }
+  const fio_fd fd = open_exclusivly(emergency_dump_path);
+  if (fd != invalid_fd) {
+    if (LogJFR) tty->print_cr( // For user, should not be "jfr, system"
+      "Attempting to recover JFR data, emergency jfr file: %s", emergency_dump_path);
+  }
+  return fd;
+}
+
+static const char* emergency_path(const char* repository, size_t repository_len) {
+  return repository == NULL ? create_emergency_dump_path() : create_emergency_chunk_path(repository, repository_len);
+}
+
+void JfrRepository::on_vm_error() {
+  assert(!JfrStream_lock->owned_by_self(), "invariant");
+  const char* path = _path;
+  if (path == NULL) {
+    // completed already
+    return;
+  }
+  ResourceMark rm;
+  MutexLockerEx stream_lock(JfrStream_lock, Mutex::_no_safepoint_check_flag);
+  const fio_fd emergency_fd = emergency_dump_file();
+  if (emergency_fd != invalid_fd) {
+    RepositoryIterator iterator(path, strlen(path));
+    write_emergency_file(emergency_fd, iterator);
+    os::close(emergency_fd);
+  }
+}
+
+bool JfrRepository::set_path(const char* path) {
+  assert(path != NULL, "trying to set the repository path with a NULL string!");
+  if (_path != NULL) {
+    // delete existing
+    JfrCHeapObj::free(_path, strlen(_path) + 1);
+  }
+  const size_t path_len = strlen(path);
+  _path = JfrCHeapObj::new_array<char>(path_len + 1);
+  if (_path == NULL) {
+    return false;
+  }
+  strncpy(_path, path, path_len);
+  _path[path_len] = '\0';
+  return true;
+}
+
+void JfrRepository::set_chunk_path(const char* path) {
+  assert(JfrStream_lock->owned_by_self(), "invariant");
+  chunkwriter().set_chunk_path(path);
+}
+
+void JfrRepository::notify_on_new_chunk_path() {
+  if (Jfr::is_recording()) {
+    instance()._post_box.post(MSG_ROTATE);
+  }
+}
+
+/**
+* Sets the file where data should be written.
+*
+* Recording  Previous  Current  Action
+* ==============================================
+*   true     null      null     Ignore, keep recording in-memory
+*   true     null      file1    Start disk recording
+*   true     file      null     Copy out metadata to disk and continue in-memory recording
+*   true     file1     file2    Copy out metadata and start with new File (file2)
+*   false     *        null     Ignore, but start recording to memory
+*   false     *        file     Ignore, but start recording to disk
+*/
+void JfrRepository::set_chunk_path(jstring path, JavaThread* jt) {
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(jt));
+  ResourceMark rm(jt);
+  const char* const canonical_chunk_path = JfrJavaSupport::c_str(path, jt);
+  {
+    MutexLockerEx stream_lock(JfrStream_lock, Mutex::_no_safepoint_check_flag);
+    if (NULL == canonical_chunk_path && !_chunkwriter->is_valid()) {
+      // new output is NULL and current output is NULL
+      return;
+    }
+    instance().set_chunk_path(canonical_chunk_path);
+  }
+  notify_on_new_chunk_path();
+}
+
+void JfrRepository::set_path(jstring location, JavaThread* jt) {
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(jt));
+  ResourceMark rm(jt);
+  const char* const path = JfrJavaSupport::c_str(location, jt);
+  if (path != NULL) {
+    instance().set_path(path);
+  }
+}
+
+bool JfrRepository::open_chunk(bool vm_error /* false */) {
+  assert(JfrStream_lock->owned_by_self(), "invariant");
+  if (vm_error) {
+    ResourceMark rm;
+    const char* repository_path = _path;
+    const size_t repository_path_len = repository_path != NULL ? strlen(repository_path) : 0;
+    const char* const path = emergency_path(repository_path, repository_path_len);
+    _chunkwriter->set_chunk_path(path);
+  }
+  return _chunkwriter->open();
+}
+
+size_t JfrRepository::close_chunk(jlong metadata_offset) {
+  return _chunkwriter->close(metadata_offset);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/repository/jfrRepository.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#ifndef SHARE_VM_JFR_RECORDER_REPOSITORY_JFRREPOSITORY_HPP
+#define SHARE_VM_JFR_RECORDER_REPOSITORY_JFRREPOSITORY_HPP
+
+#include "jni.h"
+#include "jfr/utilities/jfrAllocation.hpp"
+
+class JfrChunkWriter;
+class JfrPostBox;
+
+//
+// Represents the location on disk where internal files, "chunks", are stored.
+//
+// A "chunk" is a self-contained file artifact storing events and metadata that
+// has been moved out of process memory.
+//
+// Chunk files are associated with recordings and are managed at a higher level in Java.
+// Java continuously keeps the VM informed about new chunk locations via set_chunk_path().
+//
+// A JfrChunkWriter will open the next chunk file which it maintains as the current chunk.
+// There is a rotation scheme in place for creating new chunks at certain intervals.
+//
+class JfrRepository : public JfrCHeapObj {
+  friend class JfrRecorder;
+  friend class JfrRecorderService;
+ private:
+  char* _path;
+  JfrPostBox& _post_box;
+
+  JfrRepository(JfrPostBox& post_box);
+  ~JfrRepository();
+
+  bool set_path(const char* path);
+  void set_chunk_path(const char* path);
+  bool open_chunk(bool vm_error = false);
+  size_t close_chunk(jlong metadata_offset);
+  void on_vm_error();
+  static void notify_on_new_chunk_path();
+  static JfrChunkWriter& chunkwriter();
+
+  static JfrRepository& instance();
+  static JfrRepository* create(JfrPostBox& post_box);
+  bool initialize();
+  static void destroy();
+
+ public:
+  static void set_path(jstring location, JavaThread* jt);
+  static void set_chunk_path(jstring path, JavaThread* jt);
+};
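+
+// Illustrative call-order sketch (for orientation only): the real call sites are in
+// the friend classes JfrRecorder and JfrRecorderService, and "post_box" stands for
+// whatever JfrPostBox instance the recorder owns.
+//
+//   JfrRepository* repo = JfrRepository::create(post_box);
+//   if (repo != NULL && repo->initialize()) {
+//     JfrRepository::set_path(location, jt);       // upcall from Java: repository directory
+//     JfrRepository::set_chunk_path(path, jt);     // upcall from Java: rotate to a new chunk
+//     // under JfrStream_lock: open_chunk() / close_chunk(metadata_offset)
+//     JfrRepository::destroy();
+//   }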
+
+#endif // SHARE_VM_JFR_RECORDER_REPOSITORY_JFRREPOSITORY_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/service/jfrEvent.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/recorder/service/jfrEvent.hpp"
+#include "utilities/bitMap.inline.hpp"
+#include "utilities/macros.hpp"
+
+#ifdef ASSERT
+JfrEventVerifier::JfrEventVerifier() : _committed(false) {
+  memset(_verification_storage, 0, (sizeof(_verification_storage)));
+  _verification_bit_map = BitMap(_verification_storage, (BitMap::idx_t)(sizeof(_verification_storage) * BitsPerByte));
+}
+
+void JfrEventVerifier::check(BitMap::idx_t field_idx) const {
+  assert(field_idx < _verification_bit_map.size(), "too many fields to verify, please resize _verification_storage");
+}
+
+void JfrEventVerifier::set_field_bit(size_t field_idx) {
+  check((BitMap::idx_t)field_idx);
+  _verification_bit_map.set_bit((BitMap::idx_t)field_idx);
+}
+
+bool JfrEventVerifier::verify_field_bit(size_t field_idx) const {
+  check((BitMap::idx_t)field_idx);
+  return _verification_bit_map.at((BitMap::idx_t)field_idx);
+}
+
+void JfrEventVerifier::set_committed() {
+  assert(!_committed, "invariant");
+  _committed = true;
+}
+
+void JfrEventVerifier::clear_committed() {
+  _committed = false;
+}
+
+bool JfrEventVerifier::committed() const {
+  return _committed;
+}
+
+#endif // ASSERT
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/service/jfrEvent.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_RECORDER_SERVICE_JFREVENT_HPP
+#define SHARE_VM_JFR_RECORDER_SERVICE_JFREVENT_HPP
+
+#include "jfr/recorder/jfrEventSetting.inline.hpp"
+#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
+#include "jfr/utilities/jfrTime.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+#include "jfr/writers/jfrNativeEventWriter.hpp"
+#include "runtime/thread.hpp"
+#include "utilities/exceptions.hpp"
+#include "utilities/ticks.hpp"
+#ifdef ASSERT
+#include "utilities/bitMap.hpp"
+#endif
+
+#ifdef ASSERT
+class JfrEventVerifier {
+  template <typename>
+  friend class JfrEvent;
+ private:
+  // verification of event fields
+  BitMap::bm_word_t _verification_storage[1];
+  BitMap _verification_bit_map;
+  bool _committed;
+
+  JfrEventVerifier();
+  void check(BitMap::idx_t field_idx) const;
+  void set_field_bit(size_t field_idx);
+  bool verify_field_bit(size_t field_idx) const;
+  void set_committed();
+  void clear_committed();
+  bool committed() const;
+};
+#endif // ASSERT
+
+template <typename T>
+class JfrEvent {
+ private:
+  jlong _start_time;
+  jlong _end_time;
+  bool _started;
+
+ protected:
+  JfrEvent(EventStartTime timing=TIMED) : _start_time(0), _end_time(0), _started(false)
+#ifdef ASSERT
+  , _verifier()
+#endif
+  {
+    if (T::is_enabled()) {
+      _started = true;
+      if (TIMED == timing && !T::isInstant) {
+        set_starttime(JfrTicks::now());
+      }
+    }
+  }
+
+  void commit() {
+    if (!should_commit()) {
+      return;
+    }
+    assert(!_verifier.committed(), "event already committed");
+    if (_start_time == 0) {
+      set_starttime(JfrTicks::now());
+    } else if (_end_time == 0) {
+      set_endtime(JfrTicks::now());
+    }
+    if (should_write()) {
+      write_event();
+      DEBUG_ONLY(_verifier.set_committed();)
+    }
+  }
+
+ public:
+  void set_starttime(const JfrTicks& time) {
+    _start_time = time.value();
+  }
+
+  void set_endtime(const JfrTicks& time) {
+    _end_time = time.value();
+  }
+
+  void set_starttime(const Ticks& time) {
+    _start_time = JfrTime::is_ft_enabled() ? time.ft_value() : time.value();
+  }
+
+  void set_endtime(const Ticks& time) {
+    _end_time = JfrTime::is_ft_enabled() ? time.ft_value() : time.value();
+  }
+
+  static bool is_enabled() {
+    return JfrEventSetting::is_enabled(T::eventId);
+  }
+
+  static bool is_stacktrace_enabled() {
+    return JfrEventSetting::has_stacktrace(T::eventId);
+  }
+
+  static JfrEventId id() {
+    return T::eventId;
+  }
+
+  static bool is_instant() {
+    return T::isInstant;
+  }
+
+  static bool is_requestable() {
+    return T::isRequestable;
+  }
+
+  static bool has_thread() {
+    return T::hasThread;
+  }
+
+  static bool has_stacktrace() {
+    return T::hasStackTrace;
+  }
+
+  bool should_commit() {
+    return _started;
+  }
+
+ private:
+  bool should_write() {
+    if (T::isInstant || T::isRequestable || T::hasCutoff) {
+      return true;
+    }
+    return (_end_time - _start_time) >= JfrEventSetting::threshold(T::eventId);
+  }
+
+  void write_event() {
+    DEBUG_ONLY(assert_precondition();)
+    Thread* const event_thread = Thread::current();
+    JfrThreadLocal* const tl = event_thread->jfr_thread_local();
+    JfrBuffer* const buffer = tl->native_buffer();
+    if (buffer == NULL) {
+      // most likely a pending OOM
+      return;
+    }
+    JfrNativeEventWriter writer(buffer, event_thread);
+    writer.write<u8>(T::eventId);
+    assert(_start_time != 0, "invariant");
+    writer.write(_start_time);
+    if (!(T::isInstant || T::isRequestable) || T::hasCutoff) {
+      assert(_end_time != 0, "invariant");
+      writer.write(_end_time - _start_time);
+    }
+    if (T::hasThread) {
+      writer.write(tl->thread_id());
+    }
+    if (T::hasStackTrace) {
+      if (is_stacktrace_enabled()) {
+        if (tl->has_cached_stack_trace()) {
+          writer.write(tl->cached_stack_trace_id());
+        } else {
+          writer.write(JfrStackTraceRepository::record(event_thread));
+        }
+      } else {
+        writer.write<traceid>(0);
+      }
+    }
+    // payload
+    static_cast<T*>(this)->writeData(writer);
+  }
+
+#ifdef ASSERT
+ private:
+  // verification of event fields
+  JfrEventVerifier _verifier;
+
+  void assert_precondition() {
+    assert(T::eventId >= (JfrEventId)NUM_RESERVED_EVENTS, "event id underflow invariant");
+    assert(T::eventId < MaxJfrEventId, "event id overflow invariant");
+    DEBUG_ONLY(static_cast<T*>(this)->verify());
+  }
+
+ protected:
+  void set_field_bit(size_t field_idx) {
+    _verifier.set_field_bit(field_idx);
+    // it is ok to reuse an already committed event
+    // granted you provide new informational content
+    _verifier.clear_committed();
+  }
+
+  bool verify_field_bit(size_t field_idx) const {
+    return _verifier.verify_field_bit(field_idx);
+  }
+#endif // ASSERT
+};
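+
+// Illustrative sketch of how a concrete event type plugs into this CRTP template.
+// "EventExample" and its members are hypothetical; the real event classes are
+// generated and supply eventId, the is/has traits and writeData() themselves
+// (debug builds additionally supply a verify() method):
+//
+//   class EventExample : public JfrEvent<EventExample> {
+//    public:
+//     static const bool hasThread = true;
+//     static const bool hasStackTrace = true;
+//     static const bool isInstant = false;
+//     static const bool isRequestable = false;
+//     static const bool hasCutoff = false;
+//     static const JfrEventId eventId = ExampleEventId;   // hypothetical id constant
+//
+//     using JfrEvent<EventExample>::commit;               // re-export the protected commit()
+//     void set_value(jlong value) { _value = value; }
+//     void writeData(JfrNativeEventWriter& writer) { writer.write(_value); }
+//    private:
+//     jlong _value;
+//   };
+//
+//   EventExample event;           // records the start time if the event is enabled
+//   if (event.should_commit()) {
+//     event.set_value(42);
+//     event.commit();             // records the end time and writes if over threshold
+//   }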
+
+#endif // SHARE_VM_JFR_RECORDER_SERVICE_JFREVENT_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/service/jfrMemorySizer.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,433 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/recorder/service/jfrMemorySizer.hpp"
+#include "runtime/os.hpp"
+
+const julong MAX_ADJUSTED_GLOBAL_BUFFER_SIZE = 1 * M;
+const julong MIN_ADJUSTED_GLOBAL_BUFFER_SIZE_CUTOFF = 512 * K;
+const julong MIN_GLOBAL_BUFFER_SIZE = 64 * K;
+// implies at least 2 * MIN_GLOBAL_BUFFER_SIZE
+const julong MIN_BUFFER_COUNT = 2;
+// MAX global buffer count open ended
+const julong DEFAULT_BUFFER_COUNT = 20;
+// MAX thread local buffer size == size of a single global buffer (runtime determined)
+// DEFAULT thread local buffer size = 2 * os page size (runtime determined)
+const julong MIN_THREAD_BUFFER_SIZE = 4 * K;
+const julong MIN_MEMORY_SIZE = 1 * M;
+const julong DEFAULT_MEMORY_SIZE = 10 * M;
+
+//
+// In pages:
+//
+// units = total_pages / per_unit_pages
+//
+static julong div_pages(julong& total_pages, julong& per_unit_pages) {
+  assert(total_pages > 0, "invariant");
+  assert(per_unit_pages > 0, "invariant");
+  assert(total_pages >= per_unit_pages, "invariant");
+
+  const julong units = total_pages / per_unit_pages;
+  const julong rem = total_pages % per_unit_pages;
+
+  assert(units > 0, "invariant");
+
+  if (rem > 0) {
+    total_pages -= rem % units;
+    per_unit_pages += rem / units;
+  }
+
+  assert(per_unit_pages > 0, "invariant");
+  assert(total_pages % units == 0, "invariant");
+  assert(units * per_unit_pages == total_pages, "invariant");
+  assert(units == total_pages / per_unit_pages, "invariant");
+
+  return units;
+}
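+
+// Worked example: total_pages = 10 and per_unit_pages = 3 give units = 3, rem = 1;
+// the remainder is redistributed as total_pages -= 1 % 3 (= 1) and
+// per_unit_pages += 1 / 3 (= 0), leaving total_pages = 9 and per_unit_pages = 3,
+// so 3 * 3 == 9 and all post-condition asserts hold.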
+
+static void page_size_align_up(julong& value) {
+  static const julong alignment = os::vm_page_size() - 1;
+  value = (value + alignment) & ~alignment;
+}
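+
+// Worked example: with a 4K (4096 byte) page size the alignment mask is 4095, so
+// page_size_align_up rounds 5000 to (5000 + 4095) & ~4095 = 8192, the next page
+// boundary; values that are already page aligned are left unchanged.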
+
+//
+// In bytes:
+// units = total_bytes / per_unit_bytes
+//
+static julong div_total_by_per_unit(julong& total_bytes, julong& per_unit_bytes) {
+  assert(total_bytes > 0, "invariant");
+  assert(per_unit_bytes > 0, "invariant");
+  assert(total_bytes >= per_unit_bytes, "invariant");
+
+  page_size_align_up(total_bytes);
+  assert(total_bytes % os::vm_page_size() == 0, "invariant");
+  julong total_pages = total_bytes / os::vm_page_size();
+
+  page_size_align_up(per_unit_bytes);
+  assert(per_unit_bytes % os::vm_page_size() == 0, "invariant");
+  julong per_unit_pages = per_unit_bytes / os::vm_page_size();
+
+  const julong units = div_pages(total_pages, per_unit_pages);
+  assert(units > 0, "invariant");
+
+  total_bytes = total_pages * os::vm_page_size();
+  per_unit_bytes = per_unit_pages * os::vm_page_size();
+
+  assert(per_unit_bytes > 0, "invariant");
+  assert(total_bytes / per_unit_bytes == units, "invariant");
+
+  return units;
+}
+
+//
+// per_unit_bytes = total_bytes / units
+//
+static julong div_total_by_units(julong& total_bytes, julong& units) {
+  page_size_align_up(total_bytes);
+  assert(total_bytes % os::vm_page_size() == 0, "invariant");
+  julong total_pages = total_bytes / os::vm_page_size();
+  assert(units > 0, "invariant");
+
+  julong per_unit_pages = total_pages <= units ? 1 : total_pages / units;
+  units = div_pages(total_pages, per_unit_pages);
+
+  julong per_unit_bytes = per_unit_pages * os::vm_page_size();
+  assert(per_unit_bytes % os::vm_page_size() == 0, "invariant");
+
+  total_bytes = total_pages * os::vm_page_size();
+  assert(total_bytes % os::vm_page_size() == 0, "invariant");
+
+  assert(total_bytes % units == 0, "invariant");
+  assert(total_bytes / units == per_unit_bytes, "invariant");
+  assert(units * per_unit_bytes == total_bytes, "invariant");
+
+  return per_unit_bytes;
+}
+
+//
+// total_bytes = per_unit_bytes * units;
+//
+static julong multiply(julong& per_unit_bytes, julong& units) {
+  page_size_align_up(per_unit_bytes);
+  assert(per_unit_bytes % os::vm_page_size() == 0, "invariant");
+  assert(units > 0, "invariant");
+
+  julong total_bytes = per_unit_bytes * units;
+  assert(total_bytes % os::vm_page_size() == 0, "invariant");
+
+  assert(total_bytes % units == 0, "invariant");
+  assert(total_bytes / units == per_unit_bytes, "invariant");
+  assert(units * per_unit_bytes == total_bytes, "invariant");
+
+  return total_bytes;
+}
+
+// Total_bytes is explicitly set.
+//
+// Deduce other parameters by delegating to a sizing policy
+template <typename SizingPolicy>
+static julong adjust(JfrMemoryOptions* options) {
+  page_size_align_up(options->memory_size);
+  assert(options->memory_size % os::vm_page_size() == 0, "invariant");
+  julong total_pages = options->memory_size / os::vm_page_size();
+  assert(options->buffer_count > 0, "invariant");
+  julong per_unit_pages = total_pages / options->buffer_count;
+  page_size_align_up(options->thread_buffer_size);
+  assert(options->thread_buffer_size % os::vm_page_size() == 0, "invariant");
+  julong thread_buffer_pages = options->thread_buffer_size / os::vm_page_size();
+
+  SizingPolicy::adjust(total_pages, per_unit_pages, options->buffer_count, thread_buffer_pages, options->thread_buffer_size_configured);
+  assert(options->buffer_count * per_unit_pages == total_pages, "invariant");
+
+  const julong per_unit_bytes = per_unit_pages * os::vm_page_size();
+  options->memory_size = total_pages * os::vm_page_size();
+  options->thread_buffer_size = thread_buffer_pages * os::vm_page_size();
+
+  assert(options->memory_size % options->buffer_count == 0, "invariant");
+  assert(options->memory_size / options->buffer_count == per_unit_bytes, "invariant");
+  assert(options->buffer_count * per_unit_bytes == options->memory_size, "invariant");
+  assert(per_unit_bytes >= options->thread_buffer_size, "invariant");
+  return per_unit_bytes;
+}
+
+static void align_buffer_size(julong& buffer_size_in_pages, julong max_size_pages, julong min_size_pages, bool sizeup = false) {
+  buffer_size_in_pages = MIN2(buffer_size_in_pages, max_size_pages);
+  buffer_size_in_pages = MAX2(buffer_size_in_pages, min_size_pages);
+  size_t multiples = 0;
+  if (buffer_size_in_pages < max_size_pages) {
+    while (buffer_size_in_pages >=
+      (min_size_pages << (multiples + (sizeup ? 0 : 1)))) {
+      ++multiples;
+    }
+    buffer_size_in_pages = min_size_pages << multiples;
+  }
+  assert(buffer_size_in_pages >= min_size_pages && buffer_size_in_pages <= max_size_pages, "invariant");
+}
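+
+// Worked example: with 4K pages, max_size_pages = 256 (1M) and min_size_pages = 16 (64K),
+// a requested buffer_size_in_pages of 48 is first clamped into [16, 256] and then, with
+// sizeup == false, snapped down to the largest power-of-two multiple of the minimum that
+// does not exceed it: 16 << 1 = 32 pages, i.e. 128K.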
+
+static void adjust_buffer_size_to_total_memory_size(julong& total_pages, julong& buffer_size_pages) {
+  static const julong max_buffer_size_pages = MAX_ADJUSTED_GLOBAL_BUFFER_SIZE / os::vm_page_size();
+  // If memory size is less than DEFAULT_MEMORY_SIZE,
+  // the adjustment algorithm can decrease the size of the global buffer
+  // all the way down to MIN_GLOBAL_BUFFER_SIZE (taking the embedded use case into account).
+  // If memory size is larger than DEFAULT_MEMORY_SIZE, the lowest size of
+  // a global buffer will be MIN_ADJUSTED_GLOBAL_BUFFER_SIZE_CUTOFF.
+  static const julong min_buffer_size_pages =
+    total_pages * os::vm_page_size() < DEFAULT_MEMORY_SIZE ?
+      MIN_GLOBAL_BUFFER_SIZE / os::vm_page_size() :
+      MIN_ADJUSTED_GLOBAL_BUFFER_SIZE_CUTOFF / os::vm_page_size();
+
+  align_buffer_size(buffer_size_pages, max_buffer_size_pages, min_buffer_size_pages);
+  assert(buffer_size_pages % min_buffer_size_pages == 0, "invariant");
+
+  julong remainder = total_pages % buffer_size_pages;
+  while (remainder >= (buffer_size_pages >> 1)) {
+    if (buffer_size_pages <= min_buffer_size_pages) {
+      break;
+    }
+    buffer_size_pages >>= 1;
+    remainder = total_pages % buffer_size_pages;
+  }
+}
+
+// Sizing policy class
+class ScaleOutAdjuster : public AllStatic {
+ public:
+  static void adjust(julong& total_pages,
+                     julong& buffer_size_pages,
+                     julong& buffer_count,
+                     julong& thread_buffer_size_pages,
+                     bool is_thread_buffer_size_set) {
+    assert(buffer_count > 0, "invariant");
+    adjust_buffer_size_to_total_memory_size(total_pages, buffer_size_pages);
+    assert(buffer_size_pages * os::vm_page_size() >= MIN_GLOBAL_BUFFER_SIZE, "invariant");
+    assert((buffer_size_pages * os::vm_page_size()) % MIN_GLOBAL_BUFFER_SIZE == 0, "invariant");
+    if (is_thread_buffer_size_set) {
+      if (thread_buffer_size_pages > buffer_size_pages) {
+        buffer_size_pages = thread_buffer_size_pages;
+      }
+    }
+    // and with this information, calculate what the new buffer count will be
+    buffer_count = div_pages(total_pages, buffer_size_pages);
+  }
+};
+
+static void memory_and_thread_buffer_size(JfrMemoryOptions* options) {
+  assert(options->memory_size_configured, "invariant");
+  assert(!options->buffer_count_configured, "invariant");
+  assert(!options->global_buffer_size_configured, "invariant");
+  // Here the only thing specified is the overall total memory size.
+  // We can and will apply sizing heuristics to derive both the size of an
+  // individual global buffer and, by implication, the number of global
+  // buffers to use. Starting values for buffer count and global_buffer_size
+  // will be the defaults.
+  options->global_buffer_size = adjust<ScaleOutAdjuster>(options);
+}
+
+static void memory_size_and_buffer_count(JfrMemoryOptions* options) {
+  assert(options->memory_size_configured, "invariant");
+  assert(!options->global_buffer_size_configured, "invariant");
+  assert(!options->thread_buffer_size_configured, "invariant");
+  assert(options->buffer_count_configured, "invariant");
+  options->global_buffer_size = div_total_by_units(options->memory_size, options->buffer_count);
+}
+
+static void memory_size_and_global_buffer_size(JfrMemoryOptions* options) {
+  assert(options->memory_size_configured, "invariant");
+  assert(options->global_buffer_size_configured, "invariant");
+  assert(!options->buffer_count_configured, "invariant");
+  page_size_align_up(options->thread_buffer_size);
+  options->buffer_count = div_total_by_per_unit(options->memory_size, options->global_buffer_size);
+  if (options->thread_buffer_size > options->global_buffer_size) {
+    options->global_buffer_size = options->thread_buffer_size;
+    options->buffer_count = div_total_by_per_unit(options->memory_size, options->global_buffer_size);
+  }
+  assert(options->global_buffer_size >= options->thread_buffer_size, "invariant");
+}
+
+static bool is_ambiguous(const JfrMemoryOptions* options) {
+  assert(options->memory_size_configured, "invariant");
+  assert(options->global_buffer_size_configured, "invariant");
+  assert(options->buffer_count_configured, "invariant");
+  assert(options->thread_buffer_size <= options->global_buffer_size, "invariant");
+  // This can cause an ambiguous situation because all three parameters are explicitly set.
+  return options->global_buffer_size * options->buffer_count != options->memory_size;
+}
+
+static void all_options_set(JfrMemoryOptions* options) {
+  options->buffer_count = div_total_by_per_unit(options->memory_size, options->global_buffer_size);
+  page_size_align_up(options->thread_buffer_size);
+  if (options->thread_buffer_size > options->global_buffer_size) {
+    options->global_buffer_size = options->thread_buffer_size;
+    options->buffer_count = div_total_by_per_unit(options->memory_size, options->global_buffer_size);
+  }
+  assert(options->global_buffer_size >= options->thread_buffer_size, "invariant");
+  assert(options->memory_size / options->global_buffer_size == options->buffer_count, "invariant");
+  assert(options->memory_size % options->global_buffer_size == 0, "invariant");
+}
+
+static void global_buffer_size(JfrMemoryOptions* options) {
+  assert(!options->memory_size_configured, "invariant");
+  page_size_align_up(options->thread_buffer_size);
+  if (options->thread_buffer_size > options->global_buffer_size) {
+    options->global_buffer_size = options->thread_buffer_size;
+  }
+  options->memory_size = multiply(options->global_buffer_size, options->buffer_count);
+  assert(options->global_buffer_size >= options->thread_buffer_size, "invariant");
+}
+
+static void thread_buffer_size(JfrMemoryOptions* options) {
+  assert(!options->global_buffer_size_configured, "invariant");
+  assert(options->thread_buffer_size_configured, "invariant");
+  page_size_align_up(options->thread_buffer_size);
+  options->global_buffer_size = div_total_by_units(options->memory_size, options->buffer_count);
+  if (options->thread_buffer_size > options->global_buffer_size) {
+    options->global_buffer_size = options->thread_buffer_size;
+    options->buffer_count = div_total_by_per_unit(options->memory_size, options->global_buffer_size);
+  }
+  assert(options->global_buffer_size >= options->thread_buffer_size, "invariant");
+}
+
+static void default_size(const JfrMemoryOptions* options) {
+  // no memory options explicitly set
+  // default values already statically adjusted
+  assert(!options->thread_buffer_size_configured, "invariant");
+  assert(!options->memory_size_configured, "invariant");
+  assert(!options->buffer_count_configured, "invariant");
+  assert(!options->global_buffer_size_configured, "invariant");
+}
+
+#ifdef ASSERT
+static void assert_post_condition(const JfrMemoryOptions* options) {
+  assert(options->memory_size % os::vm_page_size() == 0, "invariant");
+  assert(options->global_buffer_size % os::vm_page_size() == 0, "invariant");
+  assert(options->thread_buffer_size % os::vm_page_size() == 0, "invariant");
+  assert(options->buffer_count > 0, "invariant");
+}
+#endif
+
+// MEMORY SIZING ALGORITHM
+
+bool JfrMemorySizer::adjust_options(JfrMemoryOptions* options) {
+  assert(options != NULL, "invariant");
+
+  enum MemoryOptions {
+    MEMORY_SIZE = 1,
+    GLOBAL_BUFFER_SIZE = 2,
+    GLOBAL_BUFFER_COUNT = 4,
+    THREAD_BUFFER_SIZE = 8
+  };
+
+  // LEGEND
+  //
+  // M = "memorysize" option
+  // G = "globalbuffersize" option
+  // C = "numglobalbuffers" option
+  // T = "threadbuffersize" option
+  //
+  // The memory options comprise an n-set (a 4-set) = { M, G, C, T }
+  //
+  // Number of r-subsets = 5 (0, 1, 2, 3, 4) (including null set)
+  //
+  // Unordered selection:
+  //
+  // C(4, 0) = {} = NULL set = 1
+  // C(4, 1) = { (M), (G), (C), (T) } = 4
+  // C(4, 2) = { (M, G), (M, C), (M, T), (G, C), (G, T), (C, T) } = 6
+  // C(4, 3) = { (M, G, C), (M, G, T), (M, C, T), (G, C, T) } = 4
+  // C(4, 4) = { (M, G, C, T) } = 1
+  //
+  // in shorter terms: |P({ M, G, C, T })| = 16
+  //
+#define MG   (MEMORY_SIZE | GLOBAL_BUFFER_SIZE)
+#define MC   (MEMORY_SIZE | GLOBAL_BUFFER_COUNT)
+#define MT   (MEMORY_SIZE | THREAD_BUFFER_SIZE)
+#define MGC  (MG | GLOBAL_BUFFER_COUNT)
+#define MGT  (MG | THREAD_BUFFER_SIZE)
+#define MCT  (MC | THREAD_BUFFER_SIZE)
+#define MGCT (MGC | THREAD_BUFFER_SIZE)
+#define GC   (GLOBAL_BUFFER_SIZE | GLOBAL_BUFFER_COUNT)
+#define GT   (GLOBAL_BUFFER_SIZE | THREAD_BUFFER_SIZE)
+#define GCT  (GC | THREAD_BUFFER_SIZE)
+#define CT   (GLOBAL_BUFFER_COUNT | THREAD_BUFFER_SIZE)
+
+  int set_of_options = 0;
+
+  if (options->memory_size_configured) {
+    set_of_options |= MEMORY_SIZE;
+  }
+  if (options->global_buffer_size_configured) {
+    set_of_options |= GLOBAL_BUFFER_SIZE;
+  }
+  if (options->buffer_count_configured) {
+    set_of_options |= GLOBAL_BUFFER_COUNT;
+  }
+  if (options->thread_buffer_size_configured) {
+    set_of_options |= THREAD_BUFFER_SIZE;
+  }
+
+  switch (set_of_options) {
+    case MT:
+    case MEMORY_SIZE:
+      memory_and_thread_buffer_size(options);
+      break;
+    case MC:
+      memory_size_and_buffer_count(options);
+      break;
+    case MGT:
+      assert(options->thread_buffer_size_configured, "invariant");
+    case MG:
+      memory_size_and_global_buffer_size(options);
+      break;
+    case MGC:
+    case MGCT:
+      if (is_ambiguous(options)) {
+        // Let the user resolve the ambiguity by bailing.
+        return false;
+      }
+      all_options_set(options);
+      break;
+    case GCT:
+      assert(options->buffer_count_configured, "invariant");
+      assert(options->thread_buffer_size_configured, "invariant");
+    case GC:
+      assert(options->global_buffer_size_configured, "invariant");
+    case GT:
+    case GLOBAL_BUFFER_COUNT:
+    case GLOBAL_BUFFER_SIZE:
+      global_buffer_size(options);
+      break;
+    case MCT:
+      assert(options->memory_size_configured, "invariant");
+    case CT:
+      assert(options->buffer_count_configured, "invariant");
+    case THREAD_BUFFER_SIZE:
+      thread_buffer_size(options);
+      break;
+    default:
+      default_size(options);
+  }
+  DEBUG_ONLY(assert_post_condition(options);)
+  return true;
+}
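+
+// Illustrative example of the dispatch above: if only "memorysize" and "numglobalbuffers"
+// are configured, set_of_options == (MEMORY_SIZE | GLOBAL_BUFFER_COUNT) == MC, so
+// memory_size_and_buffer_count() derives the global buffer size as
+// memory_size / buffer_count (page aligned). If all four options are set (MGCT) and
+// globalbuffersize * numglobalbuffers != memorysize, adjust_options() returns false
+// and the caller reports the ambiguity to the user.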
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/service/jfrMemorySizer.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_RECORDER_SERVICE_JFRMEMORYSIZER_HPP
+#define SHARE_VM_JFR_RECORDER_SERVICE_JFRMEMORYSIZER_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+extern const julong MIN_BUFFER_COUNT;
+extern const julong MIN_GLOBAL_BUFFER_SIZE;
+extern const julong MIN_MEMORY_SIZE;
+extern const julong MIN_THREAD_BUFFER_SIZE;
+
+struct JfrMemoryOptions {
+  julong memory_size;
+  julong global_buffer_size;
+  julong buffer_count;
+  julong thread_buffer_size;
+  bool memory_size_configured;
+  bool global_buffer_size_configured;
+  bool buffer_count_configured;
+  bool thread_buffer_size_configured;
+};
+
+//
+// Encapsulates sizing of memory options
+// The options parameter is modified with updated values.
+//
+class JfrMemorySizer : AllStatic {
+ public:
+  static bool adjust_options(JfrMemoryOptions* options);
+};
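+
+// Minimal usage sketch (hypothetical values; in the VM the struct is populated from
+// the parsed -XX:FlightRecorderOptions arguments before the call):
+//
+//   JfrMemoryOptions options;
+//   memset(&options, 0, sizeof(options));
+//   options.memory_size = 10 * M;
+//   options.global_buffer_size = 512 * K;
+//   options.buffer_count = 20;
+//   options.thread_buffer_size = 8 * K;
+//   options.memory_size_configured = true;   // only "memorysize" was explicitly set
+//   if (!JfrMemorySizer::adjust_options(&options)) {
+//     // the explicitly set options are contradictory; report and bail
+//   }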
+
+#endif // SHARE_VM_JFR_RECORDER_SERVICE_JFRMEMORYSIZER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/service/jfrOptionSet.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,729 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/dcmd/jfrDcmds.hpp"
+#include "jfr/recorder/service/jfrMemorySizer.hpp"
+#include "jfr/recorder/service/jfrOptionSet.hpp"
+#include "jfr/utilities/jfrAllocation.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/java.hpp"
+#include "runtime/thread.inline.hpp"
+#include "services/diagnosticArgument.hpp"
+#include "services/diagnosticFramework.hpp"
+#include "utilities/growableArray.hpp"
+#include "utilities/ostream.hpp"
+
+struct ObsoleteOption {
+  const char* name;
+  const char* message;
+};
+
+static const ObsoleteOption OBSOLETE_OPTIONS[] = {
+  {"checkpointbuffersize", ""},
+  {"maxsize",              "Use -XX:StartFlightRecording=maxsize=... instead."},
+  {"maxage",               "Use -XX:StartFlightRecording=maxage=... instead."},
+  {"settings",             "Use -XX:StartFlightRecording=settings=... instead."},
+  {"defaultrecording",     "Use -XX:StartFlightRecording=disk=false to create an in-memory recording."},
+  {"disk",                 "Use -XX:StartFlightRecording=disk=... instead."},
+  {"dumponexit",           "Use -XX:StartFlightRecording=dumponexit=... instead."},
+  {"dumponexitpath",       "Use -XX:StartFlightRecording=filename=... instead."},
+  {"loglevel",             "Use -Xlog:jfr=... instead."}
+};
+
+jlong JfrOptionSet::max_chunk_size() {
+  return _max_chunk_size;
+}
+
+void JfrOptionSet::set_max_chunk_size(jlong value) {
+  _max_chunk_size = value;
+}
+
+jlong JfrOptionSet::global_buffer_size() {
+  return _global_buffer_size;
+}
+
+void JfrOptionSet::set_global_buffer_size(jlong value) {
+  _global_buffer_size = value;
+}
+
+jlong JfrOptionSet::thread_buffer_size() {
+  return _thread_buffer_size;
+}
+
+void JfrOptionSet::set_thread_buffer_size(jlong value) {
+  _thread_buffer_size = value;
+}
+
+jlong JfrOptionSet::memory_size() {
+  return _memory_size;
+}
+
+void JfrOptionSet::set_memory_size(jlong value) {
+  _memory_size = value;
+}
+
+jlong JfrOptionSet::num_global_buffers() {
+  return _num_global_buffers;
+}
+
+void JfrOptionSet::set_num_global_buffers(jlong value) {
+  _num_global_buffers = value;
+}
+
+jint JfrOptionSet::old_object_queue_size() {
+  return (jint)_old_object_queue_size;
+}
+
+void JfrOptionSet::set_old_object_queue_size(jlong value) {
+  _old_object_queue_size = value;
+}
+
+u4 JfrOptionSet::stackdepth() {
+  return _stack_depth;
+}
+
+static const u4 STACK_DEPTH_DEFAULT = 64;
+static const u4 MIN_STACK_DEPTH = 1;
+static const u4 MAX_STACK_DEPTH = 2048;
+
+void JfrOptionSet::set_stackdepth(u4 depth) {
+  if (depth < MIN_STACK_DEPTH) {
+    _stack_depth = MIN_STACK_DEPTH;
+  } else if (depth > MAX_STACK_DEPTH) {
+    _stack_depth = MAX_STACK_DEPTH;
+  } else {
+    _stack_depth = depth;
+  }
+}
+
+bool JfrOptionSet::sample_threads() {
+  return _sample_threads == JNI_TRUE;
+}
+
+void JfrOptionSet::set_sample_threads(jboolean sample) {
+  _sample_threads = sample;
+}
+
+bool JfrOptionSet::can_retransform() {
+  return _retransform == JNI_TRUE;
+}
+
+void JfrOptionSet::set_retransform(jboolean value) {
+  _retransform = value;
+}
+
+bool JfrOptionSet::sample_protection() {
+  return _sample_protection == JNI_TRUE;
+}
+
+#ifdef ASSERT
+void JfrOptionSet::set_sample_protection(jboolean protection) {
+  _sample_protection = protection;
+}
+#endif
+
+bool JfrOptionSet::compressed_integers() {
+  // Set this to false for debugging purposes.
+  return true;
+}
+
+bool JfrOptionSet::allow_retransforms() {
+#if INCLUDE_JVMTI
+  return true;
+#else
+  return false;
+#endif
+}
+
+bool JfrOptionSet::allow_event_retransforms() {
+  return allow_retransforms() && (DumpSharedSpaces || can_retransform());
+}
+
+// default options for the dcmd parser
+const char* const default_repository = NULL;
+const char* const default_global_buffer_size = "512k";
+const char* const default_num_global_buffers = "20";
+const char* const default_memory_size = "10m";
+const char* const default_thread_buffer_size = "8k";
+const char* const default_max_chunk_size = "12m";
+const char* const default_sample_threads = "true";
+const char* const default_stack_depth = "64";
+const char* const default_retransform = "true";
+const char* const default_old_object_queue_size = "256";
+DEBUG_ONLY(const char* const default_sample_protection = "false";)
+
+// statics
+static DCmdArgument<char*> _dcmd_repository(
+  "repository",
+  "Flight recorder disk repository location",
+  "STRING",
+  false,
+  default_repository);
+
+static DCmdArgument<MemorySizeArgument> _dcmd_threadbuffersize(
+  "threadbuffersize",
+  "Thread buffer size",
+  "MEMORY SIZE",
+  false,
+  default_thread_buffer_size);
+
+static DCmdArgument<MemorySizeArgument> _dcmd_memorysize(
+  "memorysize",
+  "Size of memory to be used by Flight Recorder",
+  "MEMORY SIZE",
+  false,
+  default_memory_size);
+
+static DCmdArgument<MemorySizeArgument> _dcmd_globalbuffersize(
+  "globalbuffersize",
+  "Global buffer size",
+  "MEMORY SIZE",
+  false,
+  default_global_buffer_size);
+
+static DCmdArgument<jlong> _dcmd_numglobalbuffers(
+  "numglobalbuffers",
+  "Number of global buffers",
+  "JULONG",
+  false,
+  default_num_global_buffers);
+
+static DCmdArgument<MemorySizeArgument> _dcmd_maxchunksize(
+  "maxchunksize",
+  "Maximum size of a single repository disk chunk",
+  "MEMORY SIZE",
+  false,
+  default_max_chunk_size);
+
+static DCmdArgument<jlong> _dcmd_old_object_queue_size(
+  "old-object-queue-size",
+  "Maximum number of old objects to track",
+  "JINT",
+  false,
+  default_old_object_queue_size);
+
+static DCmdArgument<bool> _dcmd_sample_threads(
+  "samplethreads",
+  "Thread sampling enable / disable (only sampling when event enabled and sampling enabled)",
+  "BOOLEAN",
+  false,
+  default_sample_threads);
+
+#ifdef ASSERT
+static DCmdArgument<bool> _dcmd_sample_protection(
+  "sampleprotection",
+  "Safeguard for stackwalking while sampling threads (false by default)",
+  "BOOLEAN",
+  false,
+  default_sample_protection);
+#endif
+
+static DCmdArgument<jlong> _dcmd_stackdepth(
+  "stackdepth",
+  "Stack depth for stacktraces (minimum 1, maximum 2048)",
+  "JULONG",
+  false,
+  default_stack_depth);
+
+static DCmdArgument<bool> _dcmd_retransform(
+  "retransform",
+  "If event classes should be instrumented using JVMTI (by default true)",
+  "BOOLEAN",
+  true,
+  default_retransform);
+
+static DCmdParser _parser;
+
+static void register_parser_options() {
+  _parser.add_dcmd_option(&_dcmd_repository);
+  _parser.add_dcmd_option(&_dcmd_threadbuffersize);
+  _parser.add_dcmd_option(&_dcmd_memorysize);
+  _parser.add_dcmd_option(&_dcmd_globalbuffersize);
+  _parser.add_dcmd_option(&_dcmd_numglobalbuffers);
+  _parser.add_dcmd_option(&_dcmd_maxchunksize);
+  _parser.add_dcmd_option(&_dcmd_stackdepth);
+  _parser.add_dcmd_option(&_dcmd_sample_threads);
+  _parser.add_dcmd_option(&_dcmd_retransform);
+  _parser.add_dcmd_option(&_dcmd_old_object_queue_size);
+  DEBUG_ONLY(_parser.add_dcmd_option(&_dcmd_sample_protection);)
+}
+
+static bool parse_flight_recorder_options_internal(TRAPS) {
+  if (FlightRecorderOptions == NULL) {
+    return true;
+  }
+  const size_t length = strlen((const char*)FlightRecorderOptions);
+  CmdLine cmdline((const char*)FlightRecorderOptions, length, true);
+  _parser.parse(&cmdline, ',', THREAD);
+  if (HAS_PENDING_EXCEPTION) {
+    for (int index = 0; index < 9; index++) {
+      ObsoleteOption option = OBSOLETE_OPTIONS[index];
+      const char* p = strstr((const char*)FlightRecorderOptions, option.name);
+      const size_t option_length = strlen(option.name);
+      if (p != NULL && p[option_length] == '=') {
+        tty->print_cr("-XX:FlightRecorderOptions=%s=... has been removed. %s", option.name, option.message);
+        return false;
+      }
+    }
+    ResourceMark rm(THREAD);
+    oop message = java_lang_Throwable::message(PENDING_EXCEPTION);
+    if (message != NULL) {
+      const char* msg = java_lang_String::as_utf8_string(message);
+      tty->print_cr("%s", msg);
+    }
+    CLEAR_PENDING_EXCEPTION;
+    return false;
+  }
+  return true;
+}
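+
+// Examples of the strings handled above (values are illustrative):
+//
+//   -XX:FlightRecorderOptions=memorysize=100m,stackdepth=128,samplethreads=true
+//     parses cleanly against the registered dcmd options.
+//
+//   -XX:FlightRecorderOptions=maxsize=100m
+//     fails to parse; "maxsize" is found in OBSOLETE_OPTIONS, so the user sees:
+//     "-XX:FlightRecorderOptions=maxsize=... has been removed. Use -XX:StartFlightRecording=maxsize=... instead."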
+
+jlong JfrOptionSet::_max_chunk_size = 0;
+jlong JfrOptionSet::_global_buffer_size = 0;
+jlong JfrOptionSet::_thread_buffer_size = 0;
+jlong JfrOptionSet::_memory_size = 0;
+jlong JfrOptionSet::_num_global_buffers = 0;
+jlong JfrOptionSet::_old_object_queue_size = 0;
+u4 JfrOptionSet::_stack_depth = STACK_DEPTH_DEFAULT;
+jboolean JfrOptionSet::_sample_threads = JNI_TRUE;
+jboolean JfrOptionSet::_retransform = JNI_TRUE;
+#ifdef ASSERT
+jboolean JfrOptionSet::_sample_protection = JNI_FALSE;
+#else
+jboolean JfrOptionSet::_sample_protection = JNI_TRUE;
+#endif
+
+bool JfrOptionSet::initialize(Thread* thread) {
+  register_parser_options();
+  if (!parse_flight_recorder_options_internal(thread)) {
+    return false;
+  }
+  if (_dcmd_retransform.is_set()) {
+    set_retransform(_dcmd_retransform.value());
+  }
+  set_old_object_queue_size(_dcmd_old_object_queue_size.value());
+  return adjust_memory_options();
+}
+
+bool JfrOptionSet::configure(TRAPS) {
+  if (FlightRecorderOptions == NULL) {
+    return true;
+  }
+  ResourceMark rm(THREAD);
+  bufferedStream st;
+  // delegate to DCmd execution
+  JfrConfigureFlightRecorderDCmd configure(&st, false);
+  configure._repository_path.set_is_set(_dcmd_repository.is_set());
+  char* repo = _dcmd_repository.value();
+  if (repo != NULL) {
+    const size_t len = strlen(repo);
+    char* repo_copy = JfrCHeapObj::new_array<char>(len + 1);
+    if (NULL == repo_copy) {
+      return false;
+    }
+    strncpy(repo_copy, repo, len + 1);
+    configure._repository_path.set_value(repo_copy);
+  }
+
+  configure._stack_depth.set_is_set(_dcmd_stackdepth.is_set());
+  configure._stack_depth.set_value(_dcmd_stackdepth.value());
+
+  configure._thread_buffer_size.set_is_set(_dcmd_threadbuffersize.is_set());
+  configure._thread_buffer_size.set_value(_dcmd_threadbuffersize.value()._size);
+
+  configure._global_buffer_count.set_is_set(_dcmd_numglobalbuffers.is_set());
+  configure._global_buffer_count.set_value(_dcmd_numglobalbuffers.value());
+
+  configure._global_buffer_size.set_is_set(_dcmd_globalbuffersize.is_set());
+  configure._global_buffer_size.set_value(_dcmd_globalbuffersize.value()._size);
+
+  configure._max_chunk_size.set_is_set(_dcmd_maxchunksize.is_set());
+  configure._max_chunk_size.set_value(_dcmd_maxchunksize.value()._size);
+
+  configure._memory_size.set_is_set(_dcmd_memorysize.is_set());
+  configure._memory_size.set_value(_dcmd_memorysize.value()._size);
+
+  configure._sample_threads.set_is_set(_dcmd_sample_threads.is_set());
+  configure._sample_threads.set_value(_dcmd_sample_threads.value());
+
+  configure.execute(DCmd_Source_Internal, THREAD);
+
+  if (HAS_PENDING_EXCEPTION) {
+    java_lang_Throwable::print(PENDING_EXCEPTION, tty);
+    CLEAR_PENDING_EXCEPTION;
+    return false;
+  }
+  return true;
+}
+
+template <typename Argument>
+static julong divide_with_user_unit(Argument& memory_argument, julong value) {
+  if (memory_argument.value()._size != memory_argument.value()._val) {
+    switch (memory_argument.value()._multiplier) {
+    case 'k': case 'K':
+      return value / K;
+    case 'm': case 'M':
+      return value / M;
+    case 'g': case 'G':
+      return value / G;
+    }
+  }
+  return value;
+}
+
+template <typename Argument>
+static void log_lower_than_min_value(Argument& memory_argument, julong min_value) {
+  if (memory_argument.value()._size != memory_argument.value()._val) {
+    // has multiplier
+    tty->print_cr(
+      "This value is lower than the minimum size required " JULONG_FORMAT "%c",
+      divide_with_user_unit(memory_argument, min_value),
+      memory_argument.value()._multiplier);
+    return;
+  }
+  tty->print_cr(
+    "This value is lower than the minimum size required " JULONG_FORMAT,
+    divide_with_user_unit(memory_argument, min_value));
+}
+
+template <typename Argument>
+static void log_set_value(Argument& memory_argument) {
+  if (memory_argument.value()._size != memory_argument.value()._val) {
+    // has multiplier
+    tty->print_cr(
+      "Value specified for option \"%s\" is " JULONG_FORMAT "%c",
+      memory_argument.name(),
+      memory_argument.value()._val,
+      memory_argument.value()._multiplier);
+    return;
+  }
+  tty->print_cr(
+    "Value specified for option \"%s\" is " JULONG_FORMAT,
+    memory_argument.name(), memory_argument.value()._val);
+}
+
+template <typename MemoryArg>
+static void log_adjustments(MemoryArg& original_memory_size, julong new_memory_size, const char* msg) {
+  if (LogJFR && Verbose) tty->print_cr(
+    "%s size (original) " JULONG_FORMAT " B (user defined: %s)",
+    msg,
+    original_memory_size.value()._size,
+    original_memory_size.is_set() ? "true" : "false");
+  if (LogJFR && Verbose) tty->print_cr(
+    "%s size (adjusted) " JULONG_FORMAT " B (modified: %s)",
+    msg,
+    new_memory_size,
+    original_memory_size.value()._size != new_memory_size ? "true" : "false");
+  if (LogJFR && Verbose) tty->print_cr(
+    "%s size (adjustment) %s" JULONG_FORMAT " B",
+    msg,
+    new_memory_size < original_memory_size.value()._size ? "-" : "+",
+    new_memory_size < original_memory_size.value()._size ?
+    original_memory_size.value()._size - new_memory_size :
+    new_memory_size - original_memory_size.value()._size);
+}
+
+// All "triangular" options are explicitly set;
+// check that they are congruent and not causing
+// an ambiguous situation
+template <typename MemoryArg, typename NumberArg>
+static bool check_for_ambiguity(MemoryArg& memory_size, MemoryArg& global_buffer_size, NumberArg& num_global_buffers) {
+  assert(memory_size.is_set(), "invariant");
+  assert(global_buffer_size.is_set(), "invariant");
+  assert(num_global_buffers.is_set(), "invariant");
+  const julong calc_size = global_buffer_size.value()._size * (julong)num_global_buffers.value();
+  if (calc_size != memory_size.value()._size) {
+    // ambiguous
+    log_set_value(global_buffer_size);
+    tty->print_cr(
+      "Value specified for option \"%s\" is " JLONG_FORMAT,
+      num_global_buffers.name(), num_global_buffers.value());
+    log_set_value(memory_size);
+    tty->print_cr(
+      "These values are causing an ambiguity when trying to determine how much memory to use");
+    tty->print_cr("\"%s\" * \"%s\" does not equal \"%s\"",
+      global_buffer_size.name(),
+      num_global_buffers.name(),
+      memory_size.name());
+    tty->print_cr(
+      "Try to remove one of the involved options or make sure they are unambiguous");
+    return false;
+  }
+  return true;
+}
+
+template <typename Argument>
+static bool ensure_minimum_count(Argument& buffer_count_argument, jlong min_count) {
+  if (buffer_count_argument.value() < min_count) {
+    tty->print_cr(
+      "Value specified for option \"%s\" is " JLONG_FORMAT,
+      buffer_count_argument.name(), buffer_count_argument.value());
+    tty->print_cr(
+      "This value is lower than the minimum required number " JLONG_FORMAT,
+      min_count);
+    return false;
+  }
+  return true;
+}
+
+// global buffer size and num global buffers specified;
+// ensure that this particular combination is higher than the minimum memory size
+template <typename MemoryArg, typename NumberArg>
+static bool ensure_calculated_gteq(MemoryArg& global_buffer_size, NumberArg& num_global_buffers, julong min_value) {
+  assert(global_buffer_size.is_set(), "invariant");
+  assert(num_global_buffers.is_set(), "invariant");
+  const julong calc_size = global_buffer_size.value()._size * (julong)num_global_buffers.value();
+  if (calc_size < min_value) {
+    log_set_value(global_buffer_size);
+    tty->print_cr(
+      "Value specified for option \"%s\" is " JLONG_FORMAT,
+      num_global_buffers.name(), num_global_buffers.value());
+    tty->print_cr("\"%s\" * \"%s\" (" JULONG_FORMAT
+      ") is lower than minimum memory size required " JULONG_FORMAT,
+      global_buffer_size.name(),
+      num_global_buffers.name(),
+      calc_size,
+      min_value);
+    return false;
+  }
+  return true;
+}
+
+template <typename Argument>
+static bool ensure_first_gteq_second(Argument& first_argument, Argument& second_argument) {
+  if (second_argument.value()._size > first_argument.value()._size) {
+    log_set_value(first_argument);
+    log_set_value(second_argument);
+    tty->print_cr(
+      "The value for option \"%s\" should not be larger than the value specified for option \"%s\"",
+      second_argument.name(), first_argument.name());
+    return false;
+  }
+  return true;
+}
+
+static bool valid_memory_relations(const JfrMemoryOptions& options) {
+  if (options.global_buffer_size_configured) {
+    if (options.memory_size_configured) {
+      if (!ensure_first_gteq_second(_dcmd_memorysize, _dcmd_globalbuffersize)) {
+        return false;
+      }
+    }
+    if (options.thread_buffer_size_configured) {
+      if (!ensure_first_gteq_second(_dcmd_globalbuffersize, _dcmd_threadbuffersize)) {
+        return false;
+      }
+    }
+    if (options.buffer_count_configured) {
+      if (!ensure_calculated_gteq(_dcmd_globalbuffersize, _dcmd_numglobalbuffers, MIN_MEMORY_SIZE)) {
+        return false;
+      }
+    }
+  }
+  return true;
+}
+
+static void post_process_adjusted_memory_options(const JfrMemoryOptions& options) {
+  assert(options.memory_size >= MIN_MEMORY_SIZE, "invariant");
+  assert(options.global_buffer_size >= MIN_GLOBAL_BUFFER_SIZE, "invariant");
+  assert(options.buffer_count >= MIN_BUFFER_COUNT, "invariant");
+  assert(options.thread_buffer_size >= MIN_THREAD_BUFFER_SIZE, "invariant");
+  log_adjustments(_dcmd_memorysize, options.memory_size, "Memory");
+  log_adjustments(_dcmd_globalbuffersize, options.global_buffer_size, "Global buffer");
+  log_adjustments(_dcmd_threadbuffersize, options.thread_buffer_size, "Thread local buffer");
+  if (LogJFR && Verbose) tty->print_cr("Number of global buffers (original) " JLONG_FORMAT " (user defined: %s)",
+    _dcmd_numglobalbuffers.value(),
+    _dcmd_numglobalbuffers.is_set() ? "true" : "false");
+  if (LogJFR && Verbose) tty->print_cr("Number of global buffers (adjusted) " JULONG_FORMAT " (modified: %s)",
+    options.buffer_count,
+    _dcmd_numglobalbuffers.value() != (jlong)options.buffer_count ? "true" : "false");
+  if (LogJFR && Verbose) tty->print_cr("Number of global buffers (adjustment) %s" JLONG_FORMAT,
+    (jlong)options.buffer_count < _dcmd_numglobalbuffers.value() ? "" : "+",
+    (jlong)options.buffer_count - _dcmd_numglobalbuffers.value());
+
+  MemorySizeArgument adjusted_memory_size;
+  adjusted_memory_size._val = divide_with_user_unit(_dcmd_memorysize, options.memory_size);
+  adjusted_memory_size._multiplier = _dcmd_memorysize.value()._multiplier;
+  adjusted_memory_size._size = options.memory_size;
+
+  MemorySizeArgument adjusted_global_buffer_size;
+  adjusted_global_buffer_size._val = divide_with_user_unit(_dcmd_globalbuffersize, options.global_buffer_size);
+  adjusted_global_buffer_size._multiplier = _dcmd_globalbuffersize.value()._multiplier;
+  adjusted_global_buffer_size._size = options.global_buffer_size;
+
+  MemorySizeArgument adjusted_thread_buffer_size;
+  adjusted_thread_buffer_size._val = divide_with_user_unit(_dcmd_threadbuffersize, options.thread_buffer_size);
+  adjusted_thread_buffer_size._multiplier = _dcmd_threadbuffersize.value()._multiplier;
+  adjusted_thread_buffer_size._size = options.thread_buffer_size;
+
+  // store back to dcmd
+  _dcmd_memorysize.set_value(adjusted_memory_size);
+  _dcmd_memorysize.set_is_set(true);
+  _dcmd_globalbuffersize.set_value(adjusted_global_buffer_size);
+  _dcmd_globalbuffersize.set_is_set(true);
+  _dcmd_numglobalbuffers.set_value((jlong)options.buffer_count);
+  _dcmd_numglobalbuffers.set_is_set(true);
+  _dcmd_threadbuffersize.set_value(adjusted_thread_buffer_size);
+  _dcmd_threadbuffersize.set_is_set(true);
+}
+
+static void initialize_memory_options_from_dcmd(JfrMemoryOptions& options) {
+  options.memory_size = _dcmd_memorysize.value()._size;
+  options.global_buffer_size = MAX2<julong>(_dcmd_globalbuffersize.value()._size, (julong)os::vm_page_size());
+  options.buffer_count = (julong)_dcmd_numglobalbuffers.value();
+  options.thread_buffer_size = MAX2<julong>(_dcmd_threadbuffersize.value()._size, (julong)os::vm_page_size());
+  // determine which options have been explicitly set
+  options.memory_size_configured = _dcmd_memorysize.is_set();
+  options.global_buffer_size_configured = _dcmd_globalbuffersize.is_set();
+  options.buffer_count_configured = _dcmd_numglobalbuffers.is_set();
+  options.thread_buffer_size_configured = _dcmd_threadbuffersize.is_set();
+  assert(options.memory_size >= MIN_MEMORY_SIZE, "invariant");
+  assert(options.global_buffer_size >= MIN_GLOBAL_BUFFER_SIZE, "invariant");
+  assert(options.buffer_count >= MIN_BUFFER_COUNT, "invariant");
+  assert(options.thread_buffer_size >= MIN_THREAD_BUFFER_SIZE, "invariant");
+}
+
+template <typename Argument>
+static bool ensure_gteq(Argument& memory_argument, const jlong value) {
+  if ((jlong)memory_argument.value()._size < value) {
+    log_set_value(memory_argument);
+    log_lower_than_min_value(memory_argument, value);
+    return false;
+  }
+  return true;
+}
+
+static bool ensure_valid_minimum_sizes() {
+  // ensure valid minimum memory sizes
+  if (_dcmd_memorysize.is_set()) {
+    if (!ensure_gteq(_dcmd_memorysize, MIN_MEMORY_SIZE)) {
+      return false;
+    }
+  }
+  if (_dcmd_globalbuffersize.is_set()) {
+    if (!ensure_gteq(_dcmd_globalbuffersize, MIN_GLOBAL_BUFFER_SIZE)) {
+      return false;
+    }
+  }
+  if (_dcmd_numglobalbuffers.is_set()) {
+    if (!ensure_minimum_count(_dcmd_numglobalbuffers, MIN_BUFFER_COUNT)) {
+      return false;
+    }
+  }
+  if (_dcmd_threadbuffersize.is_set()) {
+    if (!ensure_gteq(_dcmd_threadbuffersize, MIN_THREAD_BUFFER_SIZE)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+/**
+ * Starting from the memory values supplied by the user, sanitize them,
+ * enforce min/max rules and adjust them into a consistent set of options.
+ *
+ * Adjusted memory sizes will be page aligned.
+ */
+bool JfrOptionSet::adjust_memory_options() {
+  if (!ensure_valid_minimum_sizes()) {
+    return false;
+  }
+  JfrMemoryOptions options;
+  initialize_memory_options_from_dcmd(options);
+  if (!valid_memory_relations(options)) {
+    return false;
+  }
+  if (!JfrMemorySizer::adjust_options(&options)) {
+    if (!check_for_ambiguity(_dcmd_memorysize, _dcmd_globalbuffersize, _dcmd_numglobalbuffers)) {
+      return false;
+    }
+  }
+  post_process_adjusted_memory_options(options);
+  return true;
+}
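+
+// Illustrative walk-through (partly an assumption, since JfrMemorySizer is defined
+// elsewhere): a user who only passes
+//
+//   -XX:FlightRecorderOptions=memorysize=10m
+//
+// clears ensure_valid_minimum_sizes() (provided 10m is at or above MIN_MEMORY_SIZE)
+// and valid_memory_relations() (no buffer options are set), after which
+// JfrMemorySizer::adjust_options() is expected to derive page-aligned values for
+// globalbuffersize, numglobalbuffers and threadbuffersize, which
+// post_process_adjusted_memory_options() then writes back into the dcmd arguments.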
+
+bool JfrOptionSet::parse_flight_recorder_option(const JavaVMOption** option, char* delimiter) {
+  assert(option != NULL, "invariant");
+  assert(delimiter != NULL, "invariant");
+  assert((*option)->optionString != NULL, "invariant");
+  assert(strncmp((*option)->optionString, "-XX:FlightRecorderOptions", 25) == 0, "invariant");
+  if (*delimiter == '\0') {
+    // -XX:FlightRecorderOptions without any delimiter and values
+  } else {
+    // -XX:FlightRecorderOptions[=|:]
+    // set delimiter to '='
+    *delimiter = '=';
+  }
+  return false;
+}
+
+static GrowableArray<const char*>* startup_recording_options_array = NULL;
+
+bool JfrOptionSet::parse_start_flight_recording_option(const JavaVMOption** option, char* delimiter) {
+  assert(option != NULL, "invariant");
+  assert(delimiter != NULL, "invariant");
+  assert((*option)->optionString != NULL, "invariant");
+  assert(strncmp((*option)->optionString, "-XX:StartFlightRecording", 24) == 0, "invariant");
+  const char* value = NULL;
+  if (*delimiter == '\0') {
+    // -XX:StartFlightRecording without any delimiter and values
+    // Add dummy value "dumponexit=false" so -XX:StartFlightRecording can be used without explicit values.
+    // The existing option->optionString points to stack memory so no need to deallocate.
+    const_cast<JavaVMOption*>(*option)->optionString = (char*)"-XX:StartFlightRecording=dumponexit=false";
+    value = (*option)->optionString + 25;
+  } else {
+    // -XX:StartFlightRecording[=|:]
+    // set delimiter to '='
+    *delimiter = '=';
+    value = delimiter + 1;
+  }
+  assert(value != NULL, "invariant");
+  const size_t value_length = strlen(value);
+
+  if (startup_recording_options_array == NULL) {
+    startup_recording_options_array = new (ResourceObj::C_HEAP, mtTracing) GrowableArray<const char*>(8, true, mtTracing);
+  }
+  assert(startup_recording_options_array != NULL, "invariant");
+  char* const startup_value = NEW_C_HEAP_ARRAY(char, value_length + 1, mtTracing);
+  strncpy(startup_value, value, value_length + 1);
+  assert(strncmp(startup_value, value, value_length) == 0, "invariant");
+  startup_recording_options_array->append(startup_value);
+  return false;
+}
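+
+// Illustrative examples (the recording sub-options shown are placeholder text; the
+// value part is copied verbatim onto the C heap and appended to
+// startup_recording_options_array):
+//
+//   -XX:StartFlightRecording                        -> "dumponexit=false" (dummy value substituted above)
+//   -XX:StartFlightRecording=duration=60s,name=r1   -> "duration=60s,name=r1"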
+
+const GrowableArray<const char*>* JfrOptionSet::startup_recording_options() {
+  return startup_recording_options_array;
+}
+
+void JfrOptionSet::release_startup_recording_options() {
+  if (startup_recording_options_array != NULL) {
+    const int length = startup_recording_options_array->length();
+    for (int i = 0; i < length; ++i) {
+      FREE_C_HEAP_ARRAY(char, startup_recording_options_array->at(i), mtTracing);
+    }
+    delete startup_recording_options_array;
+    startup_recording_options_array = NULL;
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/service/jfrOptionSet.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_RECORDER_SERVICE_JFROPTIONSET_HPP
+#define SHARE_VM_JFR_RECORDER_SERVICE_JFROPTIONSET_HPP
+
+#include "jni.h"
+#include "memory/allocation.hpp"
+#include "utilities/exceptions.hpp"
+
+template <typename>
+class GrowableArray;
+
+//
+// Command-line options and defaults
+//
+class JfrOptionSet : public AllStatic {
+  friend class JfrRecorder;
+ private:
+  static jlong _max_chunk_size;
+  static jlong _global_buffer_size;
+  static jlong _thread_buffer_size;
+  static jlong _memory_size;
+  static jlong _num_global_buffers;
+  static jlong _old_object_queue_size;
+  static u4 _stack_depth;
+  static jboolean _sample_threads;
+  static jboolean _retransform;
+  static jboolean _sample_protection;
+
+  static bool initialize(Thread* thread);
+  static bool configure(TRAPS);
+  static bool adjust_memory_options();
+
+ public:
+  static jlong max_chunk_size();
+  static void set_max_chunk_size(jlong value);
+  static jlong global_buffer_size();
+  static void set_global_buffer_size(jlong value);
+  static jlong thread_buffer_size();
+  static void set_thread_buffer_size(jlong value);
+  static jlong memory_size();
+  static void set_memory_size(jlong value);
+  static jlong num_global_buffers();
+  static void set_num_global_buffers(jlong value);
+  static jint old_object_queue_size();
+  static void set_old_object_queue_size(jlong value);
+  static u4 stackdepth();
+  static void set_stackdepth(u4 depth);
+  static bool sample_threads();
+  static void set_sample_threads(jboolean sample);
+  static bool can_retransform();
+  static void set_retransform(jboolean value);
+  static bool compressed_integers();
+  static bool allow_retransforms();
+  static bool allow_event_retransforms();
+  static bool sample_protection();
+  DEBUG_ONLY(static void set_sample_protection(jboolean protection);)
+
+  static bool parse_flight_recorder_option(const JavaVMOption** option, char* delimiter);
+  static bool parse_start_flight_recording_option(const JavaVMOption** option, char* delimiter);
+  static const GrowableArray<const char*>* startup_recording_options();
+  static void release_startup_recording_options();
+};
+
+#endif // SHARE_VM_JFR_RECORDER_SERVICE_JFROPTIONSET_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/service/jfrPostBox.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/recorder/service/jfrPostBox.hpp"
+#include "jfr/utilities/jfrTryLock.hpp"
+#include "runtime/atomic.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
+#include "runtime/thread.inline.hpp"
+
+#define MSG_IS_SYNCHRONOUS ( (MSGBIT(MSG_ROTATE)) |          \
+                             (MSGBIT(MSG_STOP))   |          \
+                             (MSGBIT(MSG_START))  |          \
+                             (MSGBIT(MSG_CLONE_IN_MEMORY)) | \
+                             (MSGBIT(MSG_VM_ERROR))          \
+                           )
+
+static JfrPostBox* _instance = NULL;
+
+JfrPostBox& JfrPostBox::instance() {
+  return *_instance;
+}
+
+JfrPostBox* JfrPostBox::create() {
+  assert(_instance == NULL, "invariant");
+  _instance = new JfrPostBox();
+  return _instance;
+}
+
+void JfrPostBox::destroy() {
+  assert(_instance != NULL, "invariant");
+  delete _instance;
+  _instance = NULL;
+}
+
+JfrPostBox::JfrPostBox() :
+  _msg_read_serial(0),
+  _msg_handled_serial(0),
+  _messages(0),
+  _has_waiters(false) {}
+
+static bool is_thread_lock_aversive() {
+  Thread* const thread = Thread::current();
+  return (thread->is_Java_thread() && ((JavaThread*)thread)->thread_state() != _thread_in_vm) || thread->is_VM_thread();
+}
+
+static bool is_synchronous(int messages) {
+  return ((messages & MSG_IS_SYNCHRONOUS) != 0);
+}
+
+void JfrPostBox::post(JFR_Msg msg) {
+  const int the_message = MSGBIT(msg);
+  if (is_thread_lock_aversive()) {
+    deposit(the_message);
+    return;
+  }
+  if (!is_synchronous(the_message)) {
+    asynchronous_post(the_message);
+    return;
+  }
+  synchronous_post(the_message);
+}
+
+void JfrPostBox::deposit(int new_messages) {
+  while (true) {
+    const int current_msgs = OrderAccess::load_acquire((int*)&_messages);
+    // OR the new message
+    const int exchange_value = current_msgs | new_messages;
+    const int result = Atomic::cmpxchg(exchange_value, &_messages, current_msgs);
+    if (result == current_msgs) {
+      return;
+    }
+    /* Some other thread just set exactly what this thread wanted */
+    if ((result & new_messages) == new_messages) {
+      return;
+    }
+  }
+}
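+
+// Worked example (illustrative, using the MSGBIT encoding from jfrPostBox.hpp):
+// with _messages currently 0x10 (MSG_FULLBUFFER pending), deposit(MSGBIT(MSG_ROTATE))
+// attempts to CAS 0x10 -> 0x18. Should a racing deposit win but the returned value
+// already contain 0x8, the (result & new_messages) test observes MSG_ROTATE set and
+// the loop exits without retrying.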
+
+void JfrPostBox::asynchronous_post(int msg) {
+  assert(!is_synchronous(msg), "invariant");
+  deposit(msg);
+  JfrMonitorTryLock try_msg_lock(JfrMsg_lock);
+  if (try_msg_lock.acquired()) {
+    JfrMsg_lock->notify_all();
+  }
+}
+
+void JfrPostBox::synchronous_post(int msg) {
+  assert(is_synchronous(msg), "invariant");
+  assert(!JfrMsg_lock->owned_by_self(), "should not hold JfrMsg_lock here!");
+  MutexLockerEx msg_lock(JfrMsg_lock);
+  deposit(msg);
+  // serial_id is used to check when the message posted here has been processed.
+  // _msg_read_serial is read under JfrMsg_lock protection.
+  const uintptr_t serial_id = (uintptr_t)OrderAccess::load_ptr_acquire((intptr_t*)&_msg_read_serial) + 1;
+  JfrMsg_lock->notify_all();
+  while (!is_message_processed(serial_id)) {
+    JfrMsg_lock->wait();
+  }
+}
+
+/*
+ * Check if a synchronous message has been processed.
+ * We avoid racing on _msg_handled_serial by ensuring
+ * that we are holding the JfrMsg_lock when checking
+ * completion status.
+ */
+bool JfrPostBox::is_message_processed(uintptr_t serial_id) const {
+  assert(JfrMsg_lock->owned_by_self(), "_msg_handled_serial must be read under JfrMsg_lock protection");
+  return serial_id <= (uintptr_t)OrderAccess::load_ptr_acquire((intptr_t*)&_msg_handled_serial);
+}
+
+bool JfrPostBox::is_empty() const {
+  assert(JfrMsg_lock->owned_by_self(), "not holding JfrMsg_lock!");
+  return OrderAccess::load_acquire((int*)&_messages) == 0;
+}
+
+int JfrPostBox::collect() {
+  // get pending and reset to 0
+  const int messages = Atomic::xchg(0, &_messages);
+  if (check_waiters(messages)) {
+    _has_waiters = true;
+    assert(JfrMsg_lock->owned_by_self(), "incrementing _msg_read_serial is protected by JfrMsg_lock");
+    // Update made visible on release of JfrMsg_lock via fence instruction in Monitor::IUnlock.
+    ++_msg_read_serial;
+  }
+  return messages;
+}
+
+bool JfrPostBox::check_waiters(int messages) const {
+  assert(JfrMsg_lock->owned_by_self(), "not holding JfrMsg_lock!");
+  assert(!_has_waiters, "invariant");
+  return is_synchronous(messages);
+}
+
+void JfrPostBox::notify_waiters() {
+  if (!_has_waiters) {
+    return;
+  }
+  _has_waiters = false;
+  assert(JfrMsg_lock->owned_by_self(), "incrementing _msg_handled_serial is protected by JfrMsg_lock.");
+  // Update made visible on release of JfrMsg_lock via fence instruction in Monitor::IUnlock.
+  ++_msg_handled_serial;
+  JfrMsg_lock->notify();
+}
+
+// safeguard to ensure no threads are left waiting
+void JfrPostBox::notify_collection_stop() {
+  MutexLockerEx msg_lock(JfrMsg_lock);
+  JfrMsg_lock->notify_all();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/service/jfrPostBox.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_RECORDER_SERVICE_JFRPOSTBOX_HPP
+#define SHARE_VM_JFR_RECORDER_SERVICE_JFRPOSTBOX_HPP
+
+#include "jfr/utilities/jfrAllocation.hpp"
+
+#define MSGBIT(e) (1<<(e))
+
+enum JFR_Msg {
+  MSG_ALL_MSGS = -1,
+  MSG_CLONE_IN_MEMORY = 0,
+  MSG_START,
+  MSG_STOP,
+  MSG_ROTATE,
+  MSG_FULLBUFFER,
+  MSG_CHECKPOINT,
+  MSG_WAKEUP,
+  MSG_SHUTDOWN,
+  MSG_VM_ERROR,
+  MSG_DEADBUFFER,
+  MSG_NO_OF_MSGS
+};
+
+/**
+ *  Jfr messaging.
+ *
+ *  Synchronous messages (posting thread waits for message completion):
+ *
+ *  MSG_CLONE_IN_MEMORY (0) ; MSGBIT(MSG_CLONE_IN_MEMORY) == (1 << 0) == 0x1
+ *  MSG_START (1)           ; MSGBIT(MSG_START) == (1 << 1) == 0x2
+ *  MSG_STOP (2)            ; MSGBIT(MSG_STOP) == (1 << 2) == 0x4
+ *  MSG_ROTATE (3)          ; MSGBIT(MSG_ROTATE) == (1 << 3) == 0x8
+ *  MSG_VM_ERROR (8)        ; MSGBIT(MSG_VM_ERROR) == (1 << 8) == 0x100
+ *
+ *  Asynchronous messages (posting thread returns immediately upon deposit):
+ *
+ *  MSG_FULLBUFFER (4)      ; MSGBIT(MSG_FULLBUFFER) == (1 << 4) == 0x10
+ *  MSG_CHECKPOINT (5)      ; MSGBIT(MSG_CHECKPOINT) == (1 << 5) == 0x20
+ *  MSG_WAKEUP (6)          ; MSGBIT(MSG_WAKEUP) == (1 << 6) == 0x40
+ *  MSG_SHUTDOWN (7)        ; MSGBIT(MSG_SHUTDOWN) == (1 << 7) == 0x80
+ *  MSG_DEADBUFFER (9)      ; MSGBIT(MSG_DEADBUFFER) == (1 << 9) == 0x200
+ */
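+
+/*
+ * Illustrative usage sketch (not taken verbatim from the call sites in this change;
+ * JfrRecorderThread::post_box() is the public accessor used here):
+ *
+ *   JfrRecorderThread::post_box().post(MSG_ROTATE);     // synchronous: caller waits until handled
+ *   JfrRecorderThread::post_box().post(MSG_FULLBUFFER); // asynchronous: returns after the deposit
+ *
+ * Threads that must not block on JfrMsg_lock (the VM thread, or Java threads not in
+ * _thread_in_vm) only deposit the message bits and return, even for synchronous messages.
+ */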
+
+class JfrPostBox : public JfrCHeapObj {
+  friend class JfrRecorder;
+ public:
+  void post(JFR_Msg msg);
+
+ private:
+  uintptr_t _msg_read_serial;
+  uintptr_t _msg_handled_serial;
+  volatile int _messages;
+  bool _has_waiters;
+
+  JfrPostBox();
+  static JfrPostBox& instance();
+  static JfrPostBox* create();
+  static void destroy();
+
+  void asynchronous_post(int msg);
+  void synchronous_post(int msg);
+  void deposit(int new_messages);
+  bool is_message_processed(uintptr_t serial_id) const;
+
+  friend void recorderthread_entry(JavaThread*, Thread*);
+  // for the friend declaration above
+  bool is_empty() const;
+  int collect();
+  bool check_waiters(int messages) const;
+  void notify_waiters();
+  void notify_collection_stop();
+};
+
+#endif // SHARE_VM_JFR_RECORDER_SERVICE_JFRPOSTBOX_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/service/jfrRecorderService.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,541 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/jni/jfrJavaSupport.hpp"
+#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
+#include "jfr/recorder/jfrRecorder.hpp"
+#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
+#include "jfr/recorder/checkpoint/jfrMetadataEvent.hpp"
+#include "jfr/recorder/repository/jfrChunkSizeNotifier.hpp"
+#include "jfr/recorder/repository/jfrChunkWriter.hpp"
+#include "jfr/recorder/repository/jfrRepository.hpp"
+#include "jfr/recorder/service/jfrPostBox.hpp"
+#include "jfr/recorder/service/jfrRecorderService.hpp"
+#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
+#include "jfr/recorder/storage/jfrStorage.hpp"
+#include "jfr/recorder/storage/jfrStorageControl.hpp"
+#include "jfr/recorder/stringpool/jfrStringPool.hpp"
+#include "jfr/utilities/jfrAllocation.hpp"
+#include "jfr/utilities/jfrTime.hpp"
+#include "jfr/writers/jfrJavaEventWriter.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/orderAccess.hpp"
+#include "runtime/os.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/thread.inline.hpp"
+#include "runtime/vm_operations.hpp"
+#include "runtime/vmThread.hpp"
+
+// install data iff *dest == NULL; with clear == true, reset *dest back to NULL
+static bool try_set(void* const data, void** dest, bool clear) {
+  assert(data != NULL, "invariant");
+  void* const current = OrderAccess::load_ptr_acquire(dest);
+  if (current != NULL) {
+    if (current != data) {
+      // already set
+      return false;
+    }
+    assert(current == data, "invariant");
+    if (!clear) {
+      // recursion disallowed
+      return false;
+    }
+  }
+  return Atomic::cmpxchg_ptr(clear ? NULL : data, dest, current) == current;
+}
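+
+// Illustrative usage sketch (mirroring RotationLock below):
+//
+//   try_set(thread, &rotation_thread, false);  // acquire: installs thread iff the slot is NULL
+//   try_set(thread, &rotation_thread, true);   // release: resets the slot back to NULL
+//
+// A second acquire attempt by the owning thread returns false, which is how recursive
+// rotation requests are rejected.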
+
+static void* rotation_thread = NULL;
+static const int rotation_try_limit = 1000;
+static const int rotation_retry_sleep_millis = 10;
+
+class RotationLock : public StackObj {
+ private:
+  Thread* const _thread;
+  bool _acquired;
+
+  void log(bool recursion) {
+    assert(!_acquired, "invariant");
+    const char* error_msg = NULL;
+    if (recursion) {
+      error_msg = "Unable to issue rotation due to recursive calls.";
+    } else {
+      error_msg = "Unable to issue rotation due to wait timeout.";
+    }
+    // intended for the end user, not the "jfr, system" log
+    if (LogJFR) tty->print_cr("%s", error_msg);
+  }
+ public:
+  RotationLock(Thread* thread) : _thread(thread), _acquired(false) {
+    assert(_thread != NULL, "invariant");
+    if (_thread == rotation_thread) {
+      // recursion not supported
+      log(true);
+      return;
+    }
+
+    // limited to not spin indefinitely
+    for (int i = 0; i < rotation_try_limit; ++i) {
+      if (try_set(_thread, &rotation_thread, false)) {
+        _acquired = true;
+        assert(_thread == rotation_thread, "invariant");
+        return;
+      }
+      if (_thread->is_Java_thread()) {
+        // in order to allow the system to move to a safepoint
+        MutexLockerEx msg_lock(JfrMsg_lock);
+        JfrMsg_lock->wait(false, rotation_retry_sleep_millis);
+      } else {
+        os::naked_short_sleep(rotation_retry_sleep_millis);
+      }
+    }
+    log(false);
+  }
+
+  ~RotationLock() {
+    assert(_thread != NULL, "invariant");
+    if (_acquired) {
+      assert(_thread == rotation_thread, "invariant");
+      while (!try_set(_thread, &rotation_thread, true));
+    }
+  }
+  bool not_acquired() const { return !_acquired; }
+};
+
+static intptr_t write_checkpoint_event_prologue(JfrChunkWriter& cw, u8 type_id) {
+  const intptr_t prev_cp_offset = cw.previous_checkpoint_offset();
+  const intptr_t prev_cp_relative_offset = 0 == prev_cp_offset ? 0 : prev_cp_offset - cw.current_offset();
+  cw.reserve(sizeof(u4));
+  cw.write<u8>(EVENT_CHECKPOINT);
+  cw.write(JfrTicks::now());
+  cw.write<jlong>((jlong)0);
+  cw.write<jlong>((jlong)prev_cp_relative_offset); // write previous checkpoint offset delta
+  cw.write<bool>(false); // flushpoint
+  cw.write<u4>((u4)1); // nof types in this checkpoint
+  cw.write<u8>(type_id);
+  const intptr_t number_of_elements_offset = cw.current_offset();
+  cw.reserve(sizeof(u4));
+  return number_of_elements_offset;
+}
+
+template <typename ContentFunctor>
+class WriteCheckpointEvent : public StackObj {
+ private:
+  JfrChunkWriter& _cw;
+  u8 _type_id;
+  ContentFunctor& _content_functor;
+ public:
+  WriteCheckpointEvent(JfrChunkWriter& cw, u8 type_id, ContentFunctor& functor) :
+    _cw(cw),
+    _type_id(type_id),
+    _content_functor(functor) {
+    assert(_cw.is_valid(), "invariant");
+  }
+  bool process() {
+    // current_cp_offset is also the offset of the event size header field
+    const intptr_t current_cp_offset = _cw.current_offset();
+    const intptr_t num_elements_offset = write_checkpoint_event_prologue(_cw, _type_id);
+    // invocation
+    _content_functor.process();
+    const u4 number_of_elements = (u4)_content_functor.processed();
+    if (number_of_elements == 0) {
+      // nothing to do, rewind writer to start
+      _cw.seek(current_cp_offset);
+      return true;
+    }
+    assert(number_of_elements > 0, "invariant");
+    assert(_cw.current_offset() > num_elements_offset, "invariant");
+    _cw.write_padded_at_offset<u4>(number_of_elements, num_elements_offset);
+    _cw.write_padded_at_offset<u4>((u4)_cw.current_offset() - current_cp_offset, current_cp_offset);
+    // update writer with last checkpoint position
+    _cw.set_previous_checkpoint_offset(current_cp_offset);
+    return true;
+  }
+};
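+
+// Illustrative layout (reconstructed from the prologue above; the field names are an
+// interpretation) of a checkpoint event as emitted by WriteCheckpointEvent::process():
+//
+//   u4    total size                          <- patched at current_cp_offset
+//   u8    EVENT_CHECKPOINT
+//         JfrTicks::now()                     (start time)
+//   jlong 0                                   (duration)
+//   jlong delta to previous checkpoint offset
+//   bool  false                               (flushpoint)
+//   u4    1                                   (number of types)
+//   u8    type id
+//   u4    number of elements                  <- patched at num_elements_offset
+//         elements produced by the content functor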
+
+template <typename Instance, size_t(Instance::*func)()>
+class ServiceFunctor {
+ private:
+  Instance& _instance;
+  size_t _processed;
+ public:
+  ServiceFunctor(Instance& instance) : _instance(instance), _processed(0) {}
+  bool process() {
+    _processed = (_instance.*func)();
+    return true;
+  }
+  size_t processed() const { return _processed; }
+};
+
+template <typename Instance, void(Instance::*func)()>
+class JfrVMOperation : public VM_Operation {
+ private:
+  Instance& _instance;
+ public:
+  JfrVMOperation(Instance& instance) : _instance(instance) {}
+  void doit() { (_instance.*func)(); }
+  VMOp_Type type() const { return VMOp_JFRCheckpoint; }
+  Mode evaluation_mode() const { return _safepoint; } // default
+};
+
+class WriteStackTraceRepository : public StackObj {
+ private:
+  JfrStackTraceRepository& _repo;
+  JfrChunkWriter& _cw;
+  size_t _elements_processed;
+  bool _clear;
+
+ public:
+  WriteStackTraceRepository(JfrStackTraceRepository& repo, JfrChunkWriter& cw, bool clear) :
+    _repo(repo), _cw(cw), _elements_processed(0), _clear(clear) {}
+  bool process() {
+    _elements_processed = _repo.write(_cw, _clear);
+    return true;
+  }
+  size_t processed() const { return _elements_processed; }
+  void reset() { _elements_processed = 0; }
+};
+
+static bool recording = false;
+
+static void set_recording_state(bool is_recording) {
+  OrderAccess::storestore();
+  recording = is_recording;
+}
+
+bool JfrRecorderService::is_recording() {
+  return recording;
+}
+
+JfrRecorderService::JfrRecorderService() :
+  _checkpoint_manager(JfrCheckpointManager::instance()),
+  _chunkwriter(JfrRepository::chunkwriter()),
+  _repository(JfrRepository::instance()),
+  _storage(JfrStorage::instance()),
+  _stack_trace_repository(JfrStackTraceRepository::instance()),
+  _string_pool(JfrStringPool::instance()) {}
+
+void JfrRecorderService::start() {
+  RotationLock rl(Thread::current());
+  if (rl.not_acquired()) {
+    return;
+  }
+  if (LogJFR) tty->print_cr("Request to START recording");
+  assert(!is_recording(), "invariant");
+  clear();
+  set_recording_state(true);
+  assert(is_recording(), "invariant");
+  open_new_chunk();
+  if (LogJFR) tty->print_cr("Recording STARTED");
+}
+
+void JfrRecorderService::clear() {
+  ResourceMark rm;
+  HandleMark hm;
+  pre_safepoint_clear();
+  invoke_safepoint_clear();
+  post_safepoint_clear();
+}
+
+void JfrRecorderService::pre_safepoint_clear() {
+  _stack_trace_repository.clear();
+  _string_pool.clear();
+  _storage.clear();
+}
+
+void JfrRecorderService::invoke_safepoint_clear() {
+  JfrVMOperation<JfrRecorderService, &JfrRecorderService::safepoint_clear> safepoint_task(*this);
+  VMThread::execute(&safepoint_task);
+}
+
+//
+// safepoint clear sequence
+//
+//  clear stacktrace repository ->
+//    clear string pool ->
+//      clear storage ->
+//        shift epoch ->
+//          update time
+//
+void JfrRecorderService::safepoint_clear() {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  _stack_trace_repository.clear();
+  _string_pool.clear();
+  _storage.clear();
+  _checkpoint_manager.shift_epoch();
+  _chunkwriter.time_stamp_chunk_now();
+}
+
+void JfrRecorderService::post_safepoint_clear() {
+  _checkpoint_manager.clear();
+}
+
+static void stop() {
+  assert(JfrRecorderService::is_recording(), "invariant");
+  if (LogJFR) tty->print_cr("Recording STOPPED");
+  set_recording_state(false);
+  assert(!JfrRecorderService::is_recording(), "invariant");
+}
+
+void JfrRecorderService::rotate(int msgs) {
+  RotationLock rl(Thread::current());
+  if (rl.not_acquired()) {
+    return;
+  }
+  static bool vm_error = false;
+  if (msgs & MSGBIT(MSG_VM_ERROR)) {
+    vm_error = true;
+    prepare_for_vm_error_rotation();
+  }
+  if (msgs & (MSGBIT(MSG_STOP))) {
+    stop();
+  }
+  // action determined by chunkwriter state
+  if (!_chunkwriter.is_valid()) {
+    in_memory_rotation();
+    return;
+  }
+  if (vm_error) {
+    vm_error_rotation();
+    return;
+  }
+  chunk_rotation();
+}
+
+void JfrRecorderService::prepare_for_vm_error_rotation() {
+  if (!_chunkwriter.is_valid()) {
+    open_new_chunk(true);
+  }
+  _checkpoint_manager.register_service_thread(Thread::current());
+}
+
+void JfrRecorderService::open_new_chunk(bool vm_error) {
+  assert(!_chunkwriter.is_valid(), "invariant");
+  assert(!JfrStream_lock->owned_by_self(), "invariant");
+  MutexLockerEx stream_lock(JfrStream_lock, Mutex::_no_safepoint_check_flag);
+  if (!_repository.open_chunk(vm_error)) {
+    assert(!_chunkwriter.is_valid(), "invariant");
+    _storage.control().set_to_disk(false);
+    return;
+  }
+  assert(_chunkwriter.is_valid(), "invariant");
+  _storage.control().set_to_disk(true);
+}
+
+void JfrRecorderService::in_memory_rotation() {
+  assert(!_chunkwriter.is_valid(), "invariant");
+  // currently running an in-memory recording
+  open_new_chunk();
+  if (_chunkwriter.is_valid()) {
+    // dump all in-memory buffer data to the newly created chunk
+    serialize_storage_from_in_memory_recording();
+  }
+}
+
+void JfrRecorderService::serialize_storage_from_in_memory_recording() {
+  assert(!JfrStream_lock->owned_by_self(), "not holding stream lock!");
+  MutexLockerEx stream_lock(JfrStream_lock, Mutex::_no_safepoint_check_flag);
+  _storage.write();
+}
+
+void JfrRecorderService::chunk_rotation() {
+  finalize_current_chunk();
+  open_new_chunk();
+}
+
+void JfrRecorderService::finalize_current_chunk() {
+  assert(_chunkwriter.is_valid(), "invariant");
+  write();
+  assert(!_chunkwriter.is_valid(), "invariant");
+}
+
+void JfrRecorderService::write() {
+  ResourceMark rm;
+  HandleMark hm;
+  pre_safepoint_write();
+  invoke_safepoint_write();
+  post_safepoint_write();
+}
+
+typedef ServiceFunctor<JfrStringPool, &JfrStringPool::write> WriteStringPool;
+typedef ServiceFunctor<JfrStringPool, &JfrStringPool::write_at_safepoint> WriteStringPoolSafepoint;
+typedef WriteCheckpointEvent<WriteStackTraceRepository> WriteStackTraceCheckpoint;
+typedef WriteCheckpointEvent<WriteStringPool> WriteStringPoolCheckpoint;
+typedef WriteCheckpointEvent<WriteStringPoolSafepoint> WriteStringPoolCheckpointSafepoint;
+
+static void write_stacktrace_checkpoint(JfrStackTraceRepository& stack_trace_repo, JfrChunkWriter& chunkwriter, bool clear) {
+  WriteStackTraceRepository write_stacktrace_repo(stack_trace_repo, chunkwriter, clear);
+  WriteStackTraceCheckpoint write_stack_trace_checkpoint(chunkwriter, TYPE_STACKTRACE, write_stacktrace_repo);
+  write_stack_trace_checkpoint.process();
+}
+
+static void write_stringpool_checkpoint(JfrStringPool& string_pool, JfrChunkWriter& chunkwriter) {
+  WriteStringPool write_string_pool(string_pool);
+  WriteStringPoolCheckpoint write_string_pool_checkpoint(chunkwriter, TYPE_STRING, write_string_pool);
+  write_string_pool_checkpoint.process();
+}
+
+static void write_stringpool_checkpoint_safepoint(JfrStringPool& string_pool, JfrChunkWriter& chunkwriter) {
+  WriteStringPoolSafepoint write_string_pool(string_pool);
+  WriteStringPoolCheckpointSafepoint write_string_pool_checkpoint(chunkwriter, TYPE_STRING, write_string_pool);
+  write_string_pool_checkpoint.process();
+}
+
+//
+// pre-safepoint write sequence
+//
+//  lock stream lock ->
+//    write non-safepoint dependent types ->
+//      write checkpoint epoch transition list->
+//        write stack trace checkpoint ->
+//          write string pool checkpoint ->
+//            write storage ->
+//              release stream lock
+//
+void JfrRecorderService::pre_safepoint_write() {
+  MutexLockerEx stream_lock(JfrStream_lock, Mutex::_no_safepoint_check_flag);
+  assert(_chunkwriter.is_valid(), "invariant");
+  _checkpoint_manager.write_types();
+  _checkpoint_manager.write_epoch_transition_mspace();
+  write_stacktrace_checkpoint(_stack_trace_repository, _chunkwriter, false);
+  write_stringpool_checkpoint(_string_pool, _chunkwriter);
+  _storage.write();
+}
+
+void JfrRecorderService::invoke_safepoint_write() {
+  JfrVMOperation<JfrRecorderService, &JfrRecorderService::safepoint_write> safepoint_task(*this);
+  VMThread::execute(&safepoint_task);
+}
+
+static void write_object_sample_stacktrace(JfrStackTraceRepository& stack_trace_repository) {
+  WriteObjectSampleStacktrace object_sample_stacktrace(stack_trace_repository);
+  object_sample_stacktrace.process();
+}
+
+//
+// safepoint write sequence
+//
+//   lock stream lock ->
+//     write object sample stacktraces ->
+//       write stacktrace repository ->
+//         write string pool ->
+//           write safepoint dependent types ->
+//             write storage ->
+//                 shift_epoch ->
+//                   update time ->
+//                     lock metadata descriptor ->
+//                       release stream lock
+//
+void JfrRecorderService::safepoint_write() {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  MutexLockerEx stream_lock(JfrStream_lock, Mutex::_no_safepoint_check_flag);
+  write_object_sample_stacktrace(_stack_trace_repository);
+  write_stacktrace_checkpoint(_stack_trace_repository, _chunkwriter, true);
+  write_stringpool_checkpoint_safepoint(_string_pool, _chunkwriter);
+  _checkpoint_manager.write_safepoint_types();
+  _storage.write_at_safepoint();
+  _checkpoint_manager.shift_epoch();
+  _chunkwriter.time_stamp_chunk_now();
+  JfrMetadataEvent::lock();
+}
+
+static jlong write_metadata_event(JfrChunkWriter& chunkwriter) {
+  assert(chunkwriter.is_valid(), "invariant");
+  const jlong metadata_offset = chunkwriter.current_offset();
+  JfrMetadataEvent::write(chunkwriter, metadata_offset);
+  return metadata_offset;
+}
+
+//
+// post-safepoint write sequence
+//
+//  lock stream lock ->
+//    write type set ->
+//      write checkpoints ->
+//        write metadata event ->
+//          write chunk header ->
+//            close chunk fd ->
+//              release stream lock
+//
+void JfrRecorderService::post_safepoint_write() {
+  assert(_chunkwriter.is_valid(), "invariant");
+  // During the safepoint tasks just completed, the system transitioned to a new epoch.
+  // Type tagging is epoch-relative, which means the artifacts already tagged for the
+  // previous epoch can be written out now. This can proceed concurrently with threads
+  // tagging artifacts against the new, updated epoch, and requires no safepoint.
+  _checkpoint_manager.write_type_set();
+  MutexLockerEx stream_lock(JfrStream_lock, Mutex::_no_safepoint_check_flag);
+  // serialize any outstanding checkpoint memory
+  _checkpoint_manager.write();
+  // serialize the metadata descriptor event and close out the chunk
+  _repository.close_chunk(write_metadata_event(_chunkwriter));
+  assert(!_chunkwriter.is_valid(), "invariant");
+}
+
+void JfrRecorderService::vm_error_rotation() {
+  if (_chunkwriter.is_valid()) {
+    finalize_current_chunk_on_vm_error();
+    assert(!_chunkwriter.is_valid(), "invariant");
+    _repository.on_vm_error();
+  }
+}
+
+void JfrRecorderService::finalize_current_chunk_on_vm_error() {
+  assert(_chunkwriter.is_valid(), "invariant");
+  pre_safepoint_write();
+  JfrMetadataEvent::lock();
+  // Do not attempt safepoint dependent operations during emergency dump.
+  // Optimistically write tagged artifacts.
+  _checkpoint_manager.shift_epoch();
+  _checkpoint_manager.write_type_set();
+  // update time
+  _chunkwriter.time_stamp_chunk_now();
+  post_safepoint_write();
+  assert(!_chunkwriter.is_valid(), "invariant");
+}
+
+void JfrRecorderService::process_full_buffers() {
+  if (_chunkwriter.is_valid()) {
+    assert(!JfrStream_lock->owned_by_self(), "invariant");
+    MutexLockerEx stream_lock(JfrStream_lock, Mutex::_no_safepoint_check_flag);
+    _storage.write_full();
+  }
+}
+
+void JfrRecorderService::scavenge() {
+  _storage.scavenge();
+}
+
+void JfrRecorderService::evaluate_chunk_size_for_rotation() {
+  const size_t size_written = _chunkwriter.size_written();
+  if (size_written > JfrChunkSizeNotifier::chunk_size_threshold()) {
+    JfrChunkSizeNotifier::notify();
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/service/jfrRecorderService.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_RECORDER_SERVICE_JFRRECORDERSERVICE_HPP
+#define SHARE_VM_JFR_RECORDER_SERVICE_JFRRECORDERSERVICE_HPP
+
+#include "jfr/utilities/jfrAllocation.hpp"
+
+class JfrCheckpointManager;
+class JfrChunkWriter;
+class JfrRepository;
+class JfrStackTraceRepository;
+class JfrStorage;
+class JfrStringPool;
+
+class JfrRecorderService : public StackObj {
+ private:
+  JfrCheckpointManager& _checkpoint_manager;
+  JfrChunkWriter& _chunkwriter;
+  JfrRepository& _repository;
+  JfrStackTraceRepository& _stack_trace_repository;
+  JfrStorage& _storage;
+  JfrStringPool& _string_pool;
+
+  void open_new_chunk(bool vm_error = false);
+  void chunk_rotation();
+  void in_memory_rotation();
+  void serialize_storage_from_in_memory_recording();
+  void finalize_current_chunk();
+  void finalize_current_chunk_on_vm_error();
+  void prepare_for_vm_error_rotation();
+  void vm_error_rotation();
+
+  void clear();
+  void pre_safepoint_clear();
+  void safepoint_clear();
+  void invoke_safepoint_clear();
+  void post_safepoint_clear();
+
+  void write();
+  void pre_safepoint_write();
+  void safepoint_write();
+  void invoke_safepoint_write();
+  void post_safepoint_write();
+
+ public:
+  JfrRecorderService();
+  void start();
+  void rotate(int msgs);
+  void process_full_buffers();
+  void scavenge();
+  void evaluate_chunk_size_for_rotation();
+  static bool is_recording();
+};
+
+#endif // SHARE_VM_JFR_RECORDER_SERVICE_JFRRECORDERSERVICE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/service/jfrRecorderThread.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jni.h"
+#include "classfile/javaClasses.hpp"
+#include "classfile/symbolTable.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "jfr/jni/jfrJavaSupport.hpp"
+#include "jfr/recorder/jfrRecorder.hpp"
+#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
+#include "jfr/recorder/service/jfrRecorderThread.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/thread.inline.hpp"
+#include "utilities/preserveException.hpp"
+#include "utilities/macros.hpp"
+
+static Thread* start_thread(instanceHandle thread_oop, ThreadFunction proc, TRAPS) {
+  assert(thread_oop.not_null(), "invariant");
+  assert(proc != NULL, "invariant");
+
+  bool allocation_failed = false;
+  JavaThread* new_thread = NULL;
+  {
+    MutexLocker mu(Threads_lock);
+    new_thread = new JavaThread(proc);
+    // At this point it may be possible that no
+    // osthread was created for the JavaThread due to lack of memory.
+    if (new_thread == NULL || new_thread->osthread() == NULL) {
+      delete new_thread;
+      allocation_failed = true;
+    } else {
+      java_lang_Thread::set_thread(thread_oop(), new_thread);
+      java_lang_Thread::set_priority(thread_oop(), NormPriority);
+      java_lang_Thread::set_daemon(thread_oop());
+      new_thread->set_threadObj(thread_oop());
+      Threads::add(new_thread);
+    }
+  }
+  if (allocation_failed) {
+    JfrJavaSupport::throw_out_of_memory_error("Unable to create native recording thread for JFR", CHECK_NULL);
+  }
+
+  Thread::start(new_thread);
+  return new_thread;
+}
+
+JfrPostBox* JfrRecorderThread::_post_box = NULL;
+
+JfrPostBox& JfrRecorderThread::post_box() {
+  return *_post_box;
+}
+
+// defined in jfrRecorderThreadLoop.cpp
+void recorderthread_entry(JavaThread*, Thread*);
+
+bool JfrRecorderThread::start(JfrCheckpointManager* cp_manager, JfrPostBox* post_box, TRAPS) {
+  assert(cp_manager != NULL, "invariant");
+  assert(post_box != NULL, "invariant");
+  _post_box = post_box;
+
+  static const char klass[] = "jdk/jfr/internal/JVMUpcalls";
+  static const char method[] = "createRecorderThread";
+  static const char signature[] = "(Ljava/lang/ThreadGroup;Ljava/lang/ClassLoader;)Ljava/lang/Thread;";
+
+  JavaValue result(T_OBJECT);
+  JfrJavaArguments create_thread_args(&result, klass, method, signature, CHECK_false);
+
+  // arguments
+  create_thread_args.push_oop(Universe::system_thread_group());
+  create_thread_args.push_oop(SystemDictionary::java_system_loader());
+
+  JfrJavaSupport::call_static(&create_thread_args, CHECK_false);
+  instanceHandle h_thread_oop(THREAD, (instanceOop)result.get_jobject());
+  assert(h_thread_oop.not_null(), "invariant");
+  // attempt thread start
+  const Thread* const t = start_thread(h_thread_oop, recorderthread_entry, THREAD);
+  if (!HAS_PENDING_EXCEPTION) {
+    cp_manager->register_service_thread(t);
+    return true;
+  }
+  assert(HAS_PENDING_EXCEPTION, "invariant");
+  // Start failed, remove the thread from the system thread group
+  JavaValue void_result(T_VOID);
+  JfrJavaArguments remove_thread_args(&void_result);
+  remove_thread_args.set_klass(SystemDictionary::ThreadGroup_klass());
+  remove_thread_args.set_name(vmSymbols::remove_method_name());
+  remove_thread_args.set_signature(vmSymbols::thread_void_signature());
+  remove_thread_args.set_receiver(Universe::system_thread_group());
+  remove_thread_args.push_oop(h_thread_oop());
+  CautiouslyPreserveExceptionMark cpe(THREAD);
+  JfrJavaSupport::call_special(&remove_thread_args, THREAD);
+  return false;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/service/jfrRecorderThread.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_RECORDER_SERVICE_JFRRECORDERTHREAD_HPP
+#define SHARE_VM_JFR_RECORDER_SERVICE_JFRRECORDERTHREAD_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/debug.hpp"
+
+class JavaThread;
+class JfrCheckpointManager;
+class JfrPostBox;
+class Thread;
+
+class JfrRecorderThread : AllStatic {
+ private:
+  static JfrPostBox* _post_box;
+
+ public:
+  static JfrPostBox& post_box();
+  static bool start(JfrCheckpointManager* cp_manager, JfrPostBox* post_box, TRAPS);
+};
+
+#endif // SHARE_VM_JFR_RECORDER_SERVICE_JFRRECORDERTHREAD_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/service/jfrRecorderThreadLoop.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/recorder/jfrRecorder.hpp"
+#include "jfr/recorder/service/jfrPostBox.hpp"
+#include "jfr/recorder/service/jfrRecorderService.hpp"
+#include "jfr/recorder/service/jfrRecorderThread.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/thread.inline.hpp"
+
+//
+// Entry point for "JFR Recorder Thread" message loop.
+// The recorder thread executes service requests collected from the message system.
+//
+void recorderthread_entry(JavaThread* thread, Thread* unused) {
+  assert(thread != NULL, "invariant");
+  #define START (msgs & (MSGBIT(MSG_START)))
+  #define SHUTDOWN (msgs & MSGBIT(MSG_SHUTDOWN))
+  #define ROTATE (msgs & (MSGBIT(MSG_ROTATE)|MSGBIT(MSG_STOP)))
+  #define PROCESS_FULL_BUFFERS (msgs & (MSGBIT(MSG_ROTATE)|MSGBIT(MSG_STOP)|MSGBIT(MSG_FULLBUFFER)))
+  #define SCAVENGE (msgs & (MSGBIT(MSG_DEADBUFFER)))
+
+  JfrPostBox& post_box = JfrRecorderThread::post_box();
+  if (LogJFR) tty->print_cr("Recorder thread STARTED");
+
+  {
+    bool done = false;
+    int msgs = 0;
+    JfrRecorderService service;
+    MutexLockerEx msg_lock(JfrMsg_lock);
+
+    // JFR MESSAGE LOOP PROCESSING - BEGIN
+    while (!done) {
+      if (post_box.is_empty()) {
+        JfrMsg_lock->wait(false);
+      }
+      msgs = post_box.collect();
+      JfrMsg_lock->unlock();
+      if (PROCESS_FULL_BUFFERS) {
+        service.process_full_buffers();
+      }
+      if (SCAVENGE) {
+        service.scavenge();
+      }
+      // Check whether the amount of data already written to the chunk
+      // warrants requesting a new chunk
+      service.evaluate_chunk_size_for_rotation();
+      if (START) {
+        service.start();
+      } else if (ROTATE) {
+        service.rotate(msgs);
+      }
+      JfrMsg_lock->lock();
+      post_box.notify_waiters();
+      if (SHUTDOWN) {
+        if (LogJFR) tty->print_cr("Request to STOP recorder");
+        done = true;
+      }
+    } // JFR MESSAGE LOOP PROCESSING - END
+
+  } // JfrMsg_lock scope
+
+  assert(!JfrMsg_lock->owned_by_self(), "invariant");
+  post_box.notify_collection_stop();
+  JfrRecorder::on_recorder_thread_exit();
+
+  #undef START
+  #undef SHUTDOWN
+  #undef ROTATE
+  #undef PROCESS_FULL_BUFFERS
+  #undef SCAVENGE
+}
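+
+// Illustrative dispatch summary (derived from the macros and loop above):
+//
+//   MSG_FULLBUFFER          -> process_full_buffers()
+//   MSG_DEADBUFFER          -> scavenge()
+//   MSG_START               -> service.start()
+//   MSG_ROTATE / MSG_STOP   -> process_full_buffers(), then service.rotate(msgs)
+//   MSG_SHUTDOWN            -> exit the loop after notifying any waiters
+//
+// evaluate_chunk_size_for_rotation() runs on every iteration, regardless of which
+// bits were collected.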
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,464 @@
+/*
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/metadata/jfrSerializer.hpp"
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
+#include "jfr/recorder/repository/jfrChunkWriter.hpp"
+#include "jfr/recorder/service/jfrOptionSet.hpp"
+#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+#include "memory/allocation.inline.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/os.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/task.hpp"
+#include "runtime/vframe.hpp"
+
+class vframeStreamSamples : public vframeStreamCommon {
+ public:
+  // constructor that starts with sender of frame fr (top_frame)
+  vframeStreamSamples(JavaThread *jt, frame fr, bool stop_at_java_call_stub);
+  void samples_next();
+  void stop() {}
+};
+
+vframeStreamSamples::vframeStreamSamples(JavaThread *jt, frame fr, bool stop_at_java_call_stub) : vframeStreamCommon(jt) {
+  _stop_at_java_call_stub = stop_at_java_call_stub;
+  _frame = fr;
+
+  // We must always have a valid frame to start filling
+  bool filled_in = fill_from_frame();
+  assert(filled_in, "invariant");
+}
+
+// Solaris SPARC Compiler1 needs an additional check on the grandparent
+// of the top_frame when the parent of the top_frame is interpreted and
+// the grandparent is compiled. However, in this method we do not know
+// the relationship of the current _frame relative to the top_frame so
+// we implement a more broad sanity check. When the previous callee is
+// interpreted and the current sender is compiled, we verify that the
+// current sender is also walkable. If it is not walkable, then we mark
+// the current vframeStream as at the end.
+void vframeStreamSamples::samples_next() {
+  // handle frames with inlining
+  if (_mode == compiled_mode &&
+      vframeStreamCommon::fill_in_compiled_inlined_sender()) {
+    return;
+  }
+
+  // handle general case
+  int loop_count = 0;
+  int loop_max = MaxJavaStackTraceDepth * 2;
+  do {
+    loop_count++;
+    // By the time we get here we should never see an unsafe frame, but better safe than segv'd
+    if (loop_count > loop_max || !_frame.safe_for_sender(_thread)) {
+      _mode = at_end_mode;
+      return;
+    }
+    _frame = _frame.sender(&_reg_map);
+  } while (!fill_from_frame());
+}
+
+static JfrStackTraceRepository* _instance = NULL;
+
+JfrStackTraceRepository& JfrStackTraceRepository::instance() {
+  return *_instance;
+}
+
+JfrStackTraceRepository* JfrStackTraceRepository::create() {
+  assert(_instance == NULL, "invariant");
+  _instance = new JfrStackTraceRepository();
+  return _instance;
+}
+
+void JfrStackTraceRepository::destroy() {
+  assert(_instance != NULL, "invarinat");
+  delete _instance;
+  _instance = NULL;
+}
+
+JfrStackTraceRepository::JfrStackTraceRepository() : _next_id(0), _entries(0) {
+  memset(_table, 0, sizeof(_table));
+}
+class JfrFrameType : public JfrSerializer {
+ public:
+  void serialize(JfrCheckpointWriter& writer) {
+    writer.write_count(JfrStackFrame::NUM_FRAME_TYPES);
+    writer.write_key(JfrStackFrame::FRAME_INTERPRETER);
+    writer.write("Interpreted");
+    writer.write_key(JfrStackFrame::FRAME_JIT);
+    writer.write("JIT compiled");
+    writer.write_key(JfrStackFrame::FRAME_INLINE);
+    writer.write("Inlined");
+    writer.write_key(JfrStackFrame::FRAME_NATIVE);
+    writer.write("Native");
+  }
+};
+
+bool JfrStackTraceRepository::initialize() {
+  return JfrSerializer::register_serializer(TYPE_FRAMETYPE, false, true, new JfrFrameType());
+}
+
+size_t JfrStackTraceRepository::clear() {
+  MutexLockerEx lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag);
+  if (_entries == 0) {
+    return 0;
+  }
+  for (u4 i = 0; i < TABLE_SIZE; ++i) {
+    JfrStackTraceRepository::StackTrace* stacktrace = _table[i];
+    while (stacktrace != NULL) {
+      JfrStackTraceRepository::StackTrace* next = stacktrace->next();
+      delete stacktrace;
+      stacktrace = next;
+    }
+  }
+  memset(_table, 0, sizeof(_table));
+  const size_t processed = _entries;
+  _entries = 0;
+  return processed;
+}
+
+traceid JfrStackTraceRepository::add_trace(const JfrStackTrace& stacktrace) {
+  MutexLockerEx lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag);
+  const size_t index = stacktrace._hash % TABLE_SIZE;
+  const StackTrace* table_entry = _table[index];
+
+  while (table_entry != NULL) {
+    if (table_entry->equals(stacktrace)) {
+      return table_entry->id();
+    }
+    table_entry = table_entry->next();
+  }
+
+  if (!stacktrace.have_lineno()) {
+    return 0;
+  }
+
+  traceid id = ++_next_id;
+  _table[index] = new StackTrace(id, stacktrace, _table[index]);
+  ++_entries;
+  return id;
+}
+
+traceid JfrStackTraceRepository::add(const JfrStackTrace& stacktrace) {
+  return instance().add_trace(stacktrace);
+}
+
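+// Records the current thread's stack trace. A stack trace id already cached in the
+// JfrThreadLocal is reused; non-Java threads, threads hidden from external view, and threads
+// whose frame buffer could not be allocated (pending OOM) yield id 0.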
+traceid JfrStackTraceRepository::record(Thread* thread, int skip /* 0 */) {
+  assert(thread == Thread::current(), "invariant");
+  JfrThreadLocal* const tl = thread->jfr_thread_local();
+  assert(tl != NULL, "invariant");
+  if (tl->has_cached_stack_trace()) {
+    return tl->cached_stack_trace_id();
+  }
+  if (!thread->is_Java_thread() || thread->is_hidden_from_external_view()) {
+    return 0;
+  }
+  JfrStackFrame* frames = tl->stackframes();
+  if (frames == NULL) {
+    // pending oom
+    return 0;
+  }
+  assert(frames != NULL, "invariant");
+  assert(tl->stackframes() == frames, "invariant");
+  return instance().record_for((JavaThread*)thread, skip, frames, tl->stackdepth());
+}
+
+traceid JfrStackTraceRepository::record(Thread* thread, int skip, unsigned int* hash) {
+  assert(thread == Thread::current(), "invariant");
+  JfrThreadLocal* const tl = thread->jfr_thread_local();
+  assert(tl != NULL, "invariant");
+
+  if (tl->has_cached_stack_trace()) {
+    *hash = tl->cached_stack_trace_hash();
+    return tl->cached_stack_trace_id();
+  }
+  if (!thread->is_Java_thread() || thread->is_hidden_from_external_view()) {
+    return 0;
+  }
+  JfrStackFrame* frames = tl->stackframes();
+  if (frames == NULL) {
+    // pending oom
+    return 0;
+  }
+  assert(frames != NULL, "invariant");
+  assert(tl->stackframes() == frames, "invariant");
+  return instance().record_for((JavaThread*)thread, skip, frames, tl->stackdepth(), hash);
+}
+
+traceid JfrStackTraceRepository::record_for(JavaThread* thread, int skip, JfrStackFrame *frames, u4 max_frames) {
+  JfrStackTrace stacktrace(frames, max_frames);
+  if (!stacktrace.record_safe(thread, skip)) {
+    return 0;
+  }
+  traceid tid = add(stacktrace);
+  if (tid == 0) {
+    stacktrace.resolve_linenos();
+    tid = add(stacktrace);
+  }
+  return tid;
+}
+
+traceid JfrStackTraceRepository::record_for(JavaThread* thread, int skip, JfrStackFrame *frames, u4 max_frames, unsigned int* hash) {
+  assert(hash != NULL && *hash == 0, "invariant");
+  JfrStackTrace stacktrace(frames, max_frames);
+  if (!stacktrace.record_safe(thread, skip, true)) {
+    return 0;
+  }
+  traceid tid = add(stacktrace);
+  if (tid == 0) {
+    stacktrace.resolve_linenos();
+    tid = add(stacktrace);
+  }
+  *hash = stacktrace._hash;
+  return tid;
+}
+
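+// Writes every entry not yet written to the chunk; with 'clear' set, entries are also freed
+// and the table is reset so the next chunk starts from an empty repository.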
+size_t JfrStackTraceRepository::write_impl(JfrChunkWriter& sw, bool clear) {
+  MutexLockerEx lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag);
+  assert(_entries > 0, "invariant");
+  int count = 0;
+  for (u4 i = 0; i < TABLE_SIZE; ++i) {
+    JfrStackTraceRepository::StackTrace* stacktrace = _table[i];
+    while (stacktrace != NULL) {
+      JfrStackTraceRepository::StackTrace* next = stacktrace->next();
+      if (stacktrace->should_write()) {
+        stacktrace->write(sw);
+        ++count;
+      }
+      if (clear) {
+        delete stacktrace;
+      }
+      stacktrace = next;
+    }
+  }
+  if (clear) {
+    memset(_table, 0, sizeof(_table));
+    _entries = 0;
+  }
+  return count;
+}
+
+size_t JfrStackTraceRepository::write(JfrChunkWriter& sw, bool clear) {
+  return _entries > 0 ? write_impl(sw, clear) : 0;
+}
+
+traceid JfrStackTraceRepository::write(JfrCheckpointWriter& writer, traceid id, unsigned int hash) {
+  assert(JfrStacktrace_lock->owned_by_self(), "invariant");
+  const StackTrace* const trace = resolve_entry(hash, id);
+  assert(trace != NULL, "invariant");
+  assert(trace->hash() == hash, "invariant");
+  assert(trace->id() == id, "invariant");
+  trace->write(writer);
+  return id;
+}
+
+JfrStackTraceRepository::StackTrace::StackTrace(traceid id, const JfrStackTrace& trace, JfrStackTraceRepository::StackTrace* next) :
+  _next(next),
+  _frames(NULL),
+  _id(id),
+  _nr_of_frames(trace._nr_of_frames),
+  _hash(trace._hash),
+  _reached_root(trace._reached_root),
+  _written(false) {
+  if (_nr_of_frames > 0) {
+    _frames = NEW_C_HEAP_ARRAY(JfrStackFrame, _nr_of_frames, mtTracing);
+    memcpy(_frames, trace._frames, _nr_of_frames * sizeof(JfrStackFrame));
+  }
+}
+
+JfrStackTraceRepository::StackTrace::~StackTrace() {
+  if (_frames != NULL) {
+    FREE_C_HEAP_ARRAY(JfrStackFrame, _frames, mtTracing);
+  }
+}
+
+bool JfrStackTraceRepository::StackTrace::equals(const JfrStackTrace& rhs) const {
+  if (_reached_root != rhs._reached_root || _nr_of_frames != rhs._nr_of_frames || _hash != rhs._hash) {
+    return false;
+  }
+  for (u4 i = 0; i < _nr_of_frames; ++i) {
+    if (!_frames[i].equals(rhs._frames[i])) {
+      return false;
+    }
+  }
+  return true;
+}
+
+template <typename Writer>
+static void write_stacktrace(Writer& w, traceid id, bool reached_root, u4 nr_of_frames, const JfrStackFrame* frames) {
+  w.write((u8)id);
+  w.write((u1)!reached_root);
+  w.write(nr_of_frames);
+  for (u4 i = 0; i < nr_of_frames; ++i) {
+    frames[i].write(w);
+  }
+}
+
+void JfrStackTraceRepository::StackTrace::write(JfrChunkWriter& sw) const {
+  assert(!_written, "invariant");
+  write_stacktrace(sw, _id, _reached_root, _nr_of_frames, _frames);
+  _written = true;
+}
+
+void JfrStackTraceRepository::StackTrace::write(JfrCheckpointWriter& cpw) const {
+  write_stacktrace(cpw, _id, _reached_root, _nr_of_frames, _frames);
+}
+
+// JfrStackFrame
+
+bool JfrStackFrame::equals(const JfrStackFrame& rhs) const {
+  return _methodid == rhs._methodid && _bci == rhs._bci && _type == rhs._type;
+}
+
+template <typename Writer>
+static void write_frame(Writer& w, traceid methodid, int line, int bci, u1 type) {
+  w.write((u8)methodid);
+  w.write((u4)line);
+  w.write((u4)bci);
+  w.write((u8)type);
+}
+
+void JfrStackFrame::write(JfrChunkWriter& cw) const {
+  write_frame(cw, _methodid, _line, _bci, _type);
+}
+
+void JfrStackFrame::write(JfrCheckpointWriter& cpw) const {
+  write_frame(cpw, _methodid, _line, _bci, _type);
+}
+
+// invariant is that the entry to be resolved actually exists in the table
+const JfrStackTraceRepository::StackTrace* JfrStackTraceRepository::resolve_entry(unsigned int hash, traceid id) const {
+  const size_t index = (hash % TABLE_SIZE);
+  const StackTrace* trace = _table[index];
+  while (trace != NULL && trace->id() != id) {
+    trace = trace->next();
+  }
+  assert(trace != NULL, "invariant");
+  assert(trace->hash() == hash, "invariant");
+  assert(trace->id() == id, "invariant");
+  return trace;
+}
+
+void JfrStackFrame::resolve_lineno() {
+  assert(_method, "no method pointer");
+  assert(_line == 0, "already have linenumber");
+  _line = _method->line_number_from_bci(_bci);
+  _method = NULL;
+}
+
+void JfrStackTrace::set_frame(u4 frame_pos, JfrStackFrame& frame) {
+  assert(frame_pos < _max_frames, "illegal frame_pos");
+  _frames[frame_pos] = frame;
+}
+
+void JfrStackTrace::resolve_linenos() {
+  for(unsigned int i = 0; i < _nr_of_frames; i++) {
+    _frames[i].resolve_lineno();
+  }
+  _lineno = true;
+}
+
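+// Walks the thread's vframeStream, skipping 'skip' frames and classifying each frame as
+// interpreted, JIT compiled or native, while folding method id, bci and type into the trace
+// hash. The walk stops at _max_frames, in which case the trace is marked as truncated.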
+bool JfrStackTrace::record_safe(JavaThread* thread, int skip, bool leakp /* false */) {
+  assert(SafepointSynchronize::safepoint_safe(thread, thread->thread_state())
+         || thread == Thread::current(), "Thread stack needs to be walkable");
+  vframeStream vfs(thread);
+  u4 count = 0;
+  _reached_root = true;
+  for(int i = 0; i < skip; i++) {
+    if (vfs.at_end()) {
+      break;
+    }
+    vfs.next();
+  }
+
+  while (!vfs.at_end()) {
+    if (count >= _max_frames) {
+      _reached_root = false;
+      break;
+    }
+    const Method* method = vfs.method();
+    const traceid mid = JfrTraceId::use(method, leakp);
+    int type = vfs.is_interpreted_frame() ? JfrStackFrame::FRAME_INTERPRETER : JfrStackFrame::FRAME_JIT;
+    int bci = 0;
+    if (method->is_native()) {
+      type = JfrStackFrame::FRAME_NATIVE;
+    } else {
+      bci = vfs.bci();
+    }
+    // Can we determine if it's inlined?
+    _hash = (_hash << 2) + (unsigned int)(((size_t)mid >> 2) + (bci << 4) + type);
+    _frames[count] = JfrStackFrame(mid, bci, type, method);
+    vfs.next();
+    count++;
+  }
+
+  _nr_of_frames = count;
+  return true;
+}
+
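+// Sampler variant: walks via vframeStreamSamples, which validates each sender frame, and
+// abandons the whole sample if an invalid Method* is encountered. Line numbers are resolved
+// eagerly here, so the resulting frames carry no Method*.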
+bool JfrStackTrace::record_thread(JavaThread& thread, frame& frame) {
+  vframeStreamSamples st(&thread, frame, false);
+  u4 count = 0;
+  _reached_root = true;
+
+  while (!st.at_end()) {
+    if (count >= _max_frames) {
+      _reached_root = false;
+      break;
+    }
+    const Method* method = st.method();
+    if (!method->is_valid_method()) {
+      // we throw away everything we've gathered in this sample since
+      // none of it is safe
+      return false;
+    }
+    const traceid mid = JfrTraceId::use(method);
+    int type = st.is_interpreted_frame() ? JfrStackFrame::FRAME_INTERPRETER : JfrStackFrame::FRAME_JIT;
+    int bci = 0;
+    if (method->is_native()) {
+      type = JfrStackFrame::FRAME_NATIVE;
+    } else {
+      bci = st.bci();
+    }
+    const int lineno = method->line_number_from_bci(bci);
+    // Can we determine if it's inlined?
+    _hash = (_hash << 2) + (unsigned int)(((size_t)mid >> 2) + (bci << 4) + type);
+    _frames[count] = JfrStackFrame(mid, bci, type, lineno);
+    st.samples_next();
+    count++;
+  }
+
+  _lineno = true;
+  _nr_of_frames = count;
+  return true;
+}
+
+void JfrStackTraceRepository::write_metadata(JfrCheckpointWriter& writer) {
+  JfrFrameType fct;
+  writer.write_type(TYPE_FRAMETYPE);
+  fct.serialize(writer);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_RECORDER_STACKTRACE_JFRSTACKTRACEREPOSITORY_HPP
+#define SHARE_VM_JFR_RECORDER_STACKTRACE_JFRSTACKTRACEREPOSITORY_HPP
+
+#include "jfr/utilities/jfrAllocation.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+
+class frame;
+class JavaThread;
+class JfrCheckpointSystem;
+class JfrCheckpointWriter;
+class JfrChunkWriter;
+class Method;
+
+class JfrStackFrame {
+ private:
+  const Method* _method;
+  traceid _methodid;
+  int _line;
+  int _bci;
+  u1 _type;
+
+ public:
+  enum {
+    FRAME_INTERPRETER = 0,
+    FRAME_JIT,
+    FRAME_INLINE,
+    FRAME_NATIVE,
+    NUM_FRAME_TYPES
+  };
+
+  JfrStackFrame(const traceid& id, int bci, int type, const Method* method) :
+    _method(method), _methodid(id), _line(0), _bci(bci), _type(type) {}
+  JfrStackFrame(const traceid& id, int bci, int type, int lineno) :
+    _method(NULL), _methodid(id), _line(lineno), _bci(bci), _type(type) {}
+  bool equals(const JfrStackFrame& rhs) const;
+  void write(JfrChunkWriter& cw) const;
+  void write(JfrCheckpointWriter& cpw) const;
+  void resolve_lineno();
+};
+
+class JfrStackTrace : public StackObj {
+  friend class JfrStackTraceRepository;
+ private:
+  JfrStackFrame* _frames;
+  traceid _id;
+  u4 _nr_of_frames;
+  unsigned int _hash;
+  const u4 _max_frames;
+  bool _reached_root;
+  bool _lineno;
+
+ public:
+  JfrStackTrace(JfrStackFrame* frames, u4 max_frames) : _frames(frames),
+                                                        _id(0),
+                                                        _nr_of_frames(0),
+                                                        _hash(0),
+                                                        _max_frames(max_frames),
+                                                        _reached_root(false),
+                                                        _lineno(false) {}
+  bool record_thread(JavaThread& thread, frame& frame);
+  bool record_safe(JavaThread* thread, int skip, bool leakp = false);
+  void resolve_linenos();
+  void set_nr_of_frames(u4 nr_of_frames) { _nr_of_frames = nr_of_frames; }
+  void set_hash(unsigned int hash) { _hash = hash; }
+  void set_frame(u4 frame_pos, JfrStackFrame& frame);
+  void set_reached_root(bool reached_root) { _reached_root = reached_root; }
+  bool full_stacktrace() const { return _reached_root; }
+  bool have_lineno() const { return _lineno; }
+};
+
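+// The repository is a chained hash table of TABLE_SIZE buckets keyed by the trace hash.
+// Ids come from the monotonically increasing _next_id; mutations and chunk writes are
+// serialized by JfrStacktrace_lock (taken in the .cpp).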
+class JfrStackTraceRepository : public JfrCHeapObj {
+  friend class JfrRecorder;
+  friend class JfrRecorderService;
+  friend class ObjectSampler;
+  friend class WriteObjectSampleStacktrace;
+
+  class StackTrace : public JfrCHeapObj {
+    friend class JfrStackTrace;
+    friend class JfrStackTraceRepository;
+   private:
+    StackTrace* _next;
+    JfrStackFrame* _frames;
+    const traceid _id;
+    u4 _nr_of_frames;
+    unsigned int _hash;
+    bool _reached_root;
+    mutable bool _written;
+
+    unsigned int hash() const { return _hash; }
+    bool should_write() const { return !_written; }
+
+   public:
+    StackTrace(traceid id, const JfrStackTrace& trace, StackTrace* next);
+    ~StackTrace();
+    traceid id() const { return _id; }
+    StackTrace* next() const { return _next; }
+    void write(JfrChunkWriter& cw) const;
+    void write(JfrCheckpointWriter& cpw) const;
+    bool equals(const JfrStackTrace& rhs) const;
+  };
+
+ private:
+  static const u4 TABLE_SIZE = 2053;
+  StackTrace* _table[TABLE_SIZE];
+  traceid _next_id;
+  u4 _entries;
+
+  size_t write_impl(JfrChunkWriter& cw, bool clear);
+  traceid record_for(JavaThread* thread, int skip, JfrStackFrame* frames, u4 max_frames);
+  traceid record_for(JavaThread* thread, int skip, JfrStackFrame* frames, u4 max_frames, unsigned int* hash);
+  traceid add_trace(const JfrStackTrace& stacktrace);
+  const StackTrace* resolve_entry(unsigned int hash, traceid id) const;
+
+  static void write_metadata(JfrCheckpointWriter& cpw);
+
+  JfrStackTraceRepository();
+  static JfrStackTraceRepository& instance();
+ public:
+  static JfrStackTraceRepository* create();
+  bool initialize();
+  static void destroy();
+  static traceid add(const JfrStackTrace& stacktrace);
+  static traceid record(Thread* thread, int skip = 0);
+  static traceid record(Thread* thread, int skip, unsigned int* hash);
+  traceid write(JfrCheckpointWriter& cpw, traceid id, unsigned int hash);
+  size_t write(JfrChunkWriter& cw, bool clear);
+  size_t clear();
+};
+
+#endif // SHARE_VM_JFR_RECORDER_STACKTRACE_JFRSTACKTRACEREPOSITORY_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/storage/jfrBuffer.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/recorder/storage/jfrBuffer.hpp"
+#include "runtime/atomic.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
+#include "runtime/thread.inline.hpp"
+#include "jfr/recorder/jfrRecorder.hpp"
+
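+// _top is claimed by CASing MUTEX_CLAIM (NULL) into it (concurrent_top()); the real value is
+// published back with set_concurrent_top(), and stable_top() spins while the claim is held.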
+static const u1* const MUTEX_CLAIM = NULL;
+
+JfrBuffer::JfrBuffer() : _next(NULL),
+                         _prev(NULL),
+                         _identity(NULL),
+                         _pos(NULL),
+                         _top(NULL),
+                         _flags(0),
+                         _header_size(0),
+                         _size(0) {}
+
+bool JfrBuffer::initialize(size_t header_size, size_t size, const void* id /* NULL */) {
+  _header_size = (u2)header_size;
+  _size = (u4)(size / BytesPerWord);
+  assert(_identity == NULL, "invariant");
+  _identity = id;
+  set_pos(start());
+  set_top(start());
+  assert(_next == NULL, "invariant");
+  assert(free_size() == size, "invariant");
+  assert(!transient(), "invariant");
+  assert(!lease(), "invariant");
+  assert(!retired(), "invariant");
+  return true;
+}
+
+void JfrBuffer::reinitialize() {
+  assert(!lease(), "invariant");
+  assert(!transient(), "invariant");
+  set_pos(start());
+  clear_retired();
+  set_top(start());
+}
+
+void JfrBuffer::concurrent_reinitialization() {
+  concurrent_top();
+  assert(!lease(), "invariant");
+  assert(!transient(), "invariant");
+  set_pos(start());
+  set_concurrent_top(start());
+  clear_retired();
+}
+
+size_t JfrBuffer::discard() {
+  size_t discard_size = unflushed_size();
+  set_top(pos());
+  return discard_size;
+}
+
+const u1* JfrBuffer::stable_top() const {
+  const u1* current_top;
+  do {
+    current_top = (const u1*)OrderAccess::load_ptr_acquire(&_top);
+  } while (MUTEX_CLAIM == current_top);
+  return current_top;
+}
+
+const u1* JfrBuffer::top() const {
+  return _top;
+}
+
+void JfrBuffer::set_top(const u1* new_top) {
+  _top = new_top;
+}
+
+const u1* JfrBuffer::concurrent_top() const {
+  do {
+    const u1* current_top = stable_top();
+    if (Atomic::cmpxchg_ptr((void*)MUTEX_CLAIM, &_top, (void*)current_top) == current_top) {
+      return current_top;
+    }
+  } while (true);
+}
+
+void JfrBuffer::set_concurrent_top(const u1* new_top) {
+  assert(new_top != MUTEX_CLAIM, "invariant");
+  assert(new_top <= end(), "invariant");
+  assert(new_top >= start(), "invariant");
+  assert(top() == MUTEX_CLAIM, "invariant");
+  OrderAccess::release_store_ptr(&_top, (void*)new_top);
+}
+
+size_t JfrBuffer::unflushed_size() const {
+  return pos() - stable_top();
+}
+
+void JfrBuffer::acquire(const void* id) {
+  assert(id != NULL, "invariant");
+  const void* current_id;
+  do {
+    current_id = OrderAccess::load_ptr_acquire(&_identity);
+  } while (current_id != NULL || Atomic::cmpxchg_ptr((void*)id, &_identity, (void*)current_id) != current_id);
+}
+
+bool JfrBuffer::try_acquire(const void* id) {
+  assert(id != NULL, "invariant");
+  const void* const current_id = OrderAccess::load_ptr_acquire(&_identity);
+  return current_id == NULL && Atomic::cmpxchg_ptr((void*)id, &_identity, (void*)current_id) == current_id;
+}
+
+void JfrBuffer::release() {
+  OrderAccess::release_store_ptr(&_identity, (void*)NULL);
+}
+
+void JfrBuffer::clear_identity() {
+  _identity = NULL;
+}
+
+#ifdef ASSERT
+static bool validate_to(const JfrBuffer* const to, size_t size) {
+  assert(to != NULL, "invariant");
+  if (!JfrRecorder::is_shutting_down()) assert(to->acquired_by_self(), "invariant");
+  assert(to->free_size() >= size, "invariant");
+  return true;
+}
+
+static bool validate_concurrent_this(const JfrBuffer* const t, size_t size) {
+  assert(t->top() == MUTEX_CLAIM, "invariant");
+  return true;
+}
+
+static bool validate_this(const JfrBuffer* const t, size_t size) {
+  assert(t->top() + size <= t->pos(), "invariant");
+  return true;
+}
+
+bool JfrBuffer::acquired_by_self() const {
+  return identity() == Thread::current();
+}
+#endif // ASSERT
+
+void JfrBuffer::move(JfrBuffer* const to, size_t size) {
+  assert(validate_to(to, size), "invariant");
+  assert(validate_this(this, size), "invariant");
+  const u1* current_top = top();
+  assert(current_top != NULL, "invariant");
+  memcpy(to->pos(), current_top, size);
+  to->set_pos(size);
+  to->release();
+  set_top(current_top + size);
+}
+
+void JfrBuffer::concurrent_move_and_reinitialize(JfrBuffer* const to, size_t size) {
+  assert(validate_to(to, size), "invariant");
+  const u1* current_top = concurrent_top();
+  assert(validate_concurrent_this(this, size), "invariant");
+  const size_t actual_size = MIN2(size, (size_t)(pos() - current_top));
+  assert(actual_size <= size, "invariant");
+  memcpy(to->pos(), current_top, actual_size);
+  to->set_pos(actual_size);
+  set_pos(start());
+  to->release();
+  set_concurrent_top(start());
+}
+
+// flags
+enum FLAG {
+  RETIRED = 1,
+  TRANSIENT = 2,
+  LEASE = 4
+};
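+// Note: TRANSIENT and LEASE are set and cleared with plain stores, whereas RETIRED goes
+// through the acquire/release helpers below since it may be observed by threads other than
+// the current owner.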
+
+bool JfrBuffer::transient() const {
+  return (u1)TRANSIENT == (_flags & (u1)TRANSIENT);
+}
+
+void JfrBuffer::set_transient() {
+  _flags |= (u1)TRANSIENT;
+  assert(transient(), "invariant");
+}
+
+void JfrBuffer::clear_transient() {
+  if (transient()) {
+    _flags ^= (u1)TRANSIENT;
+  }
+  assert(!transient(), "invariant");
+}
+
+bool JfrBuffer::lease() const {
+  return (u1)LEASE == (_flags & (u1)LEASE);
+}
+
+void JfrBuffer::set_lease() {
+  _flags |= (u1)LEASE;
+  assert(lease(), "invariant");
+}
+
+void JfrBuffer::clear_lease() {
+  if (lease()) {
+    _flags ^= (u1)LEASE;
+  }
+  assert(!lease(), "invariant");
+}
+
+static u2 load_acquire_flags(const u2* const flags) {
+  return OrderAccess::load_acquire((volatile jushort *)flags);
+}
+
+static void release_store_flags(u2* const flags, u2 new_flags) {
+  OrderAccess::release_store(flags, new_flags);
+}
+
+bool JfrBuffer::retired() const {
+  return (u1)RETIRED == (load_acquire_flags(&_flags) & (u1)RETIRED);
+}
+
+void JfrBuffer::set_retired() {
+  const u2 new_flags = load_acquire_flags(&_flags) | (u1)RETIRED;
+  release_store_flags(&_flags, new_flags);
+}
+
+void JfrBuffer::clear_retired() {
+  u2 new_flags = load_acquire_flags(&_flags);
+  if ((u1)RETIRED == (new_flags & (u1)RETIRED)) {
+    new_flags ^= (u1)RETIRED;
+    release_store_flags(&_flags, new_flags);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/storage/jfrBuffer.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_RECORDER_STORAGE_JFRBUFFER_HPP
+#define SHARE_VM_JFR_RECORDER_STORAGE_JFRBUFFER_HPP
+
+#include "memory/allocation.hpp"
+
+//
+// Represents a piece of committed memory.
+//
+// u1* _pos <-- next store position
+// u1* _top <-- next unflushed position
+//
+// const void* _identity <-- acquired by
+//
+// Must be the owner before attempting stores.
+// Use acquire() and/or try_acquire() for exclusive access
+// to the (entire) buffer (cas identity).
+//
+// Stores to the buffer should uphold transactional semantics.
+// _pos must only be updated to its new value after all intended stores have completed.
+// The relation between _pos and _top must hold atomically,
+// e.g. the delta must always be fully parsable.
+// _top can move concurrently by other threads but is always <= _pos.
+//
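+// An illustrative outline of an exclusive store (not a complete writer):
+//
+//   if (buffer->try_acquire(thread)) {
+//     u1* p = buffer->pos();
+//     ... write n event bytes at p, keeping p + n <= buffer->end() ...
+//     buffer->set_pos(p + n);  // commit: the pos/top delta is now fully parsable
+//     buffer->release();
+//   }
+//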
+class JfrBuffer {
+ private:
+  JfrBuffer* _next;
+  JfrBuffer* _prev;
+  const void* volatile _identity;
+  u1* _pos;
+  mutable const u1* volatile _top;
+  u2 _flags;
+  u2 _header_size;
+  u4 _size;
+
+  const u1* stable_top() const;
+  void clear_flags();
+
+ public:
+  JfrBuffer();
+  bool initialize(size_t header_size, size_t size, const void* id = NULL);
+  void reinitialize();
+  void concurrent_reinitialization();
+  size_t discard();
+  JfrBuffer* next() const {
+    return _next;
+  }
+
+  JfrBuffer* prev() const {
+    return _prev;
+  }
+
+  void set_next(JfrBuffer* next) {
+    _next = next;
+  }
+
+  void set_prev(JfrBuffer* prev) {
+    _prev = prev;
+  }
+
+  const u1* start() const {
+    return ((const u1*)this) + _header_size;
+  }
+
+  u1* start() {
+    return ((u1*)this) + _header_size;
+  }
+
+  const u1* end() const {
+    return start() + size();
+  }
+
+  const u1* pos() const {
+    return _pos;
+  }
+
+  u1* pos() {
+    return _pos;
+  }
+
+  u1** pos_address() {
+    return (u1**)&_pos;
+  }
+
+  void set_pos(u1* new_pos) {
+    assert(new_pos <= end(), "invariant");
+    _pos = new_pos;
+  }
+
+  void set_pos(size_t size) {
+    assert(_pos + size <= end(), "invariant");
+    _pos += size;
+  }
+
+  const u1* top() const;
+  void set_top(const u1* new_top);
+  const u1* concurrent_top() const;
+  void set_concurrent_top(const u1* new_top);
+
+  size_t header_size() const {
+    return _header_size;
+  }
+
+  size_t size() const {
+    return _size * BytesPerWord;
+  }
+
+  size_t total_size() const {
+    return header_size() + size();
+  }
+
+  size_t free_size() const {
+    return end() - pos();
+  }
+
+  size_t unflushed_size() const;
+
+  bool empty() const {
+    return pos() == start();
+  }
+
+  const void* identity() const {
+    return _identity;
+  }
+
+  void clear_identity();
+
+  void acquire(const void* id);
+  bool try_acquire(const void* id);
+  void release();
+
+  void move(JfrBuffer* const to, size_t size);
+  void concurrent_move_and_reinitialize(JfrBuffer* const to, size_t size);
+
+  bool transient() const;
+  void set_transient();
+  void clear_transient();
+
+  bool lease() const;
+  void set_lease();
+  void clear_lease();
+
+  bool retired() const;
+  void set_retired();
+  void clear_retired();
+
+  debug_only(bool acquired_by_self() const;)
+};
+
+class JfrAgeNode : public JfrBuffer {
+ private:
+  JfrBuffer* _retired;
+
+ public:
+  JfrAgeNode() : _retired(NULL) {}
+  void set_retired_buffer(JfrBuffer* retired) {
+    _retired = retired;
+  }
+  JfrBuffer* retired_buffer() const {
+    return _retired;
+  }
+};
+
+#endif // SHARE_VM_JFR_RECORDER_STORAGE_JFRBUFFER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/storage/jfrMemorySpace.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#ifndef SHARE_VM_JFR_RECORDER_STORAGE_JFRMEMORYSPACE_HPP
+#define SHARE_VM_JFR_RECORDER_STORAGE_JFRMEMORYSPACE_HPP
+
+#include "jfr/utilities/jfrAllocation.hpp"
+#include "jfr/utilities/jfrDoublyLinkedList.hpp"
+#include "jfr/utilities/jfrIterator.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+#include "runtime/os.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+
+template <typename T, template <typename> class RetrievalType, typename Callback>
+class JfrMemorySpace : public JfrCHeapObj {
+ public:
+  typedef T Type;
+  typedef RetrievalType<JfrMemorySpace<T, RetrievalType, Callback> > Retrieval;
+  typedef JfrDoublyLinkedList<Type> List;
+  typedef StopOnNullIterator<List> Iterator;
+ private:
+  List _free;
+  List _full;
+  size_t _min_elem_size;
+  size_t _limit_size;
+  size_t _cache_count;
+  Callback* _callback;
+
+  bool should_populate_cache() const { return _free.count() < _cache_count; }
+
+ public:
+  JfrMemorySpace(size_t min_elem_size, size_t limit_size, size_t cache_count, Callback* callback);
+  ~JfrMemorySpace();
+  bool initialize();
+
+  size_t min_elem_size() const { return _min_elem_size; }
+  size_t limit_size() const { return _limit_size; }
+
+  bool has_full() const { return _full.head() != NULL; }
+  bool has_free() const { return _free.head() != NULL; }
+  bool is_full_empty() const { return !has_full(); }
+  bool is_free_empty() const { return !has_free(); }
+
+  size_t full_count() const { return _full.count(); }
+  size_t free_count() const { return _free.count(); }
+
+  List& full() { return _full; }
+  const List& full() const { return _full; }
+  List& free() { return _free; }
+  const List& free() const { return _free; }
+
+  Type* full_head() { return _full.head(); }
+  Type* full_tail() { return _full.tail(); }
+  Type* free_head() { return _free.head(); }
+  Type* free_tail() { return _free.tail(); }
+
+  void insert_free_head(Type* t) { _free.prepend(t); }
+  void insert_free_tail(Type* t) { _free.append(t); }
+  void insert_free_tail(Type* t, Type* tail, size_t count) { _free.append_list(t, tail, count); }
+  void insert_full_head(Type* t) { _full.prepend(t); }
+  void insert_full_tail(Type* t) { _full.append(t); }
+  void insert_full_tail(Type* t, Type* tail, size_t count) { _full.append_list(t, tail, count); }
+
+  Type* remove_free(Type* t) { return _free.remove(t); }
+  Type* remove_full(Type* t) { return _full.remove(t); }
+  Type* remove_free_tail() { return _free.remove(_free.tail()); }
+  Type* remove_full_tail() { return _full.remove(_full.tail()); }
+  Type* clear_full(bool return_tail = false) { return _full.clear(return_tail); }
+  Type* clear_free(bool return_tail = false) { return _free.clear(return_tail); }
+  void release_full(Type* t);
+  void release_free(Type* t);
+
+  void register_full(Type* t, Thread* thread) { _callback->register_full(t, thread); }
+  void lock() { _callback->lock(); }
+  void unlock() { _callback->unlock(); }
+  DEBUG_ONLY(bool is_locked() const { return _callback->is_locked(); })
+
+  Type* allocate(size_t size);
+  void deallocate(Type* t);
+  Type* get(size_t size, Thread* thread) { return Retrieval::get(size, this, thread); }
+
+  template <typename IteratorCallback, typename IteratorType>
+  void iterate(IteratorCallback& callback, bool full = true, jfr_iter_direction direction = forward);
+
+  debug_only(bool in_full_list(const Type* t) const { return _full.in_list(t); })
+  debug_only(bool in_free_list(const Type* t) const { return _free.in_list(t); })
+};
+
+// allocations are even multiples of the mspace min size
+inline u8 align_allocation_size(u8 requested_size, size_t min_elem_size) {
+  assert((int)min_elem_size % os::vm_page_size() == 0, "invariant");
+  u8 alloc_size_bytes = min_elem_size;
+  while (requested_size > alloc_size_bytes) {
+    alloc_size_bytes <<= 1;
+  }
+  assert((int)alloc_size_bytes % os::vm_page_size() == 0, "invariant");
+  return alloc_size_bytes;
+}
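+// e.g. align_allocation_size(100K, 64K) doubles up from the 64K minimum and returns 128K.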
+
+template <typename T, template <typename> class RetrievalType, typename Callback>
+T* JfrMemorySpace<T, RetrievalType, Callback>::allocate(size_t size) {
+  const u8 aligned_size_bytes = align_allocation_size(size, _min_elem_size);
+  void* const allocation = JfrCHeapObj::new_array<u1>(aligned_size_bytes + sizeof(T));
+  if (allocation == NULL) {
+    return NULL;
+  }
+  T* const t = new (allocation) T;
+  assert(t != NULL, "invariant");
+  if (!t->initialize(sizeof(T), aligned_size_bytes)) {
+    JfrCHeapObj::free(t, aligned_size_bytes + sizeof(T));
+    return NULL;
+  }
+  return t;
+}
+
+template <typename T, template <typename> class RetrievalType, typename Callback>
+void JfrMemorySpace<T, RetrievalType, Callback>::deallocate(T* t) {
+  assert(t != NULL, "invariant");
+  assert(!_free.in_list(t), "invariant");
+  assert(!_full.in_list(t), "invariant");
+  assert(t != NULL, "invariant");
+  JfrCHeapObj::free(t, t->total_size());
+}
+
+template <typename Mspace>
+class MspaceLock {
+ private:
+  Mspace* _mspace;
+ public:
+  MspaceLock(Mspace* mspace) : _mspace(mspace) { _mspace->lock(); }
+  ~MspaceLock() { _mspace->unlock(); }
+};
+
+template <typename Mspace>
+class ReleaseOp : public StackObj {
+ private:
+  Mspace* _mspace;
+  Thread* _thread;
+  bool _release_full;
+ public:
+  typedef typename Mspace::Type Type;
+  ReleaseOp(Mspace* mspace, Thread* thread, bool release_full = true) : _mspace(mspace), _thread(thread), _release_full(release_full) {}
+  bool process(Type* t);
+  size_t processed() const { return 0; }
+};
+
+#endif // SHARE_VM_JFR_RECORDER_STORAGE_JFRMEMORYSPACE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/storage/jfrMemorySpace.inline.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,385 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_RECORDER_STORAGE_JFRMEMORYSPACE_INLINE_HPP
+#define SHARE_VM_JFR_RECORDER_STORAGE_JFRMEMORYSPACE_INLINE_HPP
+
+#include "jfr/recorder/storage/jfrMemorySpace.hpp"
+
+template <typename T, template <typename> class RetrievalType, typename Callback>
+JfrMemorySpace<T, RetrievalType, Callback>::
+JfrMemorySpace(size_t min_elem_size, size_t limit_size, size_t cache_count, Callback* callback) :
+  _free(),
+  _full(),
+  _min_elem_size(min_elem_size),
+  _limit_size(limit_size),
+  _cache_count(cache_count),
+  _callback(callback) {}
+
+template <typename T, template <typename> class RetrievalType, typename Callback>
+JfrMemorySpace<T, RetrievalType, Callback>::~JfrMemorySpace() {
+  Iterator full_iter(_full);
+  while (full_iter.has_next()) {
+    Type* t = full_iter.next();
+    _full.remove(t);
+    deallocate(t);
+  }
+  Iterator free_iter(_free);
+  while (free_iter.has_next()) {
+    Type* t = free_iter.next();
+    _free.remove(t);
+    deallocate(t);
+  }
+}
+
+template <typename T, template <typename> class RetrievalType, typename Callback>
+bool JfrMemorySpace<T, RetrievalType, Callback>::initialize() {
+  assert(_min_elem_size % os::vm_page_size() == 0, "invariant");
+  assert(_limit_size % os::vm_page_size() == 0, "invariant");
+  // pre-allocate cache elements
+  for (size_t i = 0; i < _cache_count; ++i) {
+    Type* const t = allocate(_min_elem_size);
+    if (t == NULL) {
+      return false;
+    }
+    insert_free_head(t);
+  }
+  assert(_free.count() == _cache_count, "invariant");
+  return true;
+}
+
+template <typename T, template <typename> class RetrievalType, typename Callback>
+inline void JfrMemorySpace<T, RetrievalType, Callback>::release_full(T* t) {
+  assert(is_locked(), "invariant");
+  assert(t != NULL, "invariant");
+  assert(_full.in_list(t), "invariant");
+  remove_full(t);
+  assert(!_full.in_list(t), "invariant");
+  if (t->transient()) {
+    deallocate(t);
+    return;
+  }
+  assert(t->empty(), "invariant");
+  assert(!t->retired(), "invariant");
+  assert(t->identity() == NULL, "invariant");
+  if (should_populate_cache()) {
+    assert(!_free.in_list(t), "invariant");
+    insert_free_head(t);
+  } else {
+    deallocate(t);
+  }
+}
+
+template <typename T, template <typename> class RetrievalType, typename Callback>
+inline void JfrMemorySpace<T, RetrievalType, Callback>::release_free(T* t) {
+  assert(is_locked(), "invariant");
+  assert(t != NULL, "invariant");
+  assert(_free.in_list(t), "invariant");
+  if (t->transient()) {
+    remove_free(t);
+    assert(!_free.in_list(t), "invariant");
+    deallocate(t);
+    return;
+  }
+  assert(t->empty(), "invariant");
+  assert(!t->retired(), "invariant");
+  assert(t->identity() == NULL, "invariant");
+  if (!should_populate_cache()) {
+    remove_free(t);
+    assert(!_free.in_list(t), "invariant");
+    deallocate(t);
+  }
+}
+
+template <typename T, template <typename> class RetrievalType, typename Callback>
+template <typename IteratorCallback, typename IteratorType>
+inline void JfrMemorySpace<T, RetrievalType, Callback>
+::iterate(IteratorCallback& callback, bool full, jfr_iter_direction direction) {
+  IteratorType iterator(full ? _full : _free, direction);
+  while (iterator.has_next()) {
+    callback.process(iterator.next());
+  }
+}
+
+template <typename Mspace>
+inline size_t size_adjustment(size_t size, Mspace* mspace) {
+  assert(mspace != NULL, "invariant");
+  static const size_t min_elem_size = mspace->min_elem_size();
+  if (size < min_elem_size) {
+    size = min_elem_size;
+  }
+  return size;
+}
+
+template <typename Mspace>
+inline typename Mspace::Type* mspace_allocate(size_t size, Mspace* mspace) {
+  return mspace->allocate(size_adjustment(size, mspace));
+}
+
+template <typename Mspace>
+inline typename Mspace::Type* mspace_allocate_acquired(size_t size, Mspace* mspace, Thread* thread) {
+  typename Mspace::Type* const t = mspace_allocate(size, mspace);
+  if (t == NULL) return NULL;
+  t->acquire(thread);
+  return t;
+}
+
+template <typename Mspace>
+inline typename Mspace::Type* mspace_allocate_transient(size_t size, Mspace* mspace, Thread* thread) {
+  typename Mspace::Type* const t = mspace_allocate_acquired(size, mspace, thread);
+  if (t == NULL) return NULL;
+  assert(t->acquired_by_self(), "invariant");
+  t->set_transient();
+  return t;
+}
+
+template <typename Mspace>
+inline typename Mspace::Type* mspace_allocate_transient_lease(size_t size, Mspace* mspace, Thread* thread) {
+  typename Mspace::Type* const t = mspace_allocate_transient(size, mspace, thread);
+  if (t == NULL) return NULL;
+  assert(t->acquired_by_self(), "invariant");
+  assert(t->transient(), "invariant");
+  t->set_lease();
+  return t;
+}
+
+template <typename Mspace>
+inline typename Mspace::Type* mspace_allocate_to_full(size_t size, Mspace* mspace, Thread* thread) {
+  assert(mspace->is_locked(), "invariant");
+  typename Mspace::Type* const t = mspace_allocate_acquired(size, mspace, thread);
+  if (t == NULL) return NULL;
+  mspace->insert_full_head(t);
+  return t;
+}
+
+template <typename Mspace>
+inline typename Mspace::Type* mspace_allocate_transient_to_full(size_t size, Mspace* mspace, Thread* thread) {
+  typename Mspace::Type* const t = mspace_allocate_transient(size, mspace, thread);
+  if (t == NULL) return NULL;
+  MspaceLock<Mspace> lock(mspace);
+  mspace->insert_full_head(t);
+  return t;
+}
+
+template <typename Mspace>
+inline typename Mspace::Type* mspace_allocate_transient_lease_to_full(size_t size, Mspace* mspace, Thread* thread) {
+  typename Mspace::Type* const t = mspace_allocate_transient_lease(size, mspace, thread);
+  if (t == NULL) return NULL;
+  assert(t->acquired_by_self(), "invariant");
+  assert(t->transient(), "invariant");
+  assert(t->lease(), "invariant");
+  MspaceLock<Mspace> lock(mspace);
+  mspace->insert_full_head(t);
+  return t;
+}
+
+template <typename Mspace>
+inline typename Mspace::Type* mspace_allocate_transient_lease_to_free(size_t size, Mspace* mspace, Thread* thread) {
+  typename Mspace::Type* const t = mspace_allocate_transient_lease(size, mspace, thread);
+  if (t == NULL) return NULL;
+  assert(t->acquired_by_self(), "invariant");
+  assert(t->transient(), "invariant");
+  assert(t->lease(), "invariant");
+  MspaceLock<Mspace> lock(mspace);
+  mspace->insert_free_head(t);
+  return t;
+}
+
+template <typename Mspace>
+inline typename Mspace::Type* mspace_get_free(size_t size, Mspace* mspace, Thread* thread) {
+  return mspace->get(size, thread);
+}
+
+template <typename Mspace>
+inline typename Mspace::Type* mspace_get_free_with_retry(size_t size, Mspace* mspace, size_t retry_count, Thread* thread) {
+  assert(size <= mspace->min_elem_size(), "invariant");
+  for (size_t i = 0; i < retry_count; ++i) {
+    typename Mspace::Type* const t = mspace_get_free(size, mspace, thread);
+    if (t != NULL) {
+      return t;
+    }
+  }
+  return NULL;
+}
+
+template <typename Mspace>
+inline typename Mspace::Type* mspace_get_free_with_detach(size_t size, Mspace* mspace, Thread* thread) {
+  typename Mspace::Type* t = mspace_get_free(size, mspace, thread);
+  if (t != NULL) {
+    mspace->remove_free(t);
+  }
+  return t;
+}
+
+template <typename Mspace>
+inline typename Mspace::Type* mspace_get_free_to_full(size_t size, Mspace* mspace, Thread* thread) {
+  assert(size <= mspace->min_elem_size(), "invariant");
+  assert(mspace->is_locked(), "invariant");
+  typename Mspace::Type* t = mspace_get_free(size, mspace, thread);
+  if (t == NULL) {
+    return NULL;
+  }
+  assert(t->acquired_by_self(), "invariant");
+  move_to_head(t, mspace->free(), mspace->full());
+  return t;
+}
+
+template <typename Mspace>
+inline typename Mspace::Type* mspace_get_to_full(size_t size, Mspace* mspace, Thread* thread) {
+  size = size_adjustment(size, mspace);
+  MspaceLock<Mspace> lock(mspace);
+  if (size <= mspace->min_elem_size()) {
+    typename Mspace::Type* const t = mspace_get_free_to_full(size, mspace, thread);
+    if (t != NULL) {
+      return t;
+    }
+  }
+  return mspace_allocate_to_full(size, mspace, thread);
+}
+
+template <typename Mspace>
+inline typename Mspace::Type* mspace_get_free_lease_with_retry(size_t size, Mspace* mspace, size_t retry_count, Thread* thread) {
+  typename Mspace::Type* t = mspace_get_free_with_retry(size, mspace, retry_count, thread);
+  if (t != NULL) {
+    t->set_lease();
+  }
+  return t;
+}
+
+template <typename Mspace>
+inline typename Mspace::Type* mspace_get_lease(size_t size, Mspace* mspace, Thread* thread) {
+  typename Mspace::Type* t;
+  t = mspace_get_free_lease(size, mspace, thread);
+  if (t != NULL) {
+    assert(t->acquired_by_self(), "invariant");
+    assert(t->lease(), "invariant");
+    return t;
+  }
+  t = mspace_allocate_transient_to_full(size, mspace, thread);
+  if (t != NULL) {
+    t->set_lease();
+  }
+  return t;
+}
+
+template <typename Mspace>
+inline void mspace_release_full(typename Mspace::Type* t, Mspace* mspace) {
+  assert(t != NULL, "invariant");
+  assert(t->unflushed_size() == 0, "invariant");
+  assert(mspace != NULL, "invariant");
+  assert(mspace->is_locked(), "invariant");
+  mspace->release_full(t);
+}
+
+template <typename Mspace>
+inline void mspace_release_free(typename Mspace::Type* t, Mspace* mspace) {
+  assert(t != NULL, "invariant");
+  assert(t->unflushed_size() == 0, "invariant");
+  assert(mspace != NULL, "invariant");
+  assert(mspace->is_locked(), "invariant");
+  mspace->release_free(t);
+}
+
+template <typename Mspace>
+inline void mspace_release_full_critical(typename Mspace::Type* t, Mspace* mspace) {
+  MspaceLock<Mspace> lock(mspace);
+  mspace_release_full(t, mspace);
+}
+
+template <typename Mspace>
+inline void mspace_release_free_critical(typename Mspace::Type* t, Mspace* mspace) {
+  MspaceLock<Mspace> lock(mspace);
+  mspace_release_free(t, mspace);
+}
+
+template <typename List>
+inline void move_to_head(typename List::Node* t, List& from, List& to) {
+  assert(from.in_list(t), "invariant");
+  to.prepend(from.remove(t));
+}
+
+template <typename Processor, typename Mspace, typename Iterator>
+inline void process_free_list_iterator_control(Processor& processor, Mspace* mspace, jfr_iter_direction direction = forward) {
+  mspace->template iterate<Processor, Iterator>(processor, false, direction);
+}
+
+template <typename Processor, typename Mspace, typename Iterator>
+inline void process_full_list_iterator_control(Processor& processor, Mspace* mspace, jfr_iter_direction direction = forward) {
+  mspace->template iterate<Processor, Iterator>(processor, true, direction);
+}
+
+template <typename Processor, typename Mspace>
+inline void process_full_list(Processor& processor, Mspace* mspace, jfr_iter_direction direction = forward) {
+  assert(mspace != NULL, "invariant");
+  if (mspace->is_full_empty()) return;
+  process_full_list_iterator_control<Processor, Mspace, typename Mspace::Iterator>(processor, mspace, direction);
+}
+
+template <typename Processor, typename Mspace>
+inline void process_free_list(Processor& processor, Mspace* mspace, jfr_iter_direction direction = forward) {
+  assert(mspace != NULL, "invariant");
+  assert(mspace->has_free(), "invariant");
+  process_free_list_iterator_control<Processor, Mspace, typename Mspace::Iterator>(processor, mspace, direction);
+}
+
+template <typename Mspace>
+inline bool ReleaseOp<Mspace>::process(typename Mspace::Type* t) {
+  assert(t != NULL, "invariant");
+  if (t->retired() || t->try_acquire(_thread)) {
+    if (t->transient()) {
+      if (_release_full) {
+        mspace_release_full_critical(t, _mspace);
+      } else {
+        mspace_release_free_critical(t, _mspace);
+      }
+      return true;
+    }
+    t->reinitialize();
+    assert(t->empty(), "invariant");
+    t->release(); // publish
+  }
+  return true;
+}
+
+#ifdef ASSERT
+template <typename T>
+inline void assert_migration_state(const T* old, const T* new_buffer, size_t used, size_t requested) {
+  assert(old != NULL, "invariant");
+  assert(new_buffer != NULL, "invariant");
+  assert(old->pos() >= old->start(), "invariant");
+  assert(old->pos() + used <= old->end(), "invariant");
+  assert(new_buffer->free_size() >= (used + requested), "invariant");
+}
+#endif // ASSERT
+
+template <typename T>
+inline void migrate_outstanding_writes(const T* old, T* new_buffer, size_t used, size_t requested) {
+  DEBUG_ONLY(assert_migration_state(old, new_buffer, used, requested);)
+  if (used > 0) {
+    memcpy(new_buffer->pos(), old->pos(), used);
+  }
+}
+
+#endif // SHARE_VM_JFR_RECORDER_STORAGE_JFRMEMORYSPACE_INLINE_HPP
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/storage/jfrMemorySpaceRetrieval.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_RECORDER_STORAGE_JFRMEMORYSPACERETRIEVAL_HPP
+#define SHARE_VM_JFR_RECORDER_STORAGE_JFRMEMORYSPACERETRIEVAL_HPP
+
+#include "memory/allocation.hpp"
+#include "jfr/recorder/repository/jfrChunkWriter.hpp"
+#include "jfr/recorder/storage/jfrBuffer.hpp"
+#include "jfr/utilities/jfrAllocation.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+
+/*
+* Some policy classes for getting mspace memory
+*/
+
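+// JfrMspaceRetrieval scans an iterator over the free list, retiring buffers whose remaining
+// free size is too small. The Alternating variant flips the scan direction on each call to
+// spread contention over both list ends, the Sequential variant always scans forward, and the
+// Exclusive/ThreadLocal variants assume the mspace lock is held and hand out the head element.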
+template <typename Mspace>
+class JfrMspaceRetrieval : AllStatic {
+ public:
+  typedef typename Mspace::Type Type;
+  static Type* get(size_t size, Mspace* mspace, typename Mspace::Iterator& iterator, Thread* thread) {
+    while (iterator.has_next()) {
+      Type* const t = iterator.next();
+      if (t->retired()) continue;
+      if (t->try_acquire(thread)) {
+        assert(!t->retired(), "invariant");
+        if (t->free_size() >= size) {
+          return t;
+        }
+        t->set_retired();
+        mspace->register_full(t, thread);
+      }
+    }
+    return NULL;
+  }
+};
+
+template <typename Mspace>
+class JfrMspaceAlternatingRetrieval {
+ private:
+  // provides a stochastic distribution over the "deque" endpoints; racy is ok here
+  static bool _last_access;
+ public:
+  typedef typename Mspace::Type Type;
+  static Type* get(size_t size, Mspace* mspace, Thread* thread) {
+    typename Mspace::Iterator iterator(mspace->free(), (_last_access = !_last_access) ? forward : backward);
+    return JfrMspaceRetrieval<Mspace>::get(size, mspace, iterator, thread);
+  }
+};
+
+template <typename Mspace>
+bool JfrMspaceAlternatingRetrieval<Mspace>::_last_access = false;
+
+template <typename Mspace>
+class JfrMspaceSequentialRetrieval {
+ public:
+  typedef typename Mspace::Type Type;
+  static Type* get(size_t size, Mspace* mspace, Thread* thread) {
+    typename Mspace::Iterator iterator(mspace->free());
+    return JfrMspaceRetrieval<Mspace>::get(size, mspace, iterator, thread);
+  }
+};
+
+template <typename Mspace>
+class JfrExclusiveRetrieval : AllStatic {
+ public:
+  typedef typename Mspace::Type Type;
+  static Type* get(size_t size, Mspace* mspace, typename Mspace::Iterator& iterator, Thread* thread) {
+    assert(mspace->is_locked(), "invariant");
+    if (iterator.has_next()) {
+      Type* const t = iterator.next();
+      assert(!t->retired(), "invariant");
+      assert(t->identity() == NULL, "invariant");
+      assert(t->free_size() >= size, "invariant");
+      t->acquire(thread);
+      return t;
+    }
+    return NULL;
+  }
+};
+
+template <typename Mspace>
+class JfrThreadLocalRetrieval {
+ public:
+  typedef typename Mspace::Type Type;
+  static Type* get(size_t size, Mspace* mspace, Thread* thread) {
+    typename Mspace::Iterator iterator(mspace->free(), forward);
+    return JfrExclusiveRetrieval<Mspace>::get(size, mspace, iterator, thread);
+  }
+};
+
+#endif // SHARE_VM_JFR_RECORDER_STORAGE_JFRMEMORYSPACERETRIEVAL_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/storage/jfrStorage.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,761 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/jfrEvents.hpp"
+#include "jfr/jni/jfrJavaSupport.hpp"
+#include "jfr/recorder/jfrRecorder.hpp"
+#include "jfr/recorder/repository/jfrChunkWriter.hpp"
+#include "jfr/recorder/service/jfrOptionSet.hpp"
+#include "jfr/recorder/service/jfrPostBox.hpp"
+#include "jfr/recorder/storage/jfrMemorySpace.inline.hpp"
+#include "jfr/recorder/storage/jfrStorage.hpp"
+#include "jfr/recorder/storage/jfrStorageControl.hpp"
+#include "jfr/recorder/storage/jfrStorageUtils.inline.hpp"
+#include "jfr/utilities/jfrIterator.hpp"
+#include "jfr/utilities/jfrTime.hpp"
+#include "jfr/writers/jfrNativeEventWriter.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/orderAccess.hpp"
+#include "runtime/os.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/thread.hpp"
+
+typedef JfrStorage::Buffer* BufferPtr;
+
+static JfrStorage* _instance = NULL;
+static JfrStorageControl* _control;
+
+JfrStorage& JfrStorage::instance() {
+  return *_instance;
+}
+
+JfrStorage* JfrStorage::create(JfrChunkWriter& chunkwriter, JfrPostBox& post_box) {
+  assert(_instance == NULL, "invariant");
+  _instance = new JfrStorage(chunkwriter, post_box);
+  return _instance;
+}
+
+void JfrStorage::destroy() {
+  if (_instance != NULL) {
+    delete _instance;
+    _instance = NULL;
+  }
+}
+
+JfrStorage::JfrStorage(JfrChunkWriter& chunkwriter, JfrPostBox& post_box) :
+  _control(NULL),
+  _global_mspace(NULL),
+  _thread_local_mspace(NULL),
+  _transient_mspace(NULL),
+  _age_mspace(NULL),
+  _chunkwriter(chunkwriter),
+  _post_box(post_box) {}
+
+JfrStorage::~JfrStorage() {
+  if (_control != NULL) {
+    delete _control;
+  }
+  if (_global_mspace != NULL) {
+    delete _global_mspace;
+  }
+  if (_thread_local_mspace != NULL) {
+    delete _thread_local_mspace;
+  }
+  if (_transient_mspace != NULL) {
+    delete _transient_mspace;
+  }
+  if (_age_mspace != NULL) {
+    delete _age_mspace;
+  }
+  _instance = NULL;
+}
+
+static const size_t in_memory_discard_threshold_delta = 2; // start discarding data when only this number of free buffers remains
+static const size_t unlimited_mspace_size = 0;
+static const size_t thread_local_cache_count = 8;
+static const size_t thread_local_scavenge_threshold = thread_local_cache_count / 2;
+static const size_t transient_buffer_size_multiplier = 8; // against thread local buffer size
+
+template <typename Mspace>
+static Mspace* create_mspace(size_t buffer_size, size_t limit, size_t cache_count, JfrStorage* storage_instance) {
+  Mspace* mspace = new Mspace(buffer_size, limit, cache_count, storage_instance);
+  if (mspace != NULL) {
+    mspace->initialize();
+  }
+  return mspace;
+}
+
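+// Creates the four memory spaces: a bounded global mspace, a thread local mspace, a transient
+// mspace sized at a multiple of the thread buffer size for temporary leases, and an age mspace
+// whose nodes carry only a header, all dimensioned from JfrOptionSet.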
+bool JfrStorage::initialize() {
+  assert(_control == NULL, "invariant");
+  assert(_global_mspace == NULL, "invariant");
+  assert(_thread_local_mspace == NULL, "invariant");
+  assert(_transient_mspace == NULL, "invariant");
+  assert(_age_mspace == NULL, "invariant");
+
+  const size_t num_global_buffers = (size_t)JfrOptionSet::num_global_buffers();
+  assert(num_global_buffers >= in_memory_discard_threshold_delta, "invariant");
+  const size_t memory_size = (size_t)JfrOptionSet::memory_size();
+  const size_t global_buffer_size = (size_t)JfrOptionSet::global_buffer_size();
+  const size_t thread_buffer_size = (size_t)JfrOptionSet::thread_buffer_size();
+
+  _control = new JfrStorageControl(num_global_buffers, num_global_buffers - in_memory_discard_threshold_delta);
+  if (_control == NULL) {
+    return false;
+  }
+  _global_mspace = create_mspace<JfrStorageMspace>(global_buffer_size, memory_size, num_global_buffers, this);
+  if (_global_mspace == NULL) {
+    return false;
+  }
+  _thread_local_mspace = create_mspace<JfrThreadLocalMspace>(thread_buffer_size, unlimited_mspace_size, thread_local_cache_count, this);
+  if (_thread_local_mspace == NULL) {
+    return false;
+  }
+  _transient_mspace = create_mspace<JfrStorageMspace>(thread_buffer_size * transient_buffer_size_multiplier, unlimited_mspace_size, 0, this);
+  if (_transient_mspace == NULL) {
+    return false;
+  }
+  _age_mspace = create_mspace<JfrStorageAgeMspace>(0 /* no extra size except header */, unlimited_mspace_size, num_global_buffers, this);
+  if (_age_mspace == NULL) {
+    return false;
+  }
+  control().set_scavenge_threshold(thread_local_scavenge_threshold);
+  return true;
+}
+
+JfrStorageControl& JfrStorage::control() {
+  return *instance()._control;
+}
+
+static void log_allocation_failure(const char* msg, size_t size) {
+  if (LogJFR) tty->print_cr("Unable to allocate " SIZE_FORMAT " bytes of %s.", size, msg);
+}
+
+BufferPtr JfrStorage::acquire_thread_local(Thread* thread, size_t size /* 0 */) {
+  BufferPtr buffer = mspace_get_to_full(size, instance()._thread_local_mspace, thread);
+  if (buffer == NULL) {
+    log_allocation_failure("thread local_memory", size);
+    return NULL;
+  }
+  assert(buffer->acquired_by_self(), "invariant");
+  return buffer;
+}
+
+BufferPtr JfrStorage::acquire_transient(size_t size, Thread* thread) {
+  BufferPtr buffer = mspace_allocate_transient_lease_to_full(size, instance()._transient_mspace, thread);
+  if (buffer == NULL) {
+    log_allocation_failure("transient memory", size);
+    return NULL;
+  }
+  assert(buffer->acquired_by_self(), "invariant");
+  assert(buffer->transient(), "invariant");
+  assert(buffer->lease(), "invariant");
+  return buffer;
+}
+
+static BufferPtr get_lease(size_t size, JfrStorageMspace* mspace, JfrStorage& storage_instance, size_t retry_count, Thread* thread) {
+  assert(size <= mspace->min_elem_size(), "invariant");
+  while (true) {
+    BufferPtr t = mspace_get_free_lease_with_retry(size, mspace, retry_count, thread);
+    if (t == NULL && storage_instance.control().should_discard()) {
+      storage_instance.discard_oldest(thread);
+      continue;
+    }
+    return t;
+  }
+}
+
+static BufferPtr get_promotion_buffer(size_t size, JfrStorageMspace* mspace, JfrStorage& storage_instance, size_t retry_count, Thread* thread) {
+  assert(size <= mspace->min_elem_size(), "invariant");
+  while (true) {
+    BufferPtr t = mspace_get_free_with_retry(size, mspace, retry_count, thread);
+    if (t == NULL && storage_instance.control().should_discard()) {
+      storage_instance.discard_oldest(thread);
+      continue;
+    }
+    return t;
+  }
+}
+
+static const size_t lease_retry = 10;
+
+BufferPtr JfrStorage::acquire_large(size_t size, Thread* thread) {
+  JfrStorage& storage_instance = instance();
+  const size_t max_elem_size = storage_instance._global_mspace->min_elem_size(); // min is also max
+  // if not too large and capacity is still available, ask for a lease from the global system
+  if (size < max_elem_size && storage_instance.control().is_global_lease_allowed()) {
+    BufferPtr const buffer = get_lease(size, storage_instance._global_mspace, storage_instance, lease_retry, thread);
+    if (buffer != NULL) {
+      assert(buffer->acquired_by_self(), "invariant");
+      assert(!buffer->transient(), "invariant");
+      assert(buffer->lease(), "invariant");
+      storage_instance.control().increment_leased();
+      return buffer;
+    }
+  }
+  return acquire_transient(size, thread);
+}
+
+static void write_data_loss_event(JfrBuffer* buffer, u8 unflushed_size, Thread* thread) {
+  assert(buffer != NULL, "invariant");
+  assert(buffer->empty(), "invariant");
+  const u8 total_data_loss = thread->jfr_thread_local()->add_data_lost(unflushed_size);
+  if (EventDataLoss::is_enabled()) {
+    JfrNativeEventWriter writer(buffer, thread);
+    writer.write<u8>(EventDataLoss::eventId);
+    writer.write(JfrTicks::now());
+    writer.write(unflushed_size);
+    writer.write(total_data_loss);
+  }
+}
+
+static void write_data_loss(BufferPtr buffer, Thread* thread) {
+  assert(buffer != NULL, "invariant");
+  const size_t unflushed_size = buffer->unflushed_size();
+  buffer->concurrent_reinitialization();
+  if (unflushed_size == 0) {
+    return;
+  }
+  write_data_loss_event(buffer, unflushed_size, thread);
+}
+
+static const size_t promotion_retry = 100;
+
+bool JfrStorage::flush_regular_buffer(BufferPtr buffer, Thread* thread) {
+  assert(buffer != NULL, "invariant");
+  assert(!buffer->lease(), "invariant");
+  assert(!buffer->transient(), "invariant");
+  const size_t unflushed_size = buffer->unflushed_size();
+  if (unflushed_size == 0) {
+    buffer->concurrent_reinitialization();
+    assert(buffer->empty(), "invariant");
+    return true;
+  }
+  BufferPtr const promotion_buffer = get_promotion_buffer(unflushed_size, _global_mspace, *this, promotion_retry, thread);
+  if (promotion_buffer == NULL) {
+    write_data_loss(buffer, thread);
+    return false;
+  }
+  if (!JfrRecorder::is_shutting_down()) {
+    assert(promotion_buffer->acquired_by_self(), "invariant");
+  }
+  assert(promotion_buffer->free_size() >= unflushed_size, "invariant");
+  buffer->concurrent_move_and_reinitialize(promotion_buffer, unflushed_size);
+  assert(buffer->empty(), "invariant");
+  return true;
+}
+
+/*
+ * 1. If the buffer was a "lease" from the global system, release it back.
+ * 2. If the buffer is transient (temporary, dynamically allocated), retire it and register it as full.
+ *
+ * The buffer is effectively invalidated for the thread post-return,
+ * and the caller should take measures to ensure that it is no longer referenced.
+ */
+void JfrStorage::release_large(BufferPtr buffer, Thread* thread) {
+  assert(buffer != NULL, "invariant");
+  assert(buffer->lease(), "invariant");
+  assert(buffer->acquired_by_self(), "invariant");
+  buffer->clear_lease();
+  if (buffer->transient()) {
+    buffer->set_retired();
+    register_full(buffer, thread);
+  } else {
+    buffer->release();
+    control().decrement_leased();
+  }
+}
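+// Hypothetical call sequence for a lease, assuming access from within JfrStorage
+// or a befriended class (illustration only, not compiled as part of this change;
+// 'size' is a placeholder):
+//
+//   Thread* t = Thread::current();
+//   BufferPtr lease = instance().acquire_large(size, t);  // global lease or transient fallback
+//   if (lease != NULL) {
+//     // ... write event data into the leased buffer ...
+//     instance().release_large(lease, t);  // lease returned, or transient retired and registered full
+//   }
+//   // do not touch 'lease' after release_large(); it is invalidated for this thread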
+
+static JfrAgeNode* new_age_node(BufferPtr buffer, JfrStorageAgeMspace* age_mspace, Thread* thread) {
+  assert(buffer != NULL, "invariant");
+  assert(age_mspace != NULL, "invariant");
+  return mspace_allocate_transient(0, age_mspace, thread);
+}
+
+static void log_registration_failure(size_t unflushed_size) {
+  if (LogJFR) tty->print_cr("Unable to register a full buffer of " SIZE_FORMAT " bytes.", unflushed_size);
+  if (LogJFR) tty->print_cr("Cleared 1 full buffer of " SIZE_FORMAT " bytes.", unflushed_size);
+}
+
+static void handle_registration_failure(BufferPtr buffer) {
+  assert(buffer != NULL, "invariant");
+  assert(buffer->retired(), "invariant");
+  const size_t unflushed_size = buffer->unflushed_size();
+  buffer->reinitialize();
+  log_registration_failure(unflushed_size);
+}
+
+static JfrAgeNode* get_free_age_node(JfrStorageAgeMspace* age_mspace, Thread* thread) {
+  assert(JfrBuffer_lock->owned_by_self(), "invariant");
+  return mspace_get_free_with_detach(0, age_mspace, thread);
+}
+
+static bool insert_full_age_node(JfrAgeNode* age_node, JfrStorageAgeMspace* age_mspace, Thread* thread) {
+  assert(JfrBuffer_lock->owned_by_self(), "invariant");
+  assert(age_node->retired_buffer()->retired(), "invariant");
+  age_mspace->insert_full_head(age_node);
+  return true;
+}
+
+static bool full_buffer_registration(BufferPtr buffer, JfrStorageAgeMspace* age_mspace, JfrStorageControl& control, Thread* thread) {
+  assert(buffer != NULL, "invariant");
+  assert(buffer->retired(), "invariant");
+  assert(age_mspace != NULL, "invariant");
+  MutexLockerEx lock(JfrBuffer_lock, Mutex::_no_safepoint_check_flag);
+  JfrAgeNode* age_node = get_free_age_node(age_mspace, thread);
+  if (age_node == NULL) {
+    age_node = new_age_node(buffer, age_mspace, thread);
+    if (age_node == NULL) {
+      return false;
+    }
+  }
+  assert(age_node != NULL, "invariant");
+  assert(age_node->acquired_by_self(), "invariant");
+  age_node->set_retired_buffer(buffer);
+  control.increment_full();
+  return insert_full_age_node(age_node, age_mspace, thread);
+}
+
+void JfrStorage::register_full(BufferPtr buffer, Thread* thread) {
+  assert(buffer != NULL, "invariant");
+  assert(buffer->retired(), "invariant");
+  if (!full_buffer_registration(buffer, _age_mspace, control(), thread)) {
+    handle_registration_failure(buffer);
+    buffer->release();
+  }
+  if (control().should_post_buffer_full_message()) {
+    _post_box.post(MSG_FULLBUFFER);
+  }
+}
+
+void JfrStorage::lock() {
+  assert(!JfrBuffer_lock->owned_by_self(), "invariant");
+  JfrBuffer_lock->lock_without_safepoint_check();
+}
+
+void JfrStorage::unlock() {
+  assert(JfrBuffer_lock->owned_by_self(), "invariant");
+  JfrBuffer_lock->unlock();
+}
+
+#ifdef ASSERT
+bool JfrStorage::is_locked() const {
+  return JfrBuffer_lock->owned_by_self();
+}
+#endif
+
+// don't use buffer on return, it is gone
+void JfrStorage::release(BufferPtr buffer, Thread* thread) {
+  assert(buffer != NULL, "invariant");
+  assert(!buffer->lease(), "invariant");
+  assert(!buffer->transient(), "invariant");
+  assert(!buffer->retired(), "invariant");
+  if (!buffer->empty()) {
+    if (!flush_regular_buffer(buffer, thread)) {
+      buffer->concurrent_reinitialization();
+    }
+  }
+  assert(buffer->empty(), "invariant");
+  control().increment_dead();
+  buffer->release();
+  buffer->set_retired();
+}
+
+void JfrStorage::release_thread_local(BufferPtr buffer, Thread* thread) {
+  assert(buffer != NULL, "invariant");
+  JfrStorage& storage_instance = instance();
+  storage_instance.release(buffer, thread);
+  if (storage_instance.control().should_scavenge()) {
+    storage_instance._post_box.post(MSG_DEADBUFFER);
+  }
+}
+
+static void log_discard(size_t count, size_t amount, size_t current) {
+  assert(count > 0, "invariant");
+  if (LogJFR) tty->print_cr("Cleared " SIZE_FORMAT " full buffer(s) of " SIZE_FORMAT" bytes.", count, amount);
+  if (LogJFR) tty->print_cr("Current number of full buffers " SIZE_FORMAT "", current);
+}
+
+void JfrStorage::discard_oldest(Thread* thread) {
+  if (JfrBuffer_lock->try_lock()) {
+    if (!control().should_discard()) {
+      // another thread handled it
+      return;
+    }
+    const size_t num_full_pre_discard = control().full_count();
+    size_t num_full_post_discard = 0;
+    size_t discarded_size = 0;
+    while (true) {
+      JfrAgeNode* const oldest_age_node = _age_mspace->full_tail();
+      if (oldest_age_node == NULL) {
+        break;
+      }
+      BufferPtr const buffer = oldest_age_node->retired_buffer();
+      assert(buffer->retired(), "invariant");
+      discarded_size += buffer->unflushed_size();
+      num_full_post_discard = control().decrement_full();
+      if (buffer->transient()) {
+        mspace_release_full(buffer, _transient_mspace);
+        mspace_release_full(oldest_age_node, _age_mspace);
+        continue;
+      } else {
+        mspace_release_full(oldest_age_node, _age_mspace);
+        buffer->reinitialize();
+        buffer->release(); // publish
+        break;
+      }
+    }
+    JfrBuffer_lock->unlock();
+    const size_t number_of_discards = num_full_pre_discard - num_full_post_discard;
+    if (number_of_discards > 0) {
+      log_discard(number_of_discards, discarded_size, num_full_post_discard);
+    }
+  }
+}
+
+#ifdef ASSERT
+typedef const BufferPtr ConstBufferPtr;
+
+static void assert_flush_precondition(ConstBufferPtr cur, size_t used, bool native, const Thread* t) {
+  assert(t != NULL, "invariant");
+  assert(cur != NULL, "invariant");
+  assert(cur->pos() + used <= cur->end(), "invariant");
+  assert(native ? t->jfr_thread_local()->native_buffer() == cur : t->jfr_thread_local()->java_buffer() == cur, "invariant");
+}
+
+static void assert_flush_regular_precondition(ConstBufferPtr cur, const u1* const cur_pos, size_t used, size_t req, const Thread* t) {
+  assert(t != NULL, "invariant");
+  assert(t->jfr_thread_local()->shelved_buffer() == NULL, "invariant");
+  assert(cur != NULL, "invariant");
+  assert(!cur->lease(), "invariant");
+  assert(cur_pos != NULL, "invariant");
+  assert(req >= used, "invariant");
+}
+
+static void assert_provision_large_precondition(ConstBufferPtr cur, size_t used, size_t req, const Thread* t) {
+  assert(cur != NULL, "invariant");
+  assert(t != NULL, "invariant");
+  assert(t->jfr_thread_local()->shelved_buffer() != NULL, "invariant");
+  assert(req >= used, "invariant");
+}
+
+static void assert_flush_large_precondition(ConstBufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
+  assert(t != NULL, "invariant");
+  assert(cur != NULL, "invariant");
+  assert(cur->lease(), "invariant");
+  assert(cur_pos != NULL, "invariant");
+  assert(native ? t->jfr_thread_local()->native_buffer() == cur : t->jfr_thread_local()->java_buffer() == cur, "invariant");
+  assert(t->jfr_thread_local()->shelved_buffer() != NULL, "invariant");
+  assert(req >= used, "invariant");
+  assert(cur != t->jfr_thread_local()->shelved_buffer(), "invariant");
+}
+#endif // ASSERT
+
+BufferPtr JfrStorage::flush(BufferPtr cur, size_t used, size_t req, bool native, Thread* t) {
+  debug_only(assert_flush_precondition(cur, used, native, t);)
+  const u1* const cur_pos = cur->pos();
+  req += used;
+  // the requested size now encompasses the outstanding used size
+  return cur->lease() ? instance().flush_large(cur, cur_pos, used, req, native, t) :
+                          instance().flush_regular(cur, cur_pos, used, req, native, t);
+}
+
+BufferPtr JfrStorage::flush_regular(BufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
+  debug_only(assert_flush_regular_precondition(cur, cur_pos, used, req, t);)
+  // A flush is needed before memcpy since a non-large buffer is thread stable
+  // (thread local). The flush will not modify memory in addresses above pos()
+  // which is where the "used / uncommitted" data resides. It is therefore both
+  // possible and valid to migrate data after the flush. This is however only
+  // the case for stable thread local buffers; it is not the case for large buffers.
+  if (!cur->empty()) {
+    flush_regular_buffer(cur, t);
+  }
+  assert(t->jfr_thread_local()->shelved_buffer() == NULL, "invariant");
+  if (cur->free_size() >= req) {
+    // simplest case, no switching of buffers
+    if (used > 0) {
+      memcpy(cur->pos(), (void*)cur_pos, used);
+    }
+    assert(native ? t->jfr_thread_local()->native_buffer() == cur : t->jfr_thread_local()->java_buffer() == cur, "invariant");
+    return cur;
+  }
+  // Going for a "larger-than-regular" buffer.
+  // Shelve the current buffer to make room for a temporary lease.
+  t->jfr_thread_local()->shelve_buffer(cur);
+  return provision_large(cur, cur_pos, used, req, native, t);
+}
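+// Assumed buffer layout during the migration above (illustration only):
+//
+//   [ start .. top )[ top .. pos )[ pos .. pos + used )[ .. end )
+//      flushed        unflushed      outstanding "used"    free
+//
+// flush_regular_buffer() only publishes data at or below pos(), so the
+// outstanding "used" bytes at cur_pos survive the flush and can be memcpy'd
+// into the emptied buffer (or into a provisioned large buffer); cf.
+// assert_flush_precondition(): cur->pos() + used <= cur->end().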
+
+static BufferPtr store_buffer_to_thread_local(BufferPtr buffer, JfrThreadLocal* jfr_thread_local, bool native) {
+  assert(buffer != NULL, "invariant");
+  if (native) {
+    jfr_thread_local->set_native_buffer(buffer);
+  } else {
+    jfr_thread_local->set_java_buffer(buffer);
+  }
+  return buffer;
+}
+
+static BufferPtr restore_shelved_buffer(bool native, Thread* t) {
+  JfrThreadLocal* const tl = t->jfr_thread_local();
+  BufferPtr shelved = tl->shelved_buffer();
+  assert(shelved != NULL, "invariant");
+  tl->shelve_buffer(NULL);
+  // restore shelved buffer back as primary
+  return store_buffer_to_thread_local(shelved, tl, native);
+}
+
+BufferPtr JfrStorage::flush_large(BufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
+  debug_only(assert_flush_large_precondition(cur, cur_pos, used, req, native, t);)
+  // Can the "regular" buffer (now shelved) accommodate the requested size?
+  BufferPtr shelved = t->jfr_thread_local()->shelved_buffer();
+  assert(shelved != NULL, "invariant");
+  if (shelved->free_size() >= req) {
+    if (req > 0) {
+      memcpy(shelved->pos(), (void*)cur_pos, (size_t)used);
+    }
+    // release and invalidate
+    release_large(cur, t);
+    return restore_shelved_buffer(native, t);
+  }
+  // regular too small
+  return provision_large(cur, cur_pos, used, req, native, t);
+}
+
+static BufferPtr large_fail(BufferPtr cur, bool native, JfrStorage& storage_instance, Thread* t) {
+  assert(cur != NULL, "invariant");
+  assert(t != NULL, "invariant");
+  if (cur->lease()) {
+    storage_instance.release_large(cur, t);
+  }
+  return restore_shelved_buffer(native, t);
+}
+
+// Always returns a non-null buffer.
+// If accommodating the large request fails, the shelved buffer is returned
+// even though it might be smaller than the requested size.
+// The caller needs to check whether the size was successfully accommodated.
+BufferPtr JfrStorage::provision_large(BufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
+  debug_only(assert_provision_large_precondition(cur, used, req, t);)
+  assert(t->jfr_thread_local()->shelved_buffer() != NULL, "invariant");
+  BufferPtr const buffer = acquire_large(req, t);
+  if (buffer == NULL) {
+    // unable to allocate and serve the request
+    return large_fail(cur, native, *this, t);
+  }
+  // ok managed to acquire a "large" buffer for the requested size
+  assert(buffer->free_size() >= req, "invariant");
+  assert(buffer->lease(), "invariant");
+  // transfer outstanding data
+  memcpy(buffer->pos(), (void*)cur_pos, used);
+  if (cur->lease()) {
+    release_large(cur, t);
+    // don't use current anymore, it is gone
+  }
+  return store_buffer_to_thread_local(buffer, t->jfr_thread_local(), native);
+}
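+// Hypothetical caller-side validation implied by the contract above
+// (illustration only; 'result' is a made-up local, not part of this change):
+//
+//   BufferPtr result = provision_large(cur, cur_pos, used, req, native, t);
+//   const bool accommodated = result->free_size() >= req;
+//   // if false, the shelved regular buffer came back and is smaller than requested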
+
+typedef UnBufferedWriteToChunk<JfrBuffer> WriteOperation;
+typedef MutexedWriteOp<WriteOperation> MutexedWriteOperation;
+typedef ConcurrentWriteOp<WriteOperation> ConcurrentWriteOperation;
+typedef ConcurrentWriteOpExcludeRetired<WriteOperation> ThreadLocalConcurrentWriteOperation;
+
+size_t JfrStorage::write() {
+  const size_t full_size_processed = write_full();
+  WriteOperation wo(_chunkwriter);
+  ThreadLocalConcurrentWriteOperation tlwo(wo);
+  process_full_list(tlwo, _thread_local_mspace);
+  ConcurrentWriteOperation cwo(wo);
+  process_free_list(cwo, _global_mspace);
+  return full_size_processed + wo.processed();
+}
+
+size_t JfrStorage::write_at_safepoint() {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  WriteOperation wo(_chunkwriter);
+  MutexedWriteOperation writer(wo); // mutexed write mode
+  process_full_list(writer, _thread_local_mspace);
+  assert(_transient_mspace->is_free_empty(), "invariant");
+  process_full_list(writer, _transient_mspace);
+  assert(_global_mspace->is_full_empty(), "invariant");
+  process_free_list(writer, _global_mspace);
+  return wo.processed();
+}
+
+typedef DiscardOp<DefaultDiscarder<JfrStorage::Buffer> > DiscardOperation;
+typedef ReleaseOp<JfrStorageMspace> ReleaseOperation;
+typedef CompositeOperation<MutexedWriteOperation, ReleaseOperation> FullOperation;
+
+size_t JfrStorage::clear() {
+  const size_t full_size_processed = clear_full();
+  DiscardOperation discarder(concurrent); // concurrent discard mode
+  process_full_list(discarder, _thread_local_mspace);
+  assert(_transient_mspace->is_free_empty(), "invariant");
+  process_full_list(discarder, _transient_mspace);
+  assert(_global_mspace->is_full_empty(), "invariant");
+  process_free_list(discarder, _global_mspace);
+  return full_size_processed + discarder.processed();
+}
+
+static void insert_free_age_nodes(JfrStorageAgeMspace* age_mspace, JfrAgeNode* head, JfrAgeNode* tail, size_t count) {
+  if (tail != NULL) {
+    assert(tail->next() == NULL, "invariant");
+    assert(head != NULL, "invariant");
+    assert(head->prev() == NULL, "invariant");
+    MutexLockerEx buffer_lock(JfrBuffer_lock, Mutex::_no_safepoint_check_flag);
+    age_mspace->insert_free_tail(head, tail, count);
+  }
+}
+
+template <typename Processor>
+static void process_age_list(Processor& processor, JfrStorageAgeMspace* age_mspace, JfrAgeNode* head, size_t count) {
+  assert(age_mspace != NULL, "invariant");
+  assert(head != NULL, "invariant");
+  assert(count > 0, "invariant");
+  JfrAgeNode* node = head;
+  JfrAgeNode* last = NULL;
+  while (node != NULL) {
+    last = node;
+    BufferPtr const buffer = node->retired_buffer();
+    assert(buffer != NULL, "invariant");
+    assert(buffer->retired(), "invariant");
+    processor.process(buffer);
+    // at this point, buffer is already live or destroyed
+    node->clear_identity();
+    JfrAgeNode* const next = (JfrAgeNode*)node->next();
+    if (node->transient()) {
+      // detach
+      last = (JfrAgeNode*)last->prev();
+      if (last != NULL) {
+        last->set_next(next);
+      } else {
+        head = next;
+      }
+      if (next != NULL) {
+        next->set_prev(last);
+      }
+      --count;
+      age_mspace->deallocate(node);
+    }
+    node = next;
+  }
+  insert_free_age_nodes(age_mspace, head, last, count);
+}
+
+template <typename Processor>
+static size_t process_full(Processor& processor, JfrStorageControl& control, JfrStorageAgeMspace* age_mspace) {
+  assert(age_mspace != NULL, "invariant");
+  if (age_mspace->is_full_empty()) {
+    // nothing to do
+    return 0;
+  }
+  size_t count;
+  JfrAgeNode* head;
+  {
+    // fetch age list
+    MutexLockerEx buffer_lock(JfrBuffer_lock, Mutex::_no_safepoint_check_flag);
+    count = age_mspace->full_count();
+    head = age_mspace->clear_full();
+    control.reset_full();
+  }
+  assert(head != NULL, "invariant");
+  assert(count > 0, "invariant");
+  process_age_list(processor, age_mspace, head, count);
+  return count;
+}
+
+static void log(size_t count, size_t amount, bool clear = false) {
+  if (count > 0) {
+    if (LogJFR) tty->print_cr("%s " SIZE_FORMAT " full buffer(s) of " SIZE_FORMAT" B of data%s",
+      clear ? "Discarded" : "Wrote", count, amount, clear ? "." : " to chunk.");
+  }
+}
+
+// full writer
+// Assumption: processes retired buffers only; exclusive access
+// MutexedWriter -> ReleaseOp
+//
+size_t JfrStorage::write_full() {
+  assert(_chunkwriter.is_valid(), "invariant");
+  Thread* const thread = Thread::current();
+  WriteOperation wo(_chunkwriter);
+  MutexedWriteOperation writer(wo); // a retired buffer implies mutexed access
+  ReleaseOperation ro(_transient_mspace, thread);
+  FullOperation cmd(&writer, &ro);
+  const size_t count = process_full(cmd, control(), _age_mspace);
+  log(count, writer.processed());
+  return writer.processed();
+}
+
+size_t JfrStorage::clear_full() {
+  DiscardOperation discarder(mutexed); // a retired buffer implies mutexed access
+  const size_t count = process_full(discarder, control(), _age_mspace);
+  log(count, discarder.processed(), true);
+  return discarder.processed();
+}
+
+static void scavenge_log(size_t count, size_t amount, size_t current) {
+  if (count > 0) {
+    if (LogJFR) tty->print_cr("Released " SIZE_FORMAT " dead buffer(s) of " SIZE_FORMAT" B of data.", count, amount);
+    if (LogJFR) tty->print_cr("Current number of dead buffers " SIZE_FORMAT "", current);
+  }
+}
+
+template <typename Mspace>
+class Scavenger {
+private:
+  JfrStorageControl& _control;
+  Mspace* _mspace;
+  size_t _count;
+  size_t _amount;
+public:
+  typedef typename Mspace::Type Type;
+  Scavenger(JfrStorageControl& control, Mspace* mspace) : _control(control), _mspace(mspace), _count(0), _amount(0) {}
+  bool process(Type* t) {
+    if (t->retired()) {
+      assert(!t->transient(), "invariant");
+      assert(!t->lease(), "invariant");
+      assert(t->empty(), "invariant");
+      assert(t->identity() == NULL, "invariant");
+      ++_count;
+      _amount += t->total_size();
+      t->clear_retired();
+      _control.decrement_dead();
+      mspace_release_full_critical(t, _mspace);
+    }
+    return true;
+  }
+  size_t processed() const { return _count; }
+  size_t amount() const { return _amount; }
+};
+
+size_t JfrStorage::scavenge() {
+  JfrStorageControl& ctrl = control();
+  if (ctrl.dead_count() == 0) {
+    return 0;
+  }
+  Scavenger<JfrThreadLocalMspace> scavenger(ctrl, _thread_local_mspace);
+  process_full_list(scavenger, _thread_local_mspace);
+  scavenge_log(scavenger.processed(), scavenger.amount(), ctrl.dead_count());
+  return scavenger.processed();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/storage/jfrStorage.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#ifndef SHARE_VM_JFR_RECORDER_STORAGE_JFRSTORAGE_HPP
+#define SHARE_VM_JFR_RECORDER_STORAGE_JFRSTORAGE_HPP
+
+#include "jfr/recorder/storage/jfrBuffer.hpp"
+#include "jfr/recorder/storage/jfrMemorySpace.hpp"
+#include "jfr/recorder/storage/jfrMemorySpaceRetrieval.hpp"
+
+class JfrChunkWriter;
+class JfrPostBox;
+class JfrStorage;
+class JfrStorageControl;
+
+typedef JfrMemorySpace<JfrBuffer, JfrMspaceAlternatingRetrieval, JfrStorage> JfrStorageMspace;
+typedef JfrMemorySpace<JfrBuffer, JfrThreadLocalRetrieval, JfrStorage> JfrThreadLocalMspace;
+typedef JfrMemorySpace<JfrAgeNode, JfrMspaceSequentialRetrieval, JfrStorage> JfrStorageAgeMspace;
+
+//
+// Responsible for providing backing storage for writing events.
+//
+class JfrStorage : public JfrCHeapObj {
+ public:
+  typedef JfrStorageMspace::Type Buffer;
+ private:
+  JfrStorageControl* _control;
+  JfrStorageMspace* _global_mspace;
+  JfrThreadLocalMspace* _thread_local_mspace;
+  JfrStorageMspace* _transient_mspace;
+  JfrStorageAgeMspace* _age_mspace;
+  JfrChunkWriter& _chunkwriter;
+  JfrPostBox& _post_box;
+
+  // mspace callbacks
+  void register_full(Buffer* t, Thread* thread);
+  void lock();
+  void unlock();
+  DEBUG_ONLY(bool is_locked() const;)
+
+  Buffer* acquire_large(size_t size, Thread* t);
+  Buffer* acquire_transient(size_t size, Thread* thread);
+  bool flush_regular_buffer(Buffer* const buffer, Thread* t);
+  Buffer* flush_regular(Buffer* cur, const u1* cur_pos, size_t used, size_t req, bool native, Thread* t);
+  Buffer* flush_large(Buffer* cur, const u1* cur_pos, size_t used, size_t req, bool native, Thread* t);
+  Buffer* provision_large(Buffer* cur, const u1* cur_pos, size_t used, size_t req, bool native, Thread* t);
+  void release(Buffer* buffer, Thread* t);
+
+  size_t clear();
+  size_t clear_full();
+  size_t write();
+  size_t write_full();
+  size_t write_at_safepoint();
+  size_t scavenge();
+
+  JfrStorage(JfrChunkWriter& cw, JfrPostBox& post_box);
+  ~JfrStorage();
+
+  static JfrStorage& instance();
+  static JfrStorage* create(JfrChunkWriter& chunkwriter, JfrPostBox& post_box);
+  bool initialize();
+  static void destroy();
+
+ public:
+  static Buffer* acquire_thread_local(Thread* t, size_t size = 0);
+  static void release_thread_local(Buffer* buffer, Thread* t);
+  void release_large(Buffer* const buffer, Thread* t);
+  static Buffer* flush(Buffer* cur, size_t used, size_t req, bool native, Thread* t);
+  void discard_oldest(Thread* t);
+  static JfrStorageControl& control();
+
+  friend class JfrRecorder;
+  friend class JfrRecorderService;
+  template <typename, template <typename> class, typename>
+  friend class JfrMemorySpace;
+};
+
+#endif // SHARE_VM_JFR_RECORDER_STORAGE_JFRSTORAGE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/storage/jfrStorageControl.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/recorder/storage/jfrStorageControl.hpp"
+#include "runtime/atomic.inline.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/orderAccess.inline.hpp"
+
+// returns the updated value
+static jlong atomic_add(size_t value, size_t volatile* const dest) {
+  size_t compare_value;
+  size_t exchange_value;
+  do {
+    compare_value = OrderAccess::load_ptr_acquire((intptr_t*)dest);
+    exchange_value = compare_value + value;
+  } while ((unsigned long)Atomic::cmpxchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest, (intptr_t)compare_value) != compare_value);
+  return exchange_value;
+}
+
+static jlong atomic_dec(size_t volatile* const dest) {
+  size_t compare_value;
+  size_t exchange_value;
+  do {
+    compare_value = OrderAccess::load_ptr_acquire((intptr_t*)dest);
+    assert(compare_value >= 1, "invariant");
+    exchange_value = compare_value - 1;
+  } while ((unsigned long)Atomic::cmpxchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest, (intptr_t)compare_value) != compare_value);
+  return exchange_value;
+}
+
+const size_t max_lease_factor = 2;
+JfrStorageControl::JfrStorageControl(size_t global_count_total, size_t in_memory_discard_threshold) :
+  _global_count_total(global_count_total),
+  _full_count(0),
+  _global_lease_count(0),
+  _dead_count(0),
+  _to_disk_threshold(0),
+  _in_memory_discard_threshold(in_memory_discard_threshold),
+  _global_lease_threshold(global_count_total / max_lease_factor),
+  _scavenge_threshold(0),
+  _to_disk(false) {}
+
+bool JfrStorageControl::to_disk() const {
+  return _to_disk;
+}
+
+void JfrStorageControl::set_to_disk(bool enable) {
+  _to_disk = enable;
+}
+
+size_t JfrStorageControl::full_count() const {
+  return _full_count;
+}
+
+// mutexed access
+size_t JfrStorageControl::increment_full() {
+  assert(JfrBuffer_lock->owned_by_self(), "invariant");
+  return ++_full_count;
+}
+
+size_t JfrStorageControl::decrement_full() {
+  assert(JfrBuffer_lock->owned_by_self(), "invariant");
+  assert(_full_count > 0, "invariant");
+  return --_full_count;
+}
+
+void JfrStorageControl::reset_full() {
+  assert(JfrBuffer_lock->owned_by_self(), "invariant");
+  _full_count = 0;
+}
+
+bool JfrStorageControl::should_post_buffer_full_message() const {
+  return to_disk() && (full_count() > _to_disk_threshold);
+}
+
+bool JfrStorageControl::should_discard() const {
+  return !to_disk() && full_count() >= _in_memory_discard_threshold;
+}
+
+// concurrent with accuracy requirement
+
+size_t JfrStorageControl::global_lease_count() const {
+  return (size_t)OrderAccess::load_ptr_acquire((intptr_t*)&_global_lease_count);
+}
+
+size_t JfrStorageControl::increment_leased() {
+  return atomic_add(1, &_global_lease_count);
+}
+
+size_t JfrStorageControl::decrement_leased() {
+  return atomic_dec(&_global_lease_count);
+}
+
+bool JfrStorageControl::is_global_lease_allowed() const {
+  return global_lease_count() <= _global_lease_threshold;
+}
+
+// concurrent with lax requirement
+
+size_t JfrStorageControl::dead_count() const {
+  return _dead_count;
+}
+
+size_t JfrStorageControl::increment_dead() {
+  return atomic_add(1, &_dead_count);
+}
+
+size_t JfrStorageControl::decrement_dead() {
+  return atomic_dec(&_dead_count);
+}
+
+bool JfrStorageControl::should_scavenge() const {
+  return dead_count() >= _scavenge_threshold;
+}
+
+void JfrStorageControl::set_scavenge_threshold(size_t number_of_dead_buffers) {
+  _scavenge_threshold = number_of_dead_buffers;
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/storage/jfrStorageControl.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_RECORDER_STORAGE_JFRSTORAGECONTROL_HPP
+#define SHARE_VM_JFR_RECORDER_STORAGE_JFRSTORAGECONTROL_HPP
+
+#include "jfr/utilities/jfrAllocation.hpp"
+
+class JfrStorageControl : public JfrCHeapObj {
+ private:
+  size_t _global_count_total;
+  size_t _full_count;
+  volatile size_t _global_lease_count;
+  volatile size_t _dead_count;
+  size_t _to_disk_threshold;
+  size_t _in_memory_discard_threshold;
+  size_t _global_lease_threshold;
+  size_t _scavenge_threshold;
+  bool _to_disk;
+
+ public:
+  JfrStorageControl(size_t global_count_total, size_t in_memory_discard_threshold);
+
+  void set_to_disk(bool enable);
+  bool to_disk() const;
+
+  size_t full_count() const;
+  size_t increment_full();
+  size_t decrement_full();
+  void   reset_full();
+  bool should_post_buffer_full_message() const;
+  bool should_discard() const;
+
+  size_t global_lease_count() const;
+  size_t increment_leased();
+  size_t decrement_leased();
+  bool is_global_lease_allowed() const;
+
+  size_t dead_count() const;
+  size_t increment_dead();
+  size_t decrement_dead();
+
+  void set_scavenge_threshold(size_t number_of_dead_buffers);
+  bool should_scavenge() const;
+};
+
+#endif // SHARE_VM_JFR_RECORDER_STORAGE_JFRSTORAGECONTROL_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/storage/jfrStorageUtils.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_RECORDER_STORAGE_JFRSTORAGEUTILS_HPP
+#define SHARE_VM_JFR_RECORDER_STORAGE_JFRSTORAGEUTILS_HPP
+
+#include "jfr/recorder/storage/jfrBuffer.hpp"
+#include "jfr/recorder/repository/jfrChunkWriter.hpp"
+#include "jfr/utilities/jfrAllocation.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+#include "runtime/thread.hpp"
+
+template <typename Operation, typename NextOperation>
+class CompositeOperation {
+ private:
+  Operation* _op;
+  NextOperation* _next;
+ public:
+  CompositeOperation(Operation* op, NextOperation* next) : _op(op), _next(next) {
+    assert(_op != NULL, "invariant");
+  }
+  typedef typename Operation::Type Type;
+  bool process(Type* t = NULL) {
+    return _next == NULL ? _op->process(t) : _op->process(t) && _next->process(t);
+  }
+  size_t processed() const {
+    return _next == NULL ? _op->processed() : _op->processed() + _next->processed();
+  }
+};
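+// Usage note: jfrStorage.cpp composes a full-buffer write with a release, e.g.
+//
+//   typedef CompositeOperation<MutexedWriteOperation, ReleaseOperation> FullOperation;
+//   FullOperation cmd(&writer, &ro);  // write each retired buffer, then release it
+//
+// process() short-circuits: _next only runs if _op->process() returns true.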
+
+template <typename T>
+class UnBufferedWriteToChunk {
+ private:
+  JfrChunkWriter& _writer;
+  size_t _processed;
+ public:
+  typedef T Type;
+  UnBufferedWriteToChunk(JfrChunkWriter& writer) : _writer(writer), _processed(0) {}
+  bool write(Type* t, const u1* data, size_t size);
+  size_t processed() { return _processed; }
+};
+
+template <typename T>
+class DefaultDiscarder {
+ private:
+  size_t _processed;
+ public:
+  typedef T Type;
+  DefaultDiscarder() : _processed() {}
+  bool discard(Type* t, const u1* data, size_t size);
+  size_t processed() const { return _processed; }
+};
+
+template <typename Operation>
+class ConcurrentWriteOp {
+ private:
+  Operation& _operation;
+ public:
+  typedef typename Operation::Type Type;
+  ConcurrentWriteOp(Operation& operation) : _operation(operation) {}
+  bool process(Type* t);
+  size_t processed() const { return _operation.processed(); }
+};
+
+template <typename Operation>
+class ConcurrentWriteOpExcludeRetired : private ConcurrentWriteOp<Operation> {
+ public:
+  typedef typename Operation::Type Type;
+  ConcurrentWriteOpExcludeRetired(Operation& operation) : ConcurrentWriteOp<Operation>(operation) {}
+  bool process(Type* t);
+  size_t processed() const { return ConcurrentWriteOp<Operation>::processed(); }
+};
+
+template <typename Operation>
+class MutexedWriteOp {
+ private:
+  Operation& _operation;
+ public:
+  typedef typename Operation::Type Type;
+  MutexedWriteOp(Operation& operation) : _operation(operation) {}
+  bool process(Type* t);
+  size_t processed() const { return _operation.processed(); }
+};
+
+enum jfr_operation_mode {
+  mutexed = 1,
+  concurrent
+};
+
+template <typename Operation>
+class DiscardOp {
+ private:
+  Operation _operation;
+  jfr_operation_mode _mode;
+ public:
+  typedef typename Operation::Type Type;
+  DiscardOp(jfr_operation_mode mode = concurrent) : _operation(), _mode(mode) {}
+  bool process(Type* t);
+  size_t processed() const { return _operation.processed(); }
+};
+
+#endif // SHARE_VM_JFR_RECORDER_STORAGE_JFRSTORAGEUTILS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/storage/jfrStorageUtils.inline.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_RECORDER_STORAGE_JFRSTORAGEUTILS_INLINE_HPP
+#define SHARE_VM_JFR_RECORDER_STORAGE_JFRSTORAGEUTILS_INLINE_HPP
+
+#include "jfr/recorder/storage/jfrStorageUtils.hpp"
+
+template <typename T>
+inline bool UnBufferedWriteToChunk<T>::write(T* t, const u1* data, size_t size) {
+  _writer.write_unbuffered(data, size);
+  _processed += size;
+  return true;
+}
+
+template <typename T>
+inline bool DefaultDiscarder<T>::discard(T* t, const u1* data, size_t size) {
+  _processed += size;
+  return true;
+}
+
+template <typename Operation>
+inline bool ConcurrentWriteOp<Operation>::process(typename Operation::Type* t) {
+  const u1* const current_top = t->concurrent_top();
+  const size_t unflushed_size = t->pos() - current_top;
+  if (unflushed_size == 0) {
+    t->set_concurrent_top(current_top);
+    return true;
+  }
+  const bool result = _operation.write(t, current_top, unflushed_size);
+  t->set_concurrent_top(current_top + unflushed_size);
+  return result;
+}
+
+template <typename Operation>
+inline bool ConcurrentWriteOpExcludeRetired<Operation>::process(typename Operation::Type* t) {
+  if (t->retired()) {
+    assert(t->empty(), "invariant");
+    return true;
+  }
+  return ConcurrentWriteOp<Operation>::process(t);
+}
+
+template <typename Operation>
+inline bool MutexedWriteOp<Operation>::process(typename Operation::Type* t) {
+  assert(t != NULL, "invariant");
+  const u1* const current_top = t->top();
+  const size_t unflushed_size = t->pos() - current_top;
+  if (unflushed_size == 0) {
+    return true;
+  }
+  const bool result = _operation.write(t, current_top, unflushed_size);
+  t->set_top(current_top + unflushed_size);
+  return result;
+}
+
+template <typename Operation>
+inline bool DiscardOp<Operation>::process(typename Operation::Type* t) {
+  assert(t != NULL, "invariant");
+  const u1* const current_top = _mode == concurrent ? t->concurrent_top() : t->top();
+  const size_t unflushed_size = t->pos() - current_top;
+  if (unflushed_size == 0) {
+    if (_mode == concurrent) {
+      t->set_concurrent_top(current_top);
+    }
+    return true;
+  }
+  const bool result = _operation.discard(t, current_top, unflushed_size);
+  if (_mode == concurrent) {
+    t->set_concurrent_top(current_top + unflushed_size);
+  } else {
+    t->set_top(current_top + unflushed_size);
+  }
+  return result;
+}
+
+#endif // SHARE_VM_JFR_RECORDER_STORAGE_JFRSTORAGEUTILS_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/storage/jfrVirtualMemory.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,534 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/recorder/storage/jfrVirtualMemory.hpp"
+#include "runtime/orderAccess.hpp"
+#include "runtime/virtualspace.hpp"
+#include "runtime/os.hpp"
+#include "services/memTracker.hpp"
+#include "utilities/align.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+/*
+ * A memory segment represents a virtual memory reservation.
+ * It provides ways to commit and decommit physical storage
+ * onto its virtual memory reservation.
+ */
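+//
+// Rough lifecycle sketch for a segment (illustration only; the members named
+// below are private and are driven by JfrVirtualMemoryManager further down):
+//
+//   JfrVirtualMemorySegment seg;
+//   seg.initialize(reservation_size_bytes);  // reserve address space (ReservedSpace)
+//   void* block = seg.commit(words);         // carve a committed block from _top
+//   seg.expand_by(words);                    // commit more of the reservation on demand
+//   // the destructor decommits and releases the reservation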
+
+class JfrVirtualMemorySegment : public JfrCHeapObj {
+  friend class JfrVirtualMemoryManager;
+ private:
+  JfrVirtualMemorySegment* _next;
+  char* _top;
+  ReservedSpace _rs;
+  VirtualSpace  _virtual_memory;
+
+  // Convenience functions to access the underlying virtual space metadata
+  const u1* committed_low()  const { return (const u1*)_virtual_memory.low(); }
+  const u1* committed_high() const { return (const u1*)_virtual_memory.high(); }
+  const u1* reserved_low() const { return (const u1*)_virtual_memory.low_boundary(); }
+  const u1* reserved_high() const { return (const u1*)_virtual_memory.high_boundary(); }
+  size_t reserved_words() const  { return _virtual_memory.reserved_size() / BytesPerWord; }
+  size_t committed_words() const { return _virtual_memory.actual_committed_size() / BytesPerWord; }
+  bool is_pre_committed() const { return _virtual_memory.special(); }
+  VirtualSpace& virtual_space() { return _virtual_memory; }
+
+  JfrVirtualMemorySegment();
+  ~JfrVirtualMemorySegment();
+
+  JfrVirtualMemorySegment* next() const { return _next; }
+  void set_next(JfrVirtualMemorySegment* v) { _next = v; }
+
+  // Returns true if requested size is available in the committed area
+  bool is_available(size_t block_size_request_words) {
+    return block_size_request_words <= pointer_delta(committed_high(), _top, sizeof(char*));
+  }
+
+  // allocation pointer into committed memory
+  char* top() const { return _top; }
+  void inc_top(size_t size_in_words) {
+    assert(is_available(size_in_words), "invariant");
+    _top += size_in_words * BytesPerWord;
+    assert(_top <= _virtual_memory.high(), "invariant");
+  }
+
+  // initialization performs the virtual memory reservation
+  bool initialize(size_t reservation_size_request_bytes);
+  void* take_from_committed(size_t block_size_request_words);
+
+  // Returns committed memory
+  void* commit(size_t block_size_request_words) {
+    return take_from_committed(block_size_request_words);
+  }
+
+  // Commit more memory in a reservation
+  bool expand_by(size_t block_size_request_words);
+
+  // Decommits all committed memory in this reservation segment.
+  void decommit();
+};
+
+JfrVirtualMemorySegment::JfrVirtualMemorySegment() :
+  _next(NULL),
+  _top(NULL),
+  _rs(),
+  _virtual_memory() {}
+
+JfrVirtualMemorySegment::~JfrVirtualMemorySegment() {
+  decommit();
+  _rs.release();
+}
+
+bool JfrVirtualMemorySegment::initialize(size_t reservation_size_request_bytes) {
+  assert(is_aligned(reservation_size_request_bytes, os::vm_allocation_granularity()), "invariant");
+  _rs = ReservedSpace(reservation_size_request_bytes,
+                      os::vm_allocation_granularity(),
+                      UseLargePages && os::can_commit_large_page_memory(),
+                      false);
+  if (!_rs.is_reserved()) {
+    return false;
+  }
+  assert(_rs.base() != NULL, "invariant");
+  assert(_rs.size() != 0, "invariant");
+  assert(is_aligned(_rs.base(), os::vm_allocation_granularity()), "invariant");
+  assert(is_aligned(_rs.size(), os::vm_allocation_granularity()), "invariant");
+  os::trace_page_sizes("Jfr", reservation_size_request_bytes,
+                              reservation_size_request_bytes,
+                              os::vm_page_size(),
+                              _rs.base(),
+                              _rs.size());
+  MemTracker::record_virtual_memory_type((address)_rs.base(), mtTracing);
+  assert(is_aligned(_rs.base(), os::vm_page_size()), "invariant");
+  assert(is_aligned(_rs.size(), os::vm_page_size()), "invariant");
+
+  // ReservedSpaces marked as special will have the entire memory
+  // pre-committed. Setting a committed size will make sure that
+  // committed_size and actual_committed_size agrees.
+  const size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
+  const bool result = virtual_space().initialize_with_granularity(_rs, pre_committed_size, os::vm_page_size());
+
+  if (result) {
+    assert(virtual_space().committed_size() == virtual_space().actual_committed_size(),
+      "Checking that the pre-committed memory was registered by the VirtualSpace");
+    _top = virtual_space().low();
+  }
+  return result;
+}
+
+bool JfrVirtualMemorySegment::expand_by(size_t block_size_request_words) {
+  size_t block_size_request_bytes = block_size_request_words * BytesPerWord;
+  const size_t uncommitted = virtual_space().reserved_size() - virtual_space().actual_committed_size();
+  if (uncommitted < block_size_request_bytes) {
+    // commit whatever is left in the reservation
+    block_size_request_bytes = uncommitted;
+  }
+  assert(is_aligned(block_size_request_bytes, os::vm_allocation_granularity()), "invariant");
+  // commit block in reserved memory
+  bool result = virtual_space().expand_by(block_size_request_bytes, false);
+  assert(result, "Failed to commit memory");
+  return result;
+}
+
+void JfrVirtualMemorySegment::decommit() {
+  assert(_virtual_memory.committed_size() == _virtual_memory.actual_committed_size(),
+    "The committed memory doesn't match the expanded memory.");
+
+  const size_t committed_size = virtual_space().actual_committed_size();
+  if (committed_size > 0) {
+    virtual_space().shrink_by(committed_size);
+  }
+
+  assert(_virtual_memory.actual_committed_size() == 0, "invariant");
+}
+
+// Attempt to get a committed block
+void* JfrVirtualMemorySegment::take_from_committed(size_t block_size_request_words) {
+  // The virtual spaces are always expanded by the
+  // commit granularity to enforce the following condition.
+  // Without this the is_available check will not work correctly.
+  assert(_virtual_memory.committed_size() == _virtual_memory.actual_committed_size(),
+    "The committed memory doesn't match the expanded memory.");
+  if (!is_available(block_size_request_words)) {
+    return NULL;
+  }
+  void* const block = top();
+  assert(block != NULL, "invariant");
+  inc_top(block_size_request_words);
+  return block;
+}
+
+class JfrVirtualMemoryManager : public JfrCHeapObj {
+ typedef JfrVirtualMemorySegment Segment;
+ private:
+  Segment* _segments;
+  Segment* _current_segment;
+  size_t _reservation_size_request_words;
+  size_t _reservation_size_request_limit_words; // total reservation limit
+
+  // Sum of reserved and committed memory in the segments
+  size_t _current_reserved_words;
+  size_t _current_committed_words;
+
+  void link(Segment* segment);
+  Segment* current();
+
+  void inc_reserved_words(size_t words);
+  void inc_committed_words(size_t words);
+
+  bool new_segment(size_t reservation_size_request_words);
+
+  bool expand_segment_by(Segment* segment, size_t block_size_request_words);
+
+  bool expand_by(size_t block_size_request_words, size_t reservation_size_request_words);
+  bool can_reserve() const;
+
+ public:
+  JfrVirtualMemoryManager();
+  ~JfrVirtualMemoryManager();
+
+  bool initialize(size_t reservation_size_request_words, size_t segment_count = 1);
+  void* commit(size_t requested_block_size_words);
+
+  bool is_full() const {
+    return reserved_high() == committed_high();
+  }
+
+  const u1* committed_low() const { return _current_segment->committed_low(); }
+  const u1* committed_high() const { return _current_segment->committed_high(); }
+  const u1* reserved_low() const { return _current_segment->reserved_low(); }
+  const u1* reserved_high() const { return _current_segment->reserved_high(); }
+};
+
+JfrVirtualMemoryManager::JfrVirtualMemoryManager() :
+  _segments(NULL),
+  _current_segment(NULL),
+  _reservation_size_request_words(0),
+  _reservation_size_request_limit_words(0),
+  _current_reserved_words(0),
+  _current_committed_words(0) {}
+
+JfrVirtualMemoryManager::~JfrVirtualMemoryManager() {
+  JfrVirtualMemorySegment* segment = _segments;
+  while (segment != NULL) {
+    JfrVirtualMemorySegment* next_segment = segment->next();
+    delete segment;
+    segment = next_segment;
+  }
+}
+
+// for now only allow a singleton segment per virtual memory client
+bool JfrVirtualMemoryManager::initialize(size_t reservation_size_request_words, size_t segment_count /* 1 */) {
+  assert(is_aligned(reservation_size_request_words * BytesPerWord, os::vm_allocation_granularity()), "invariant");
+  _reservation_size_request_words = reservation_size_request_words;
+  assert(segment_count > 0, "invariant");
+  _reservation_size_request_limit_words = reservation_size_request_words * segment_count;
+  assert(is_aligned(_reservation_size_request_limit_words * BytesPerWord, os::vm_allocation_granularity()), "invariant");
+  return new_segment(_reservation_size_request_words);
+}
+
+bool JfrVirtualMemoryManager::can_reserve() const {
+  return _reservation_size_request_limit_words == 0 ? true : _current_reserved_words < _reservation_size_request_limit_words;
+}
+
+// Allocate another segment and add it to the list.
+bool JfrVirtualMemoryManager::new_segment(size_t reservation_size_request_words) {
+  assert(reservation_size_request_words > 0, "invariant");
+  assert(is_aligned(reservation_size_request_words * BytesPerWord, os::vm_allocation_granularity()), "invariant");
+  Segment* segment = new Segment();
+  if (NULL == segment) {
+    return false;
+  }
+  if (!segment->initialize(reservation_size_request_words * BytesPerWord)) {
+    delete segment;
+    return false;
+  }
+  assert(segment->reserved_words() == reservation_size_request_words,
+    "Actual reserved memory size differs from requested reservation memory size");
+  link(segment);
+  return true;
+}
+
+bool JfrVirtualMemoryManager::expand_segment_by(JfrVirtualMemorySegment* segment, size_t block_size_request_words) {
+  assert(segment != NULL, "invariant");
+  const size_t before = segment->committed_words();
+  const bool result = segment->expand_by(block_size_request_words);
+  const size_t after = segment->committed_words();
+  // after and before can be the same if the memory was pre-committed.
+  assert(after >= before, "Inconsistency");
+  inc_committed_words(after - before);
+  return result;
+}
+
+void JfrVirtualMemoryManager::inc_reserved_words(size_t words) {
+  _current_reserved_words += words;
+}
+
+JfrVirtualMemorySegment* JfrVirtualMemoryManager::current() {
+  return _current_segment;
+}
+
+void JfrVirtualMemoryManager::inc_committed_words(size_t words) {
+  _current_committed_words += words;
+}
+
+bool JfrVirtualMemoryManager::expand_by(size_t block_size_request_words, size_t reservation_size_request_words) {
+  assert(is_aligned(block_size_request_words * BytesPerWord, os::vm_page_size()), "invariant");
+  assert(is_aligned(block_size_request_words * BytesPerWord, os::vm_allocation_granularity()), "invariant");
+  assert(is_aligned(reservation_size_request_words * BytesPerWord, os::vm_page_size()), "invariant");
+  assert(is_aligned(reservation_size_request_words * BytesPerWord, os::vm_allocation_granularity()), "invariant");
+  assert(block_size_request_words <= reservation_size_request_words, "invariant");
+  // Attempt to commit more memory from the current virtual space reservation.
+  if (expand_segment_by(current(), block_size_request_words)) {
+    return true;
+  }
+
+  // reached limit of what is allowed to be reserved?
+  if (!can_reserve()) {
+    return false;
+  }
+
+  // Get another segment.
+  if (!new_segment(reservation_size_request_words)) {
+    return false;
+  }
+
+  if (current()->is_pre_committed()) {
+    // The memory was pre-committed, so we are done here.
+    assert(block_size_request_words <= current()->committed_words(),
+           "The new VirtualSpace was pre-committed, so it"
+           "should be large enough to fit the alloc request.");
+    return true;
+  }
+  return expand_segment_by(current(), block_size_request_words);
+}
+
+void JfrVirtualMemoryManager::link(JfrVirtualMemorySegment* segment) {
+  assert(segment != NULL, "invariant");
+  if (_segments == NULL) {
+    _segments = segment;
+  } else {
+    assert(_current_segment != NULL, "invariant");
+    assert(_segments == _current_segment, "invariant");
+    _current_segment->set_next(segment);
+  }
+  _current_segment = segment;
+  inc_reserved_words(segment->reserved_words());
+  inc_committed_words(segment->committed_words());
+}
+
+void* JfrVirtualMemoryManager::commit(size_t block_size_request_words) {
+  assert(is_aligned(block_size_request_words * BytesPerWord, os::vm_allocation_granularity()), "invariant");
+  void* block = current()->commit(block_size_request_words);
+  if (block != NULL) {
+    return block;
+  }
+  assert(block == NULL, "invariant");
+  if (is_full()) {
+    return NULL;
+  }
+  assert(block_size_request_words <= _reservation_size_request_words, "invariant");
+  if (expand_by(block_size_request_words, _reservation_size_request_words)) {
+    block = current()->commit(block_size_request_words);
+    assert(block != NULL, "The allocation was expected to succeed after the expansion");
+  }
+  return block;
+}
+
+JfrVirtualMemory::JfrVirtualMemory() :
+  _vmm(NULL),
+  _reserved_low(),
+  _reserved_high(),
+  _top(NULL),
+  _commit_point(NULL),
+  _physical_commit_size_request_words(0),
+  _aligned_datum_size_bytes(0) {}
+
+JfrVirtualMemory::~JfrVirtualMemory() {
+  assert(_vmm != NULL, "invariant");
+  delete _vmm;
+}
+
+size_t JfrVirtualMemory::aligned_datum_size_bytes() const {
+  return _aligned_datum_size_bytes;
+}
+
+static void adjust_allocation_ratio(size_t* const reservation_size_bytes, size_t* const commit_size_bytes) {
+  assert(reservation_size_bytes != NULL, "invariant");
+  assert(*reservation_size_bytes > 0, "invariant");
+  assert(commit_size_bytes != NULL, "invariant");
+  assert(*commit_size_bytes > 0, "invariant");
+  assert(*reservation_size_bytes >= *commit_size_bytes, "invariant");
+  assert(is_aligned(*reservation_size_bytes, os::vm_allocation_granularity()), "invariant");
+  assert(is_aligned(*commit_size_bytes, os::vm_allocation_granularity()), "invariant");
+
+  size_t reservation_size_units = *reservation_size_bytes / os::vm_allocation_granularity();
+  size_t commit_size_units = *commit_size_bytes / os::vm_allocation_granularity();
+  assert(reservation_size_units > 0, "invariant");
+  assert(commit_size_units > 0, "invariant");
+
+  size_t original_ratio_units = reservation_size_units / commit_size_units;
+  size_t rem = reservation_size_units % commit_size_units;
+  assert(original_ratio_units > 0, "invariant");
+
+  if (rem > 0) {
+    reservation_size_units -= rem % original_ratio_units;
+    commit_size_units += rem / original_ratio_units;
+  }
+
+  assert(commit_size_units > 0, "invariant");
+  assert(reservation_size_units % original_ratio_units == 0, "invariant");
+  assert(original_ratio_units * commit_size_units == reservation_size_units, "invariant");
+  assert(original_ratio_units == reservation_size_units / commit_size_units, "invariant");
+  *reservation_size_bytes = reservation_size_units * os::vm_allocation_granularity();
+  *commit_size_bytes = commit_size_units * os::vm_allocation_granularity();
+  assert((*reservation_size_bytes % *commit_size_bytes) == 0, "invariant");
+}
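+// Worked example (illustrative only): for a reservation of 10 units and a commit size of
+// 3 units of vm_allocation_granularity, original_ratio_units = 10 / 3 = 3 and rem = 1.
+// The remainder is folded back: reservation_units -= 1 % 3 (= 1), commit_units += 1 / 3 (= 0),
+// giving 9 and 3, so the reservation ends up an exact multiple (3 * 3 = 9) of the commit size.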
+
+
+void* JfrVirtualMemory::initialize(size_t reservation_size_request_bytes,
+                                   size_t block_size_request_bytes,
+                                   size_t datum_size_bytes /* 1 */) {
+  assert(_vmm == NULL, "invariant");
+  _vmm = new JfrVirtualMemoryManager();
+
+  if (_vmm == NULL) {
+    return NULL;
+  }
+
+  assert(reservation_size_request_bytes > 0, "invariant");
+  _aligned_datum_size_bytes = align_up(datum_size_bytes, BytesPerWord);
+  assert(is_aligned(_aligned_datum_size_bytes, BytesPerWord), "invariant");
+
+  reservation_size_request_bytes = ReservedSpace::allocation_align_size_up(reservation_size_request_bytes);
+  assert(is_aligned(reservation_size_request_bytes, os::vm_allocation_granularity()), "invariant");
+  assert(is_aligned(reservation_size_request_bytes, _aligned_datum_size_bytes), "invariant");
+  block_size_request_bytes = MAX2(block_size_request_bytes, (size_t)os::vm_allocation_granularity());
+  block_size_request_bytes = ReservedSpace::allocation_align_size_up(block_size_request_bytes);
+  assert(is_aligned(block_size_request_bytes, os::vm_allocation_granularity()), "invariant");
+  assert(is_aligned(block_size_request_bytes, _aligned_datum_size_bytes), "invariant");
+  // adjustment to valid ratio in units of vm_allocation_granularity
+  adjust_allocation_ratio(&reservation_size_request_bytes, &block_size_request_bytes);
+  assert(is_aligned(reservation_size_request_bytes, os::vm_allocation_granularity()), "invariant");
+  assert(is_aligned(reservation_size_request_bytes, _aligned_datum_size_bytes), "invariant");
+  assert(is_aligned(block_size_request_bytes, os::vm_allocation_granularity()), "invariant");
+  assert(is_aligned(block_size_request_bytes, _aligned_datum_size_bytes), "invariant");
+  assert((reservation_size_request_bytes % block_size_request_bytes) == 0, "invariant");
+  const size_t reservation_size_request_words = reservation_size_request_bytes / BytesPerWord;
+  _physical_commit_size_request_words = block_size_request_bytes / BytesPerWord;
+  // virtual memory reservation
+  if (!_vmm->initialize(reservation_size_request_words)) {
+    // is implicitly "full" if reservation fails
+    assert(is_full(), "invariant");
+    return NULL;
+  }
+  _reserved_low = (const u1*)_vmm->reserved_low();
+  _reserved_high = (const u1*)_vmm->reserved_high();
+  // reservation complete
+  _top = (u1*)_vmm->committed_high();
+  _commit_point = _top;
+  assert(_reserved_low == _top, "invariant"); // initial empty state
+  assert((size_t)(_reserved_high - _reserved_low) == reservation_size_request_bytes, "invariant");
+  // initial commit
+  commit_memory_block();
+  return _top;
+}
+
+void* JfrVirtualMemory::commit(size_t block_size_request_words) {
+  assert(_vmm != NULL, "invariant");
+  assert(is_aligned(block_size_request_words * BytesPerWord, os::vm_allocation_granularity()), "invariant");
+  return _vmm->commit(block_size_request_words);
+}
+
+bool JfrVirtualMemory::is_full() const {
+  return _top == _reserved_high;
+}
+
+bool JfrVirtualMemory::is_empty() const {
+  return _top == _reserved_low;
+}
+
+bool JfrVirtualMemory::commit_memory_block() {
+  assert(_vmm != NULL, "invariant");
+  assert(!is_full(), "invariant");
+  assert(_top == _commit_point, "invariant");
+
+  void* const block = _vmm->commit(_physical_commit_size_request_words);
+  if (block != NULL) {
+    _commit_point = _vmm->committed_high();
+    return true;
+  }
+  // all reserved virtual memory is committed
+  assert(block == NULL, "invariant");
+  assert(_vmm->reserved_high() == _vmm->committed_high(), "invariant");
+  return false;
+}
+
+void* JfrVirtualMemory::new_datum() {
+  assert(_vmm != NULL, "invariant");
+  assert(!is_full(), "invariant");
+  if (_top == _commit_point) {
+    if (!commit_memory_block()) {
+      assert(is_full(), "invariant");
+      return NULL;
+    }
+  }
+  assert(_top + _aligned_datum_size_bytes <= _commit_point, "invariant");
+  u1* allocation = _top;
+  _top += _aligned_datum_size_bytes;
+  assert(is_aligned(allocation, _aligned_datum_size_bytes), "invariant");
+  return allocation;
+}
+
+void* JfrVirtualMemory::index_ptr(size_t index) {
+  assert((index * _aligned_datum_size_bytes) + _reserved_low < _commit_point, "invariant");
+  return (void*)((index * _aligned_datum_size_bytes) + _reserved_low);
+}
+
+void* JfrVirtualMemory::get(size_t index) {
+  return index_ptr(index);
+}
+
+size_t JfrVirtualMemory::count() const {
+  return (_top - _reserved_low) / _aligned_datum_size_bytes;
+}
+
+size_t JfrVirtualMemory::live_set() const {
+  return _top - _reserved_low;
+}
+
+size_t JfrVirtualMemory::reserved_size() const {
+  return _reserved_high - _reserved_low;
+}
+
+bool JfrVirtualMemory::compact(size_t index) {
+  assert(index > 0, "invariant");
+  assert(index <= reserved_size(), "invariant");
+  const u1* low = static_cast<u1*>(index_ptr(index));
+  const size_t block_size = _top - low;
+  memcpy(const_cast<u1*>(_reserved_low), low, block_size);
+  _top = const_cast<u1*>(_reserved_low) + block_size;
+  assert(live_set() == block_size, "invariant");
+  return true;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/storage/jfrVirtualMemory.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_RECORDER_STORAGE_JFRVIRTUALMEMORY_HPP
+#define SHARE_VM_JFR_RECORDER_STORAGE_JFRVIRTUALMEMORY_HPP
+
+#include "jfr/utilities/jfrAllocation.hpp"
+
+class JfrVirtualMemoryManager;
+
+class JfrVirtualMemory : public JfrCHeapObj {
+ private:
+  JfrVirtualMemoryManager* _vmm;
+  const u1* _reserved_low; // lowest address of reservation
+  const u1* _reserved_high; // highest address of reservation
+  u1* _top; // current allocation address
+  const u1* _commit_point; // synchronization point for committing new memory
+  size_t _physical_commit_size_request_words; // aligned to os::vm_allocation_granularity()
+  size_t _aligned_datum_size_bytes; // datum alignment
+
+  bool commit_memory_block();
+  void* commit(size_t block_size_request_words);
+  void* index_ptr(size_t index); // index to address map
+
+ public:
+  JfrVirtualMemory();
+  ~JfrVirtualMemory();
+
+  // initialization will do the reservation and return it
+  void* initialize(size_t reservation_size_request_bytes, size_t block_size_request_bytes, size_t datum_size_bytes = 1);
+
+  void* new_datum(); // datum oriented allocation
+  void* get(size_t index); // direct access retrieval
+  size_t aligned_datum_size_bytes() const;
+
+  bool is_full() const; // limit of reservation committed and in use
+  bool is_empty() const;
+
+  size_t count() const; // number of allocated datums
+  size_t live_set() const; // how much resident memory (actually in use)
+  size_t reserved_size() const; // size of reservation
+  bool compact(size_t index);
+};
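+
+// Minimal usage sketch (illustrative only; the sizes and datum width are arbitrary examples):
+//
+//   JfrVirtualMemory* vm = new JfrVirtualMemory();
+//   // reserve 1 M, commit in 64 K blocks, 8-byte datums; the reservation must be a multiple of the datum size
+//   void* base = vm->initialize(1 * M, 64 * K, 8);
+//   if (base != NULL) {
+//     void* first = vm->new_datum();                              // bump-allocate one datum in committed memory
+//     assert(first == vm->get(0), "index 0 maps to first datum"); // index-based retrieval
+//   }
+//   delete vm; // releases the underlying reservation via the manager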
+
+#endif // SHARE_VM_JFR_RECORDER_STORAGE_JFRVIRTUALMEMORY_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/stringpool/jfrStringPool.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/javaClasses.hpp"
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp"
+#include "jfr/recorder/service/jfrOptionSet.hpp"
+#include "jfr/recorder/storage/jfrMemorySpace.inline.hpp"
+#include "jfr/recorder/repository/jfrChunkWriter.hpp"
+#include "jfr/recorder/storage/jfrStorageUtils.inline.hpp"
+#include "jfr/recorder/stringpool/jfrStringPool.hpp"
+#include "jfr/recorder/stringpool/jfrStringPoolWriter.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/orderAccess.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/thread.inline.hpp"
+
+typedef JfrStringPool::Buffer* BufferPtr;
+
+static JfrStringPool* _instance = NULL;
+
+JfrStringPool& JfrStringPool::instance() {
+  return *_instance;
+}
+
+JfrStringPool* JfrStringPool::create(JfrChunkWriter& cw) {
+  assert(_instance == NULL, "invariant");
+  _instance = new JfrStringPool(cw);
+  return _instance;
+}
+
+void JfrStringPool::destroy() {
+  assert(_instance != NULL, "invariant");
+  delete _instance;
+  _instance = NULL;
+}
+
+JfrStringPool::JfrStringPool(JfrChunkWriter& cw) : _free_list_mspace(NULL), _lock(NULL), _chunkwriter(cw) {}
+
+JfrStringPool::~JfrStringPool() {
+  if (_free_list_mspace != NULL) {
+    delete _free_list_mspace;
+  }
+  if (_lock != NULL) {
+    delete _lock;
+  }
+}
+
+static const size_t unlimited_mspace_size = 0;
+static const size_t string_pool_cache_count = 2;
+static const size_t string_pool_buffer_size = 512 * K;
+
+bool JfrStringPool::initialize() {
+  assert(_free_list_mspace == NULL, "invariant");
+  _free_list_mspace = new JfrStringPoolMspace(string_pool_buffer_size, unlimited_mspace_size, string_pool_cache_count, this);
+  if (_free_list_mspace == NULL || !_free_list_mspace->initialize()) {
+    return false;
+  }
+  assert(_lock == NULL, "invariant");
+  _lock = new Mutex(Monitor::leaf - 1, "Checkpoint mutex", Mutex::_allow_vm_block_flag);
+  return _lock != NULL;
+}
+
+/*
+* If the buffer was a "lease" from the global system, release it back.
+*
+* The buffer is effectively invalidated for the thread post-return,
+* and the caller should ensure that it is not referenced any longer.
+*/
+static void release(BufferPtr buffer, Thread* thread) {
+  assert(buffer != NULL, "invariant");
+  assert(buffer->lease(), "invariant");
+  assert(buffer->acquired_by_self(), "invariant");
+  buffer->clear_lease();
+  buffer->release();
+}
+
+BufferPtr JfrStringPool::flush(BufferPtr old, size_t used, size_t requested, Thread* thread) {
+  assert(old != NULL, "invariant");
+  assert(old->lease(), "invariant");
+  if (0 == requested) {
+    // indicates a lease is being returned
+    release(old, thread);
+    return NULL;
+  }
+  // migration of in-flight information
+  BufferPtr const new_buffer = lease_buffer(thread, used + requested);
+  if (new_buffer != NULL) {
+    migrate_outstanding_writes(old, new_buffer, used, requested);
+  }
+  release(old, thread);
+  return new_buffer; // might be NULL
+}
+
+static const size_t lease_retry = 10;
+
+BufferPtr JfrStringPool::lease_buffer(Thread* thread, size_t size /* 0 */) {
+  BufferPtr buffer = mspace_get_free_lease_with_retry(size, instance()._free_list_mspace, lease_retry, thread);
+  if (buffer == NULL) {
+    buffer = mspace_allocate_transient_lease_to_free(size, instance()._free_list_mspace, thread);
+  }
+  assert(buffer->acquired_by_self(), "invariant");
+  assert(buffer->lease(), "invariant");
+  return buffer;
+}
+
+bool JfrStringPool::add(bool epoch, jlong id, jstring string, JavaThread* jt) {
+  assert(jt != NULL, "invariant");
+  const bool current_epoch = (JfrTraceIdEpoch::epoch() != 0);
+  if (current_epoch == epoch) {
+    JfrStringPoolWriter writer(jt);
+    writer.write(id);
+    writer.write(string);
+    writer.inc_nof_strings();
+  }
+  return current_epoch;
+}
+
+class StringPoolWriteOp  {
+ public:
+  typedef JfrStringPoolBuffer Type;
+ private:
+  UnBufferedWriteToChunk<Type> _writer;
+  Thread* _thread;
+  size_t _strings_processed;
+ public:
+  StringPoolWriteOp(JfrChunkWriter& writer, Thread* thread) : _writer(writer), _thread(thread), _strings_processed(0) {}
+  bool write(Type* buffer, const u1* data, size_t size) {
+    buffer->acquire(_thread); // blocking
+    const uint64_t nof_strings_used = buffer->string_count();
+    assert(nof_strings_used > 0, "invariant");
+    buffer->set_string_top(buffer->string_top() + nof_strings_used);
+    // "size processed" for string pool buffers is the number of processed string elements
+    _strings_processed += nof_strings_used;
+    const bool ret = _writer.write(buffer, data, size);
+    buffer->release();
+    return ret;
+  }
+  size_t processed() { return _strings_processed; }
+};
+
+typedef StringPoolWriteOp WriteOperation;
+typedef ConcurrentWriteOp<WriteOperation> ConcurrentWriteOperation;
+
+size_t JfrStringPool::write() {
+  Thread* const thread = Thread::current();
+  WriteOperation wo(_chunkwriter, thread);
+  ConcurrentWriteOperation cwo(wo);
+  assert(_free_list_mspace->is_full_empty(), "invariant");
+  process_free_list(cwo, _free_list_mspace);
+  return wo.processed();
+}
+
+typedef MutexedWriteOp<WriteOperation> MutexedWriteOperation;
+typedef ReleaseOp<JfrStringPoolMspace> StringPoolReleaseOperation;
+typedef CompositeOperation<MutexedWriteOperation, StringPoolReleaseOperation> StringPoolWriteOperation;
+
+size_t JfrStringPool::write_at_safepoint() {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  Thread* const thread = Thread::current();
+  WriteOperation wo(_chunkwriter, thread);
+  MutexedWriteOperation mwo(wo);
+  StringPoolReleaseOperation spro(_free_list_mspace, thread, false);
+  StringPoolWriteOperation spwo(&mwo, &spro);
+  assert(_free_list_mspace->is_full_empty(), "invariant");
+  process_free_list(spwo, _free_list_mspace);
+  return wo.processed();
+}
+
+class StringPoolBufferDiscarder {
+ private:
+  Thread* _thread;
+  size_t _processed;
+ public:
+  typedef JfrStringPoolBuffer Type;
+  StringPoolBufferDiscarder() : _thread(Thread::current()), _processed(0) {}
+  bool process(Type* buffer) {
+    buffer->acquire(_thread); // serialized access
+    const u1* const current_top = buffer->top();
+    const size_t unflushed_size = buffer->pos() - current_top;
+    if (unflushed_size == 0) {
+      assert(buffer->string_count() == 0, "invariant");
+      buffer->release();
+      return true;
+    }
+    buffer->set_top(current_top + unflushed_size);
+    const uint64_t nof_strings_used = buffer->string_count();
+    buffer->set_string_top(buffer->string_top() + nof_strings_used);
+    // "size processed" for string pool buffers is the number of string elements
+    _processed += (size_t)nof_strings_used;
+    buffer->release();
+    return true;
+  }
+  size_t processed() const { return _processed; }
+};
+
+size_t JfrStringPool::clear() {
+  StringPoolBufferDiscarder discard_operation;
+  assert(_free_list_mspace->is_full_empty(), "invariant");
+  process_free_list(discard_operation, _free_list_mspace);
+  return discard_operation.processed();
+}
+
+void JfrStringPool::register_full(BufferPtr t, Thread* thread) {
+  // nothing here at the moment
+  assert(t->retired(), "invariant");
+}
+
+void JfrStringPool::lock() {
+  assert(!_lock->owned_by_self(), "invariant");
+  _lock->lock_without_safepoint_check();
+}
+
+void JfrStringPool::unlock() {
+  _lock->unlock();
+}
+
+#ifdef ASSERT
+bool JfrStringPool::is_locked() const {
+  return _lock->owned_by_self();
+}
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/stringpool/jfrStringPool.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_RECORDER_STRINGPOOL_JFRSTRINGPOOL_HPP
+#define SHARE_VM_JFR_RECORDER_STRINGPOOL_JFRSTRINGPOOL_HPP
+
+#include "jni.h"
+#include "jfr/recorder/storage/jfrMemorySpace.hpp"
+#include "jfr/recorder/storage/jfrMemorySpaceRetrieval.hpp"
+#include "jfr/recorder/stringpool/jfrStringPoolBuffer.hpp"
+
+class JfrChunkWriter;
+class JfrStringPool;
+class Mutex;
+
+typedef JfrMemorySpace<JfrStringPoolBuffer, JfrMspaceSequentialRetrieval, JfrStringPool> JfrStringPoolMspace;
+
+//
+// Although called JfrStringPool, a more fitting description would be
+// "backing storage for the string pool located in Java".
+//
+// There are no lookups in native, only the encoding of string constants to the stream.
+//
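+// Illustrative call sequence (a sketch; "pool" stands for the JfrStringPool instance owned by the recorder):
+//
+//   JfrStringPool::add(epoch, id, string, jt); // encode one string constant into a leased buffer
+//   pool.write();                              // later, drain buffered constants to the current chunk
+//   pool.write_at_safepoint();                 // or drain and release buffers while at a safepoint
+//   pool.clear();                              // or discard buffered constants
+//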
+class JfrStringPool : public JfrCHeapObj {
+ public:
+  static bool add(bool epoch, jlong id, jstring string, JavaThread* jt);
+  size_t write();
+  size_t write_at_safepoint();
+  size_t clear();
+
+  typedef JfrStringPoolMspace::Type Buffer;
+ private:
+  JfrStringPoolMspace* _free_list_mspace;
+  Mutex* _lock;
+  JfrChunkWriter& _chunkwriter;
+
+  // mspace callback
+  void register_full(Buffer* t, Thread* thread);
+  void lock();
+  void unlock();
+  DEBUG_ONLY(bool is_locked() const;)
+
+  static Buffer* lease_buffer(Thread* thread, size_t size = 0);
+  static Buffer* flush(Buffer* old, size_t used, size_t requested, Thread* t);
+
+  JfrStringPool(JfrChunkWriter& cw);
+  ~JfrStringPool();
+
+  static JfrStringPool& instance();
+  static JfrStringPool* create(JfrChunkWriter& cw);
+  bool initialize();
+  static void destroy();
+
+  friend class JfrRecorder;
+  friend class JfrRecorderService;
+  friend class JfrStringPoolFlush;
+  friend class JfrStringPoolWriter;
+  template <typename, template <typename> class, typename>
+  friend class JfrMemorySpace;
+};
+
+#endif // SHARE_VM_JFR_RECORDER_STRINGPOOL_JFRSTRINGPOOL_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/stringpool/jfrStringPoolBuffer.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/recorder/stringpool/jfrStringPoolBuffer.hpp"
+
+JfrStringPoolBuffer::JfrStringPoolBuffer() : JfrBuffer(), _string_count_pos(0), _string_count_top(0) {}
+
+void JfrStringPoolBuffer::reinitialize() {
+  assert(acquired_by_self() || retired(), "invariant");
+  concurrent_top();
+  set_pos((start()));
+  set_string_pos(0);
+  set_string_top(0);
+  set_concurrent_top(start());
+}
+
+uint64_t JfrStringPoolBuffer::string_pos() const {
+  assert(acquired_by_self() || retired(), "invariant");
+  return _string_count_pos;
+}
+
+uint64_t JfrStringPoolBuffer::string_top() const {
+  assert(acquired_by_self() || retired(), "invariant");
+  return _string_count_top;
+}
+
+uint64_t JfrStringPoolBuffer::string_count() const {
+  assert(acquired_by_self() || retired(), "invariant");
+  return string_pos() - string_top();
+}
+
+void JfrStringPoolBuffer::set_string_pos(uint64_t value) {
+  assert(acquired_by_self() || retired(), "invariant");
+  _string_count_pos = value;
+}
+
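+// Note: advances the string count position by one per call; the value parameter is currently unused.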
+void JfrStringPoolBuffer::increment(uint64_t value) {
+  assert(acquired_by_self() || retired(), "invariant");
+  ++_string_count_pos;
+}
+
+void JfrStringPoolBuffer::set_string_top(uint64_t value) {
+  assert(acquired_by_self() || retired(), "invariant");
+  _string_count_top = value;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/stringpool/jfrStringPoolBuffer.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_RECORDER_STRINGPOOL_JFRSTRINGPOOLBUFFER_HPP
+#define SHARE_VM_JFR_RECORDER_STRINGPOOL_JFRSTRINGPOOLBUFFER_HPP
+
+#include "jfr/recorder/storage/jfrBuffer.hpp"
+
+class JfrStringPoolBuffer : public JfrBuffer {
+ private:
+  uint64_t _string_count_pos;
+  uint64_t _string_count_top;
+
+ public:
+  JfrStringPoolBuffer();
+  void reinitialize();
+  uint64_t string_pos() const;
+  uint64_t string_top() const;
+  uint64_t string_count() const;
+  void increment(uint64_t value);
+  void set_string_pos(uint64_t value);
+  void set_string_top(uint64_t value);
+};
+
+#endif // SHARE_VM_JFR_RECORDER_STRINGPOOL_JFRSTRINGPOOLBUFFER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/stringpool/jfrStringPoolWriter.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/recorder/stringpool/jfrStringPool.hpp"
+#include "jfr/recorder/stringpool/jfrStringPoolWriter.hpp"
+#include "jfr/writers/jfrEventWriterHost.inline.hpp"
+#include "jfr/writers/jfrMemoryWriterHost.inline.hpp"
+
+JfrStringPoolFlush::JfrStringPoolFlush(Type* old, size_t used, size_t requested, Thread* t) :
+  _result(JfrStringPool::flush(old, used, requested, t)) {}
+
+JfrStringPoolWriter::JfrStringPoolWriter(Thread* thread) :
+  JfrStringPoolWriterBase(JfrStringPool::lease_buffer(thread), thread), _nof_strings(0) {}
+
+JfrStringPoolWriter::~JfrStringPoolWriter() {
+  assert(this->is_acquired(), "invariant");
+  if (!this->is_valid() || this->used_size() == 0) {
+    return;
+  }
+  assert(this->used_size() > 0, "invariant");
+  this->storage()->increment(_nof_strings);
+  this->commit();
+  assert(0 == this->current_offset(), "invariant");
+}
+
+void JfrStringPoolWriter::inc_nof_strings() {
+  ++_nof_strings;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/recorder/stringpool/jfrStringPoolWriter.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_RECORDER_STRINGPOOL_JFRSTRINGPOOLWRITER_HPP
+#define SHARE_VM_JFR_RECORDER_STRINGPOOL_JFRSTRINGPOOLWRITER_HPP
+
+#include "memory/allocation.hpp"
+#include "jfr/recorder/stringpool/jfrStringPoolBuffer.hpp"
+#include "jfr/writers/jfrEventWriterHost.hpp"
+#include "jfr/writers/jfrMemoryWriterHost.hpp"
+#include "jfr/writers/jfrStorageAdapter.hpp"
+
+class Thread;
+
+class JfrStringPoolFlush : public StackObj {
+ public:
+  typedef JfrStringPoolBuffer Type;
+  JfrStringPoolFlush(Type* old, size_t used, size_t requested, Thread* t);
+  Type* result() { return _result; }
+ private:
+  Type* _result;
+};
+
+typedef Adapter<JfrStringPoolFlush> JfrStringPoolAdapter;
+typedef AcquireReleaseMemoryWriterHost<JfrStringPoolAdapter, StackObj> JfrTransactionalStringPoolWriter;
+typedef EventWriterHost<BigEndianEncoder, CompressedIntegerEncoder, JfrTransactionalStringPoolWriter> JfrStringPoolWriterBase;
+
+class JfrStringPoolWriter : public JfrStringPoolWriterBase {
+ private:
+  size_t _nof_strings;
+ public:
+  JfrStringPoolWriter(Thread* thread);
+  ~JfrStringPoolWriter();
+  void inc_nof_strings();
+};
+
+#endif // SHARE_VM_JFR_RECORDER_STRINGPOOL_JFRSTRINGPOOLWRITER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/support/jfrAllocationTracer.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,43 @@
+/*
+* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*
+*/
+
+#include "precompiled.hpp"
+#include "jfr/leakprofiler/leakProfiler.hpp"
+#include "jfr/support/jfrAllocationTracer.hpp"
+#include "jfr/support/jfrThreadLocal.hpp"
+#include "runtime/thread.hpp"
+
+JfrAllocationTracer::JfrAllocationTracer(HeapWord* obj, size_t alloc_size, Thread* thread) : _tl(NULL) {
+  if (LeakProfiler::is_running()) {
+    assert(thread->is_Java_thread(), "invariant");
+    _tl = thread->jfr_thread_local();
+    LeakProfiler::sample(obj, alloc_size, (JavaThread*)thread);
+  }
+}
+
+JfrAllocationTracer::~JfrAllocationTracer() {
+  if (_tl != NULL) {
+    _tl->clear_cached_stack_trace();
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/support/jfrAllocationTracer.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,40 @@
+/*
+* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*
+*/
+
+#ifndef SHARE_VM_JFR_SUPPORT_JFRALLOCATIONTRACER_HPP
+#define SHARE_VM_JFR_SUPPORT_JFRALLOCATIONTRACER_HPP
+
+#include "memory/allocation.hpp"
+
+class JfrThreadLocal;
+
+class JfrAllocationTracer : public StackObj {
+ private:
+  JfrThreadLocal* _tl;
+ public:
+  JfrAllocationTracer(HeapWord* obj, size_t alloc_size, Thread* thread);
+  ~JfrAllocationTracer();
+};
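+
+// Illustrative use (a sketch; obj, alloc_size and thread come from the allocation site):
+//
+//   {
+//     JfrAllocationTracer tracer(obj, alloc_size, thread); // samples obj when the leak profiler is running
+//     // ... allocation completes
+//   } // the destructor clears the thread-local cached stack trace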
+
+#endif // SHARE_VM_JFR_SUPPORT_JFRALLOCATIONTRACER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/support/jfrEventClass.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
+#include "jfr/support/jfrEventClass.hpp"
+
+bool JdkJfrEvent::is(const Klass* k) {
+  return JfrTraceId::is_jdk_jfr_event(k);
+}
+
+bool JdkJfrEvent::is(const jclass jc) {
+  return JfrTraceId::is_jdk_jfr_event(jc);
+}
+
+void JdkJfrEvent::tag_as(const Klass* k) {
+  JfrTraceId::tag_as_jdk_jfr_event(k);
+}
+
+bool JdkJfrEvent::is_subklass(const Klass* k) {
+  return JfrTraceId::is_jdk_jfr_event_sub(k);
+}
+
+bool JdkJfrEvent::is_subklass(const jclass jc) {
+  return JfrTraceId::is_jdk_jfr_event_sub(jc);
+}
+
+void JdkJfrEvent::tag_as_subklass(const Klass* k) {
+  JfrTraceId::tag_as_jdk_jfr_event_sub(k);
+}
+
+void JdkJfrEvent::tag_as_subklass(const jclass jc) {
+  JfrTraceId::tag_as_jdk_jfr_event_sub(jc);
+}
+
+bool JdkJfrEvent::is_a(const Klass* k) {
+  return JfrTraceId::in_jdk_jfr_event_hierarchy(k);
+}
+
+bool JdkJfrEvent::is_a(const jclass jc) {
+  return JfrTraceId::in_jdk_jfr_event_hierarchy(jc);
+}
+
+bool JdkJfrEvent::is_host(const Klass* k) {
+  return JfrTraceId::is_event_host(k);
+}
+
+bool JdkJfrEvent::is_host(const jclass jc) {
+  return JfrTraceId::is_event_host(jc);
+}
+
+void JdkJfrEvent::tag_as_host(const Klass* k) {
+  JfrTraceId::tag_as_event_host(k);
+}
+
+void JdkJfrEvent::tag_as_host(const jclass jc) {
+  JfrTraceId::tag_as_event_host(jc);
+}
+
+bool JdkJfrEvent::is_visible(const Klass* k) {
+  return JfrTraceId::in_visible_set(k);
+}
+
+bool JdkJfrEvent::is_visible(const jclass jc) {
+  return JfrTraceId::in_visible_set(jc);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/support/jfrEventClass.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_SUPPORT_JFREVENTCLASS_HPP
+#define SHARE_VM_JFR_SUPPORT_JFREVENTCLASS_HPP
+
+#include "jni.h"
+#include "memory/allocation.hpp"
+
+class Klass;
+
+//
+// For convenient access to the jdk.jfr.Event klass hierarchy.
+//
+class JdkJfrEvent : AllStatic {
+ public:
+  // jdk.jfr.Event
+  static bool is(const Klass* k);
+  static bool is(const jclass jc);
+  static void tag_as(const Klass* k);
+
+  // jdk.jfr.Event subklasses
+  static bool is_subklass(const Klass* k);
+  static bool is_subklass(const jclass jc);
+  static void tag_as_subklass(const Klass* k);
+  static void tag_as_subklass(const jclass jc);
+
+  // jdk.jfr.Event hierarchy
+  static bool is_a(const Klass* k);
+  static bool is_a(const jclass jc);
+
+  // klasses that host a jdk.jfr.Event
+  static bool is_host(const Klass* k);
+  static bool is_host(const jclass jc);
+  static void tag_as_host(const Klass* k);
+  static void tag_as_host(const jclass jc);
+
+  // in the set of classes made visible to Java
+  static bool is_visible(const Klass* k);
+  static bool is_visible(const jclass jc);
+};
+
+#endif // SHARE_VM_JFR_SUPPORT_JFREVENTCLASS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/support/jfrFlush.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/recorder/jfrEventSetting.inline.hpp"
+#include "jfr/recorder/storage/jfrStorage.hpp"
+#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
+#include "jfr/support/jfrFlush.hpp"
+#include "jfr/support/jfrThreadLocal.hpp"
+#include "runtime/thread.inline.hpp"
+#include "utilities/debug.hpp"
+
+JfrFlush::JfrFlush(JfrStorage::Buffer* old, size_t used, size_t requested, Thread* t) :
+  _result(JfrStorage::flush(old, used, requested, true, t)) {
+}
+
+template <typename T>
+class LessThanHalfBufferSize : AllStatic {
+public:
+  static bool evaluate(T* t) {
+    assert(t != NULL, "invariant");
+    return t->free_size() < t->size() / 2;
+  }
+};
+
+template <typename T>
+class LessThanSize : AllStatic {
+ public:
+  static bool evaluate(T* t, size_t size) {
+    assert(t != NULL, "invariant");
+    return t->free_size() < size;
+  }
+};
+
+bool jfr_is_event_enabled(JfrEventId id) {
+  return JfrEventSetting::is_enabled(id);
+}
+
+bool jfr_has_stacktrace_enabled(JfrEventId id) {
+  return JfrEventSetting::has_stacktrace(id);
+}
+
+void jfr_conditional_flush(JfrEventId id, size_t size, Thread* t) {
+  assert(jfr_is_event_enabled(id), "invariant");
+  if (t->jfr_thread_local()->has_native_buffer()) {
+    JfrStorage::Buffer* const buffer = t->jfr_thread_local()->native_buffer();
+    if (LessThanSize<JfrStorage::Buffer>::evaluate(buffer, size)) {
+      JfrFlush f(buffer, 0, 0, t);
+    }
+  }
+}
+
+bool jfr_save_stacktrace(Thread* t) {
+  JfrThreadLocal* const tl = t->jfr_thread_local();
+  if (tl->has_cached_stack_trace()) {
+    return false; // no ownership
+  }
+  tl->set_cached_stack_trace_id(JfrStackTraceRepository::record(t));
+  return true;
+}
+
+void jfr_clear_stacktrace(Thread* t) {
+  t->jfr_thread_local()->clear_cached_stack_trace();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/support/jfrFlush.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_SUPPORT_JFRFLUSH_HPP
+#define SHARE_VM_JFR_SUPPORT_JFRFLUSH_HPP
+
+#include "jfr/recorder/storage/jfrBuffer.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+#include "memory/allocation.hpp"
+
+class Thread;
+
+class JfrFlush : public StackObj {
+ public:
+  typedef JfrBuffer Type;
+  JfrFlush(Type* old, size_t used, size_t requested, Thread* t);
+  Type* result() const { return _result; }
+ private:
+  Type* _result;
+};
+
+void jfr_conditional_flush(JfrEventId id, size_t size, Thread* t);
+bool jfr_is_event_enabled(JfrEventId id);
+bool jfr_has_stacktrace_enabled(JfrEventId id);
+bool jfr_save_stacktrace(Thread* t);
+void jfr_clear_stacktrace(Thread* t);
+
+template <typename Event>
+class JfrConditionalFlush {
+ public:
+  typedef JfrBuffer Type;
+  JfrConditionalFlush(Thread* t) {
+    if (jfr_is_event_enabled(Event::eventId)) {
+      jfr_conditional_flush(Event::eventId, sizeof(Event), t);
+    }
+  }
+};
+
+template <typename Event>
+class JfrConditionalFlushWithStacktrace : public JfrConditionalFlush<Event> {
+  Thread* _t;
+  bool _owner;
+ public:
+  JfrConditionalFlushWithStacktrace(Thread* t) : JfrConditionalFlush<Event>(t), _t(t), _owner(false) {
+    if (Event::has_stacktrace() && jfr_has_stacktrace_enabled(Event::eventId)) {
+      _owner = jfr_save_stacktrace(t);
+    }
+  }
+  ~JfrConditionalFlushWithStacktrace() {
+    if (_owner) {
+      jfr_clear_stacktrace(_t);
+    }
+  }
+};
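+
+// Hypothetical sketch of an event emission site (MyEvent is a stand-in type providing
+// eventId and has_stacktrace(), as expected by the templates above):
+//
+//   JfrConditionalFlushWithStacktrace<MyEvent> flush(thread); // flush a low buffer, cache a stack trace
+//   // ... write the event; the destructor drops the cached trace if this scope recorded it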
+
+#endif // SHARE_VM_JFR_SUPPORT_JFRFLUSH_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/support/jfrIntrinsics.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,56 @@
+/*
+* Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*
+*/
+
+#ifndef SHARE_VM_JFR_SUPPORT_JFRINTRINSICS_HPP
+#define SHARE_VM_JFR_SUPPORT_JFRINTRINSICS_HPP
+
+#include "utilities/macros.hpp"
+
+#if INCLUDE_JFR
+#include "jfr/support/jfrKlassExtension.hpp"
+#include "jfr/support/jfrThreadExtension.hpp"
+#include "jfr/utilities/jfrTime.hpp"
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdMacros.hpp"
+
+#define JFR_TEMPLATES(template) \
+  template(jdk_jfr_internal_JVM,          "jdk/jfr/internal/JVM")
+
+#define JFR_INTRINSICS(do_intrinsic, do_class, do_name, do_signature, do_alias)                              \
+  do_intrinsic(_counterTime,        jdk_jfr_internal_JVM, counterTime_name, void_long_signature, F_SN)       \
+    do_name(     counterTime_name,                             "counterTime")                                \
+  do_intrinsic(_getClassId,         jdk_jfr_internal_JVM, getClassId_name, class_long_signature, F_SN)       \
+    do_name(     getClassId_name,                              "getClassId")                                 \
+  do_intrinsic(_getEventWriter,   jdk_jfr_internal_JVM, getEventWriter_name, void_object_signature, F_SN)    \
+    do_name(     getEventWriter_name,                          "getEventWriter")                             \
+
+#define JFR_HAVE_INTRINSICS
+#define JFR_TIME_FUNCTION JfrTime::time_function()
+
+#else // !INCLUDE_JFR
+
+#define JFR_TEMPLATES(template)
+#define JFR_INTRINSICS(do_intrinsic, do_class, do_name, do_signature, do_alias)
+
+#endif // INCLUDE_JFR
+#endif // SHARE_VM_JFR_SUPPORT_JFRINTRINSICS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/support/jfrKlassExtension.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,42 @@
+/*
+* Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*
+*/
+
+#ifndef SHARE_VM_JFR_SUPPORT_JFRKLASSEXTENSION_HPP
+#define SHARE_VM_JFR_SUPPORT_JFRKLASSEXTENSION_HPP
+
+#include "jfr/instrumentation/jfrEventClassTransformer.hpp"
+#include "jfr/support/jfrTraceIdExtension.hpp"
+
+#define DEFINE_KLASS_TRACE_ID_OFFSET \
+  static ByteSize trace_id_offset() { return in_ByteSize(offset_of(InstanceKlass, _trace_id)); }
+
+#define KLASS_TRACE_ID_OFFSET InstanceKlass::trace_id_offset()
+
+#define JDK_JFR_EVENT_SUBKLASS 16
+#define JDK_JFR_EVENT_KLASS    32
+#define EVENT_HOST_KLASS       64
+#define IS_EVENT_KLASS(ptr) (((ptr)->trace_id() & (JDK_JFR_EVENT_KLASS | JDK_JFR_EVENT_SUBKLASS)) != 0)
+#define ON_KLASS_CREATION(k, p, t) if (IS_EVENT_KLASS(k)) JfrEventClassTransformer::on_klass_creation(k, p, t)
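+
+// Example (illustrative): a klass tagged with JDK_JFR_EVENT_SUBKLASS has the 16 bit set in its
+// trace id, so masking with (JDK_JFR_EVENT_KLASS | JDK_JFR_EVENT_SUBKLASS) is non-zero,
+// IS_EVENT_KLASS evaluates true and ON_KLASS_CREATION forwards the klass to the event class transformer.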
+
+#endif // SHARE_VM_JFR_SUPPORT_JFRKLASSEXTENSION_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/support/jfrStackTraceMark.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/recorder/jfrEventSetting.inline.hpp"
+#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
+#include "jfr/support/jfrStackTraceMark.hpp"
+#include "jfr/support/jfrThreadLocal.hpp"
+#include "runtime/thread.inline.hpp"
+
+JfrStackTraceMark::JfrStackTraceMark() : _t(Thread::current()), _previous_id(0), _previous_hash(0) {
+  JfrThreadLocal* const tl = _t->jfr_thread_local();
+  if (tl->has_cached_stack_trace()) {
+    _previous_id = tl->cached_stack_trace_id();
+    _previous_hash = tl->cached_stack_trace_hash();
+  }
+  tl->set_cached_stack_trace_id(JfrStackTraceRepository::record(Thread::current()));
+}
+
+JfrStackTraceMark::JfrStackTraceMark(Thread* t) : _t(t), _previous_id(0), _previous_hash(0) {
+  JfrThreadLocal* const tl = _t->jfr_thread_local();
+  if (tl->has_cached_stack_trace()) {
+    _previous_id = tl->cached_stack_trace_id();
+    _previous_hash = tl->cached_stack_trace_hash();
+  }
+  tl->set_cached_stack_trace_id(JfrStackTraceRepository::record(t));
+}
+
+JfrStackTraceMark::JfrStackTraceMark(JfrEventId eventId) : _t(NULL), _previous_id(0), _previous_hash(0) {
+  if (JfrEventSetting::has_stacktrace(eventId)) {
+    _t = Thread::current();
+    JfrThreadLocal* const tl = _t->jfr_thread_local();
+    if (tl->has_cached_stack_trace()) {
+      _previous_id = tl->cached_stack_trace_id();
+      _previous_hash = tl->cached_stack_trace_hash();
+    }
+    tl->set_cached_stack_trace_id(JfrStackTraceRepository::record(_t));
+  }
+}
+
+JfrStackTraceMark::JfrStackTraceMark(JfrEventId eventId, Thread* t) : _t(NULL), _previous_id(0), _previous_hash(0) {
+  if (JfrEventSetting::has_stacktrace(eventId)) {
+    _t = t;
+    JfrThreadLocal* const tl = _t->jfr_thread_local();
+    if (tl->has_cached_stack_trace()) {
+      _previous_id = tl->cached_stack_trace_id();
+      _previous_hash = tl->cached_stack_trace_hash();
+    }
+    tl->set_cached_stack_trace_id(JfrStackTraceRepository::record(_t));
+  }
+}
+
+JfrStackTraceMark::~JfrStackTraceMark() {
+  if (_previous_id != 0) {
+    _t->jfr_thread_local()->set_cached_stack_trace_id(_previous_id, _previous_hash);
+  } else {
+    if (_t != NULL) {
+      _t->jfr_thread_local()->clear_cached_stack_trace();
+    }
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/support/jfrStackTraceMark.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_SUPPORT_JFRSTACKTRACEMARK_HPP
+#define SHARE_VM_JFR_SUPPORT_JFRSTACKTRACEMARK_HPP
+
+#include "memory/allocation.hpp"
+#include "jfrfiles/jfrEventIds.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+
+class Thread;
+
+class JfrStackTraceMark {
+ private:
+  Thread* _t;
+  traceid _previous_id;
+  unsigned int _previous_hash;
+ public:
+  JfrStackTraceMark();
+  JfrStackTraceMark(Thread* t);
+  JfrStackTraceMark(JfrEventId eventId);
+  JfrStackTraceMark(JfrEventId eventId, Thread* t);
+  ~JfrStackTraceMark();
+};
+
+#endif // SHARE_VM_JFR_SUPPORT_JFRSTACKTRACEMARK_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/support/jfrThreadExtension.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,52 @@
+/*
+* Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*
+*/
+
+#ifndef SHARE_VM_JFR_SUPPORT_JFRTHREADEXTENSION_HPP
+#define SHARE_VM_JFR_SUPPORT_JFRTHREADEXTENSION_HPP
+
+#include "jfr/periodic/sampling/jfrThreadSampler.hpp"
+#include "jfr/support/jfrThreadLocal.hpp"
+
+#define DEFINE_THREAD_LOCAL_FIELD_JFR mutable JfrThreadLocal _jfr_thread_local
+
+#define DEFINE_THREAD_LOCAL_OFFSET_JFR \
+  static ByteSize jfr_thread_local_offset() { return in_ByteSize(offset_of(Thread, _jfr_thread_local)); }
+
+#define THREAD_LOCAL_OFFSET_JFR Thread::jfr_thread_local_offset()
+
+#define DEFINE_THREAD_LOCAL_TRACE_ID_OFFSET_JFR \
+  static ByteSize trace_id_offset() { return in_ByteSize(offset_of(JfrThreadLocal, _trace_id)); }
+
+#define DEFINE_THREAD_LOCAL_ACCESSOR_JFR \
+  JfrThreadLocal* jfr_thread_local() const { return &_jfr_thread_local; }
+
+#define THREAD_ID_OFFSET_JFR JfrThreadLocal::trace_id_offset()
+
+#define THREAD_LOCAL_WRITER_OFFSET_JFR \
+  JfrThreadLocal::java_event_writer_offset() + THREAD_LOCAL_OFFSET_JFR
+
+// XXX consider implementing thread suspend tracing
+#define SUSPEND_THREAD_CONDITIONAL(thread) if (false/*(thread)->is_trace_suspend()*/) JfrThreadSampling::on_javathread_suspend(thread)
+
+#endif // SHARE_VM_JFR_SUPPORT_JFRTHREADEXTENSION_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/support/jfrThreadId.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,41 @@
+/*
+* Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*
+*/
+
+#ifndef SHARE_VM_JFR_SUPPORT_JFRTHREADID_HPP
+#define SHARE_VM_JFR_SUPPORT_JFRTHREADID_HPP
+
+#include "utilities/macros.hpp"
+#include "utilities/globalDefinitions.hpp"
+
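+// JFR_THREAD_ID resolves to the JFR-assigned trace id when JFR is built in,
+// and falls back to the OS thread id otherwise.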
+#if INCLUDE_JFR
+#include "jfr/support/jfrThreadLocal.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+#define JFR_THREAD_ID(thread) ((thread)->jfr_thread_local()->thread_id())
+#else
+typedef u8 traceid;
+#define JFR_THREAD_ID(thread) ((traceid)(thread)->osthread()->thread_id())
+#endif
+
+#endif // SHARE_VM_JFR_SUPPORT_JFRTHREADID_HPP
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/support/jfrThreadLocal.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/jni/jfrJavaSupport.hpp"
+#include "jfr/periodic/jfrThreadCPULoadEvent.hpp"
+#include "jfr/recorder/jfrRecorder.hpp"
+#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
+#include "jfr/recorder/service/jfrOptionSet.hpp"
+#include "jfr/recorder/storage/jfrStorage.hpp"
+#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
+#include "jfr/support/jfrThreadLocal.hpp"
+#include "memory/allocation.inline.hpp"
+#include "runtime/os.hpp"
+#include "runtime/thread.inline.hpp"
+
+/* This data structure is per thread and accessed only by the owning thread; no locking is required. */
+JfrThreadLocal::JfrThreadLocal() :
+  _java_event_writer(NULL),
+  _java_buffer(NULL),
+  _native_buffer(NULL),
+  _shelved_buffer(NULL),
+  _stackframes(NULL),
+  _trace_id(JfrTraceId::assign_thread_id()),
+  _thread_cp(),
+  _data_lost(0),
+  _stack_trace_id(max_julong),
+  _user_time(0),
+  _cpu_time(0),
+  _wallclock_time(os::javaTimeNanos()),
+  _stack_trace_hash(0),
+  _stackdepth(0),
+  _entering_suspend_flag(0),
+  _dead(false) {}
+
+u8 JfrThreadLocal::add_data_lost(u8 value) {
+  _data_lost += value;
+  return _data_lost;
+}
+
+bool JfrThreadLocal::has_thread_checkpoint() const {
+  return _thread_cp.valid();
+}
+
+void JfrThreadLocal::set_thread_checkpoint(const JfrCheckpointBlobHandle& ref) {
+  assert(!_thread_cp.valid(), "invariant");
+  _thread_cp = ref;
+}
+
+const JfrCheckpointBlobHandle& JfrThreadLocal::thread_checkpoint() const {
+  return _thread_cp;
+}
+
+void JfrThreadLocal::set_dead() {
+  assert(!is_dead(), "invariant");
+  _dead = true;
+}
+
+void JfrThreadLocal::on_exit(JavaThread* thread) {
+  if (JfrRecorder::is_recording()) {
+    JfrCheckpointManager::write_thread_checkpoint(thread);
+    JfrThreadCPULoadEvent::send_event_for_thread(thread);
+  }
+  thread->jfr_thread_local()->set_dead();
+}
+
+void JfrThreadLocal::on_destruct(Thread* thread) {
+  JfrThreadLocal* const tl = thread->jfr_thread_local();
+  if (tl->has_native_buffer()) {
+    release(tl->native_buffer(), thread);
+  }
+  if (tl->has_java_buffer()) {
+    release(tl->java_buffer(), thread);
+  }
+  assert(tl->shelved_buffer() == NULL, "invariant");
+  if (tl->has_java_event_writer()) {
+    JfrJavaSupport::destroy_global_jni_handle(tl->java_event_writer());
+  }
+  destroy_stackframes(thread);
+}
+
+JfrBuffer* JfrThreadLocal::acquire(Thread* thread, size_t size) {
+  return JfrStorage::acquire_thread_local(thread, size);
+}
+
+void JfrThreadLocal::release(JfrBuffer* buffer, Thread* thread) {
+  assert(buffer != NULL, "invariant");
+  JfrStorage::release_thread_local(buffer, thread);
+}
+
+JfrBuffer* JfrThreadLocal::install_native_buffer() const {
+  assert(!has_native_buffer(), "invariant");
+  _native_buffer = acquire(Thread::current());
+  return _native_buffer;
+}
+
+JfrBuffer* JfrThreadLocal::install_java_buffer() const {
+  assert(!has_java_buffer(), "invariant");
+  assert(!has_java_event_writer(), "invariant");
+  _java_buffer = acquire(Thread::current());
+  return _java_buffer;
+}
+
+JfrStackFrame* JfrThreadLocal::install_stackframes() const {
+  assert(_stackframes == NULL, "invariant");
+  _stackdepth = (u4)JfrOptionSet::stackdepth();
+  guarantee(_stackdepth > 0, "Stackdepth must be > 0");
+  _stackframes = NEW_C_HEAP_ARRAY(JfrStackFrame, _stackdepth, mtTracing);
+  return _stackframes;
+}
+
+void JfrThreadLocal::destroy_stackframes(Thread* thread) {
+  assert(thread != NULL, "invariant");
+  JfrStackFrame* frames = thread->jfr_thread_local()->stackframes();
+  if (frames != NULL) {
+    FREE_C_HEAP_ARRAY(JfrStackFrame, frames, mtTracing);
+    thread->jfr_thread_local()->set_stackframes(NULL);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/support/jfrThreadLocal.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_SUPPORT_JFRTHREADLOCAL_HPP
+#define SHARE_VM_JFR_SUPPORT_JFRTHREADLOCAL_HPP
+
+#include "jfr/recorder/checkpoint/jfrCheckpointBlob.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+#include "utilities/sizes.hpp"
+
+class JavaThread;
+class JfrBuffer;
+class JfrStackFrame;
+
+class JfrThreadLocal {
+ private:
+  jobject _java_event_writer;
+  mutable JfrBuffer* _java_buffer;
+  mutable JfrBuffer* _native_buffer;
+  JfrBuffer* _shelved_buffer;
+  mutable JfrStackFrame* _stackframes;
+  mutable traceid _trace_id;
+  JfrCheckpointBlobHandle _thread_cp;
+  u8 _data_lost;
+  traceid _stack_trace_id;
+  jlong _user_time;
+  jlong _cpu_time;
+  jlong _wallclock_time;
+  unsigned int _stack_trace_hash;
+  mutable u4 _stackdepth;
+  volatile jint _entering_suspend_flag;
+  bool _dead;
+
+  JfrBuffer* install_native_buffer() const;
+  JfrBuffer* install_java_buffer() const;
+  JfrStackFrame* install_stackframes() const;
+
+  void set_dead();
+
+ public:
+  JfrThreadLocal();
+
+  JfrBuffer* native_buffer() const {
+    return _native_buffer != NULL ? _native_buffer : install_native_buffer();
+  }
+
+  bool has_native_buffer() const {
+    return _native_buffer != NULL;
+  }
+
+  void set_native_buffer(JfrBuffer* buffer) {
+    _native_buffer = buffer;
+  }
+
+  JfrBuffer* java_buffer() const {
+    return _java_buffer != NULL ? _java_buffer : install_java_buffer();
+  }
+
+  bool has_java_buffer() const {
+    return _java_buffer != NULL;
+  }
+
+  void set_java_buffer(JfrBuffer* buffer) {
+    _java_buffer = buffer;
+  }
+
+  JfrBuffer* shelved_buffer() const {
+    return _shelved_buffer;
+  }
+
+  void shelve_buffer(JfrBuffer* buffer) {
+    _shelved_buffer = buffer;
+  }
+
+  bool has_java_event_writer() const {
+    return _java_event_writer != NULL;
+  }
+
+  jobject java_event_writer() {
+    return _java_event_writer;
+  }
+
+  void set_java_event_writer(jobject java_event_writer) {
+    _java_event_writer = java_event_writer;
+  }
+
+  JfrStackFrame* stackframes() const {
+    return _stackframes != NULL ? _stackframes : install_stackframes();
+  }
+
+  void set_stackframes(JfrStackFrame* frames) {
+    _stackframes = frames;
+  }
+
+  u4 stackdepth() const {
+    return _stackdepth;
+  }
+
+  void set_stackdepth(u4 depth) {
+    _stackdepth = depth;
+  }
+
+  traceid thread_id() const {
+    return _trace_id;
+  }
+
+  void set_thread_id(traceid thread_id) {
+    _trace_id = thread_id;
+  }
+
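+  // A cached stack trace id of max_julong is the sentinel for "no stack trace cached".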
+  void set_cached_stack_trace_id(traceid id, unsigned int hash = 0) {
+    _stack_trace_id = id;
+    _stack_trace_hash = hash;
+  }
+
+  bool has_cached_stack_trace() const {
+    return _stack_trace_id != max_julong;
+  }
+
+  void clear_cached_stack_trace() {
+    _stack_trace_id = max_julong;
+    _stack_trace_hash = 0;
+  }
+
+  traceid cached_stack_trace_id() const {
+    return _stack_trace_id;
+  }
+
+  unsigned int cached_stack_trace_hash() const {
+    return _stack_trace_hash;
+  }
+
+  void set_trace_block() {
+    _entering_suspend_flag = 1;
+  }
+
+  void clear_trace_block() {
+    _entering_suspend_flag = 0;
+  }
+
+  bool is_trace_block() const {
+    return _entering_suspend_flag != 0;
+  }
+
+  u8 data_lost() const {
+    return _data_lost;
+  }
+
+  u8 add_data_lost(u8 value);
+
+  jlong get_user_time() const {
+    return _user_time;
+  }
+
+  void set_user_time(jlong user_time) {
+    _user_time = user_time;
+  }
+
+  jlong get_cpu_time() const {
+    return _cpu_time;
+  }
+
+  void set_cpu_time(jlong cpu_time) {
+    _cpu_time = cpu_time;
+  }
+
+  jlong get_wallclock_time() const {
+    return _wallclock_time;
+  }
+
+  void set_wallclock_time(jlong wallclock_time) {
+    _wallclock_time = wallclock_time;
+  }
+
+  traceid trace_id() const {
+    return _trace_id;
+  }
+
+  traceid* const trace_id_addr() const {
+    return &_trace_id;
+  }
+
+  void set_trace_id(traceid id) const {
+    _trace_id = id;
+  }
+
+  bool is_dead() const {
+    return _dead;
+  }
+
+  bool has_thread_checkpoint() const;
+  void set_thread_checkpoint(const JfrCheckpointBlobHandle& handle);
+  const JfrCheckpointBlobHandle& thread_checkpoint() const;
+
+  static JfrBuffer* acquire(Thread* t, size_t size = 0);
+  static void release(JfrBuffer* buffer, Thread* t);
+  static void destroy_stackframes(Thread* t);
+  static void on_exit(JavaThread* t);
+  static void on_destruct(Thread* t);
+
+  // Code generation
+  static ByteSize trace_id_offset() {
+    return in_ByteSize(offset_of(JfrThreadLocal, _trace_id));
+  }
+
+  static ByteSize java_event_writer_offset() {
+    return in_ByteSize(offset_of(JfrThreadLocal, _java_event_writer));
+  }
+};
+
+#endif // SHARE_VM_JFR_SUPPORT_JFRTHREADLOCAL_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/support/jfrTraceIdExtension.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,81 @@
+/*
+* Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*
+*/
+
+#ifndef SHARE_VM_JFR_SUPPORT_JFRTRACEIDEXTENSION_HPP
+#define SHARE_VM_JFR_SUPPORT_JFRTRACEIDEXTENSION_HPP
+
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.hpp"
+
+#define DEFINE_TRACE_ID_FIELD mutable traceid _trace_id
+
+#define DEFINE_TRACE_ID_METHODS \
+  traceid trace_id() const { return _trace_id; } \
+  traceid* const trace_id_addr() const { return &_trace_id; } \
+  void set_trace_id(traceid id) const { _trace_id = id; }
+
+#define DEFINE_TRACE_ID_SIZE \
+  static size_t trace_id_size() { return sizeof(traceid); }
+
+#define INIT_ID(data) JfrTraceId::assign(data)
+#define REMOVE_ID(k) JfrTraceId::remove(k);
+#define RESTORE_ID(k) JfrTraceId::restore(k);
+
+class JfrTraceFlag {
+ private:
+  mutable jbyte _flags;
+ public:
+  JfrTraceFlag() : _flags(0) {}
+  explicit JfrTraceFlag(jbyte flags) : _flags(flags) {}
+  void set_flag(jbyte flag) const {
+    _flags |= flag;
+  }
+  void clear_flag(jbyte flag) const {
+    _flags &= (~flag);
+  }
+  jbyte flags() const { return _flags; }
+  bool is_set(jbyte flag) const {
+    return (_flags & flag) != 0;
+  }
+  jbyte* const flags_addr() const {
+    return &_flags;
+  }
+};
+
+#define DEFINE_TRACE_FLAG mutable JfrTraceFlag _trace_flags
+
+#define DEFINE_TRACE_FLAG_ACCESSOR                 \
+  void set_trace_flag(jbyte flag) const {          \
+    _trace_flags.set_flag(flag);                   \
+  }                                                \
+  jbyte trace_flags() const {                      \
+    return _trace_flags.flags();                   \
+  }                                                \
+  bool is_trace_flag_set(jbyte flag) const {       \
+    return _trace_flags.is_set(flag);              \
+  }                                                \
+  jbyte* const trace_flags_addr() const {          \
+    return _trace_flags.flags_addr();              \
+  }
+
+#endif // SHARE_VM_JFR_SUPPORT_JFRTRACEIDEXTENSION_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/utilities/jfrAllocation.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/recorder/jfrRecorder.hpp"
+#include "jfr/utilities/jfrAllocation.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+#include "memory/allocation.inline.hpp"
+#include "runtime/atomic.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
+#include "runtime/vm_version.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/macros.hpp"
+#include "utilities/nativeCallStack.hpp"
+
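+// 64-bit atomic add implemented as a CAS loop; platforms without a native
+// 64-bit cmpxchg fall back to updating the counter under JfrCounters_lock.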
+jlong atomic_add_jlong(jlong value, jlong volatile* const dest) {
+  jlong compare_value;
+  jlong exchange_value;
+#ifndef SUPPORTS_NATIVE_CX8
+  if (!VM_Version::supports_cx8()) {
+    MutexLockerEx mu(JfrCounters_lock, Mutex::_no_safepoint_check_flag);
+    return *dest += value;
+  }
+#endif
+  do {
+    compare_value = OrderAccess::load_acquire(dest);
+    exchange_value = compare_value + value;
+  } while (Atomic::cmpxchg(exchange_value, dest, compare_value) != compare_value);
+  return exchange_value;
+}
+
+#ifdef ASSERT
+
+// debug statistics
+static volatile jlong _allocated_bytes = 0;
+static volatile jlong _deallocated_bytes = 0;
+static volatile jlong _live_set_bytes = 0;
+
+static void add(size_t alloc_size) {
+  if (!JfrRecorder::is_created()) {
+    const jlong total_allocated = atomic_add_jlong((jlong)alloc_size, &_allocated_bytes);
+    const jlong current_live_set = atomic_add_jlong((jlong)alloc_size, &_live_set_bytes);
+    if (LogJFR && Verbose) tty->print_cr("Allocation: [" SIZE_FORMAT "] bytes", alloc_size);
+    if (LogJFR && Verbose) tty->print_cr("Total alloc [" JLONG_FORMAT "] bytes", total_allocated);
+    if (LogJFR && Verbose) tty->print_cr("Liveset:    [" JLONG_FORMAT "] bytes", current_live_set);
+  }
+}
+
+static void subtract(size_t dealloc_size) {
+  if (!JfrRecorder::is_created()) {
+    const jlong total_deallocated = atomic_add_jlong((jlong)dealloc_size, &_deallocated_bytes);
+    const jlong current_live_set = atomic_add_jlong(((jlong)dealloc_size * -1), &_live_set_bytes);
+    if (LogJFR && Verbose) tty->print_cr("Deallocation: [" SIZE_FORMAT "] bytes", dealloc_size);
+    if (LogJFR && Verbose) tty->print_cr("Total dealloc [" JLONG_FORMAT "] bytes", total_deallocated);
+    if (LogJFR && Verbose) tty->print_cr("Liveset:      [" JLONG_FORMAT "] bytes", current_live_set);
+  }
+}
+
+static void hook_memory_deallocation(size_t dealloc_size) {
+  subtract(dealloc_size);
+}
+#endif // ASSERT
+
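+// Called for every allocation: before the recorder is created a failed allocation is
+// only logged and the caller sees NULL; once the recorder exists, failure terminates
+// the VM just like a regular CHeapObj allocation would.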
+static void hook_memory_allocation(const char* allocation, size_t alloc_size) {
+  if (NULL == allocation) {
+    if (!JfrRecorder::is_created()) {
+      if (LogJFR) tty->print_cr("Memory allocation failed for size [" SIZE_FORMAT "] bytes", alloc_size);
+      return;
+    } else {
+      // after the critical startup phase, fail as CHeapObj does by default
+      vm_exit_out_of_memory(alloc_size, OOM_MALLOC_ERROR, "AllocateHeap");
+    }
+  }
+  debug_only(add(alloc_size));
+}
+
+void JfrCHeapObj::on_memory_allocation(const void* allocation, size_t size) {
+  hook_memory_allocation((const char*)allocation, size);
+}
+
+void* JfrCHeapObj::operator new(size_t size) throw() {
+  return operator new(size, std::nothrow);
+}
+
+void* JfrCHeapObj::operator new (size_t size, const std::nothrow_t&  nothrow_constant) throw() {
+  void* const memory = CHeapObj<mtTracing>::operator new(size, nothrow_constant, CALLER_PC);
+  hook_memory_allocation((const char*)memory, size);
+  return memory;
+}
+
+void* JfrCHeapObj::operator new [](size_t size) throw() {
+  return operator new[](size, std::nothrow);
+}
+
+void* JfrCHeapObj::operator new [](size_t size, const std::nothrow_t&  nothrow_constant) throw() {
+  void* const memory = CHeapObj<mtTracing>::operator new[](size, nothrow_constant, CALLER_PC);
+  hook_memory_allocation((const char*)memory, size);
+  return memory;
+}
+
+void JfrCHeapObj::operator delete(void* p, size_t size) {
+  debug_only(hook_memory_deallocation(size);)
+  CHeapObj<mtTracing>::operator delete(p);
+}
+
+void JfrCHeapObj::operator delete[](void* p, size_t size) {
+  debug_only(hook_memory_deallocation(size);)
+  CHeapObj<mtTracing>::operator delete[](p);
+}
+
+char* JfrCHeapObj::realloc_array(char* old, size_t size) {
+  char* const memory = ReallocateHeap(old, size, mtTracing, AllocFailStrategy::RETURN_NULL);
+  hook_memory_allocation(memory, size);
+  return memory;
+}
+
+void JfrCHeapObj::free(void* p, size_t size) {
+  debug_only(hook_memory_deallocation(size);)
+  FreeHeap(p);
+}
+
+char* JfrCHeapObj::allocate_array_noinline(size_t elements, size_t element_size) {
+  return AllocateHeap(elements * element_size, mtTracing, CALLER_PC, AllocFailStrategy::RETURN_NULL);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/utilities/jfrAllocation.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_UTILITIES_JFRALLOCATION_HPP
+#define SHARE_VM_JFR_UTILITIES_JFRALLOCATION_HPP
+
+#include "memory/allocation.hpp"
+#include "services/memTracker.hpp"
+#include "utilities/exceptions.hpp"
+
+/*
+ * A subclass to the CHeapObj<mtTracing> allocator, useful for critical
+ * Jfr subsystems. Critical in this context means subsystems for which
+ * allocations are crucial to the bootstrap and initialization of Jfr.
+ * The default behaviour by a CHeapObj is to call vm_exit_out_of_memory()
+ * on allocation failure and this is problematic in combination with the
+ * Jfr on-demand, dynamic start at runtime, capability.
+ * We would not like a user dynamically starting Jfr to
+ * tear down the VM she is about to inspect as a side effect.
+ *
+ * This allocator uses the RETURN_NULL capabilities
+ * instead of calling vm_exit_out_of_memory() until Jfr is properly started.
+ * This allows for controlled behaviour on allocation failures during startup,
+ * which means we can take actions on failure, such as transactional rollback
+ * (deallocations and restorations).
+ * In addition, this allocator allows for easy hooking of memory
+ * allocations / deallocations for debugging purposes.
+ */
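+//
+// A minimal usage sketch (illustrative only; SubsystemState and initialize_subsystem
+// are hypothetical names, not part of these sources):
+//
+//   class SubsystemState : public JfrCHeapObj {
+//     // state allocated during JFR bootstrap
+//   };
+//
+//   static bool initialize_subsystem() {
+//     SubsystemState* const state = new SubsystemState();
+//     if (state == NULL) {
+//       // allocation failed before JFR was fully started: roll back instead of exiting the VM
+//       return false;
+//     }
+//     return true;
+//   }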
+
+class JfrCHeapObj : public CHeapObj<mtTracing> {
+ private:
+  static void on_memory_allocation(const void* allocation, size_t size);
+  static char* allocate_array_noinline(size_t elements, size_t element_size);
+
+ public:
+  NOINLINE void* operator new(size_t size) throw();
+  NOINLINE void* operator new (size_t size, const std::nothrow_t&  nothrow_constant) throw();
+  NOINLINE void* operator new [](size_t size) throw();
+  NOINLINE void* operator new [](size_t size, const std::nothrow_t&  nothrow_constant) throw();
+  void  operator delete(void* p, size_t size);
+  void  operator delete [] (void* p, size_t size);
+  static char* realloc_array(char* old, size_t size);
+  static void free(void* p, size_t size = 0);
+
+  template <class T>
+  static T* new_array(size_t size) {
+    T* const memory = (T*)allocate_array_noinline(size, sizeof(T));
+    on_memory_allocation(memory, sizeof(T) * size);
+    return memory;
+  }
+};
+
+#endif // SHARE_VM_JFR_UTILITIES_JFRALLOCATION_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/utilities/jfrBigEndian.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_UTILITIES_JFRBIGENDIAN_HPP
+#define SHARE_VM_JFR_UTILITIES_JFRBIGENDIAN_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/macros.hpp"
+#ifdef TARGET_ARCH_x86
+# include "bytes_x86.hpp"
+#endif
+#ifdef TARGET_ARCH_sparc
+# include "bytes_sparc.hpp"
+#endif
+#ifdef TARGET_ARCH_zero
+# include "bytes_zero.hpp"
+#endif
+#ifdef TARGET_ARCH_arm
+# include "bytes_arm.hpp"
+#endif
+#ifdef TARGET_ARCH_ppc
+# include "bytes_ppc.hpp"
+#endif
+
+#ifndef VM_LITTLE_ENDIAN
+# define bigendian_16(x) (x)
+# define bigendian_32(x) (x)
+# define bigendian_64(x) (x)
+#else
+# define bigendian_16(x) Bytes::swap_u2(x)
+# define bigendian_32(x) Bytes::swap_u4(x)
+# define bigendian_64(x) Bytes::swap_u8(x)
+#endif
+
+class JfrBigEndian : AllStatic {
+ private:
+  template <typename T>
+  static T read_bytes(const address location);
+  template <typename T>
+  static T read_unaligned(const address location);
+ public:
+  static bool platform_supports_unaligned_reads(void);
+  static bool is_aligned(const void* location, size_t size);
+  template <typename T>
+  static T read(const void* location);
+};
+
+inline bool JfrBigEndian::is_aligned(const void* location, size_t size) {
+  assert(size <= sizeof(u8), "just checking");
+  if (size == sizeof(u1)) {
+    return true;
+  }
+  // check address alignment for datum access
+  return (((uintptr_t)location & (size - 1)) == 0);
+}
+
+template <>
+inline u1 JfrBigEndian::read_bytes(const address location) {
+  return (*location & 0xFF);
+}
+
+template <>
+inline u2 JfrBigEndian::read_bytes(const address location) {
+  return Bytes::get_Java_u2(location);
+}
+
+template <>
+inline u4 JfrBigEndian::read_bytes(const address location) {
+  return Bytes::get_Java_u4(location);
+}
+
+template <>
+inline u8 JfrBigEndian::read_bytes(const address location) {
+  return Bytes::get_Java_u8(location);
+}
+
+template <typename T>
+inline T JfrBigEndian::read_unaligned(const address location) {
+  assert(location != NULL, "just checking");
+  switch (sizeof(T)) {
+    case sizeof(u1):
+      return read_bytes<u1>(location);
+    case sizeof(u2):
+      return read_bytes<u2>(location);
+    case sizeof(u4):
+      return read_bytes<u4>(location);
+    case sizeof(u8):
+      return read_bytes<u8>(location);
+    default:
+      assert(false, "not reach");
+  }
+  return 0;
+}
+
+inline bool JfrBigEndian::platform_supports_unaligned_reads(void) {
+#if defined(IA32) || defined(AMD64) || defined(PPC) || defined(S390)
+  return true;
+#elif defined(SPARC) || defined(ARM) || defined(AARCH64)
+  return false;
+#else
+  #warning "Unconfigured platform"
+  return false;
+#endif
+}
+
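+// Reads a 1/2/4/8-byte value in big-endian (Java) byte order: aligned locations, or
+// platforms that tolerate unaligned access, use a direct load (byte-swapped on
+// little-endian); otherwise the read is delegated to the Bytes::get_Java_* accessors.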
+template<typename T>
+inline T JfrBigEndian::read(const void* location) {
+  assert(location != NULL, "just checking");
+  assert(sizeof(T) <= sizeof(u8), "no support for arbitrary sizes");
+  if (sizeof(T) == sizeof(u1)) {
+    return *(T*)location;
+  }
+  if (is_aligned(location, sizeof(T)) || platform_supports_unaligned_reads()) {
+    // fastest case
+    switch (sizeof(T)) {
+      case sizeof(u1):
+        return *(T*)location;
+      case sizeof(u2):
+        return bigendian_16(*(T*)(location));
+      case sizeof(u4):
+        return bigendian_32(*(T*)(location));
+      case sizeof(u8):
+        return bigendian_64(*(T*)(location));
+    }
+  }
+  return read_unaligned<T>((const address)location);
+}
+
+#endif // SHARE_VM_JFR_UTILITIES_JFRBIGENDIAN_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/utilities/jfrDoublyLinkedList.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,218 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_UTILITIES_JFRDOUBLYLINKEDLIST_HPP
+#define SHARE_VM_JFR_UTILITIES_JFRDOUBLYLINKEDLIST_HPP
+
+#include "memory/allocation.hpp"
+
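+// Intrusive doubly linked list: the node type T is expected to provide
+// prev()/next() and set_prev()/set_next() accessors.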
+template <typename T>
+class JfrDoublyLinkedList {
+ private:
+  T* _head;
+  T* _tail;
+  size_t _count;
+
+  T** list_head() { return &_head; }
+  T** list_tail() { return &_tail; }
+
+ public:
+  typedef T Node;
+  JfrDoublyLinkedList() : _head(NULL), _tail(NULL), _count(0) {}
+  T* head() const { return _head; }
+  T* tail() const { return _tail; }
+  size_t count() const { return _count; }
+  T* clear(bool return_tail = false);
+  T* remove(T* const node);
+  void prepend(T* const node);
+  void append(T* const node);
+  void append_list(T* const head_node, T* const tail_node, size_t count);
+  debug_only(bool in_list(const T* const target_node) const;)
+  debug_only(bool locate(const T* start_node, const T* const target_node) const;)
+};
+
+template <typename T>
+inline void JfrDoublyLinkedList<T>::prepend(T* const node) {
+  assert(node != NULL, "invariant");
+  node->set_prev(NULL);
+  assert(!in_list(node), "already in list error");
+  T** lh = list_head();
+  if (*lh != NULL) {
+    (*lh)->set_prev(node);
+    node->set_next(*lh);
+  } else {
+    T** lt = list_tail();
+    assert(*lt == NULL, "invariant");
+    *lt = node;
+    node->set_next(NULL);
+    assert(tail() == node, "invariant");
+    assert(node->next() == NULL, "invariant");
+  }
+  *lh = node;
+  ++_count;
+  assert(head() == node, "head error");
+  assert(in_list(node), "not in list error");
+  assert(node->prev() == NULL, "invariant");
+}
+
+template <typename T>
+void JfrDoublyLinkedList<T>::append(T* const node) {
+  assert(node != NULL, "invariant");
+  node->set_next(NULL);
+  assert(!in_list(node), "already in list error");
+  T** lt = list_tail();
+  if (*lt != NULL) {
+    // already an existing tail
+    node->set_prev(*lt);
+    (*lt)->set_next(node);
+  } else {
+    // if no tail, also update head
+    assert(*lt == NULL, "invariant");
+    T** lh = list_head();
+    assert(*lh == NULL, "invariant");
+    node->set_prev(NULL);
+    *lh = node;
+    assert(head() == node, "invariant");
+  }
+  *lt = node;
+  ++_count;
+  assert(tail() == node, "invariant");
+  assert(in_list(node), "not in list error");
+  assert(node->next() == NULL, "invariant");
+}
+
+template <typename T>
+T* JfrDoublyLinkedList<T>::remove(T* const node) {
+  assert(node != NULL, "invariant");
+  assert(in_list(node), "invariant");
+  T* const prev = (T*)node->prev();
+  T* const next = (T*)node->next();
+  if (prev == NULL) {
+    assert(head() == node, "head error");
+    if (next != NULL) {
+      next->set_prev(NULL);
+    } else {
+      assert(next == NULL, "invariant");
+      assert(tail() == node, "tail error");
+      T** lt = list_tail();
+      *lt = NULL;
+      assert(tail() == NULL, "invariant");
+    }
+    T** lh = list_head();
+    *lh = next;
+    assert(head() == next, "invariant");
+  } else {
+    assert(prev != NULL, "invariant");
+    if (next == NULL) {
+      assert(tail() == node, "tail error");
+      T** lt = list_tail();
+      *lt = prev;
+      assert(tail() == prev, "invariant");
+    } else {
+       next->set_prev(prev);
+    }
+    prev->set_next(next);
+  }
+  --_count;
+  assert(_count >= 0, "invariant");
+  assert(!in_list(node), "still in list error");
+  return node;
+}
+
+template <typename T>
+T* JfrDoublyLinkedList<T>::clear(bool return_tail /* false */) {
+  T* const node = return_tail ? tail() : head();
+  T** l = list_head();
+  *l = NULL;
+  l = list_tail();
+  *l = NULL;
+  _count = 0;
+  assert(head() == NULL, "invariant");
+  assert(tail() == NULL, "invariant");
+  return node;
+}
+
+#ifdef ASSERT
+template <typename T>
+bool JfrDoublyLinkedList<T>::locate(const T* node, const T* const target) const {
+  assert(target != NULL, "invariant");
+  while (node != NULL) {
+    if (node == target) {
+      return true;
+    }
+    node = (T*)node->next();
+  }
+  return false;
+}
+
+template <typename T>
+bool JfrDoublyLinkedList<T>::in_list(const T* const target) const {
+  assert(target != NULL, "invariant");
+  return locate(head(), target);
+}
+
+template <typename T>
+inline void validate_count_param(T* node, size_t count_param) {
+  assert(node != NULL, "invariant");
+  size_t count = 0;
+  while (node) {
+    ++count;
+    node = (T*)node->next();
+  }
+  assert(count_param == count, "invariant");
+}
+#endif // ASSERT
+
+template <typename T>
+void JfrDoublyLinkedList<T>::append_list(T* const head_node, T* const tail_node, size_t count) {
+  assert(head_node != NULL, "invariant");
+  assert(!in_list(head_node), "already in list error");
+  assert(tail_node != NULL, "invariant");
+  assert(!in_list(tail_node), "already in list error");
+  assert(tail_node->next() == NULL, "invariant");
+  // ensure passed in list nodes are connected
+  assert(locate(head_node, tail_node), "invariant");
+  T** lt = list_tail();
+  if (*lt != NULL) {
+    head_node->set_prev(*lt);
+    (*lt)->set_next(head_node);
+  } else {
+    // list is empty: no tail, and therefore no head
+    assert(*lt == NULL, "invariant");
+    T** lh = list_head();
+    assert(*lh == NULL, "invariant");
+    head_node->set_prev(NULL);
+    *lh = head_node;
+    assert(head() == head_node, "invariant");
+  }
+  *lt = tail_node;
+  const T* node = head_node;
+  debug_only(validate_count_param(node, count);)
+  _count += count;
+  assert(tail() == tail_node, "invariant");
+  assert(in_list(tail_node), "not in list error");
+  assert(in_list(head_node), "not in list error");
+}
+
+#endif // SHARE_VM_JFR_UTILITIES_JFRDOUBLYLINKEDLIST_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/utilities/jfrHashtable.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,283 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_UTILITIES_JFRHASHTABLE_HPP
+#define SHARE_VM_JFR_UTILITIES_JFRHASHTABLE_HPP
+
+#include "memory/allocation.inline.hpp"
+#include "runtime/orderAccess.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/macros.hpp"
+
+template <typename T>
+class JfrBasicHashtableEntry {
+ private:
+  typedef JfrBasicHashtableEntry<T> Entry;
+  Entry* _next;
+  T _literal;          // ref to item in table.
+  uintptr_t _hash;
+
+ public:
+  uintptr_t hash() const { return _hash; }
+  void set_hash(uintptr_t hash) { _hash = hash; }
+  T literal() const { return _literal; }
+  T* literal_addr() { return &_literal; }
+  void set_literal(T s) { _literal = s; }
+  void set_next(Entry* next) { _next = next; }
+  Entry* next() const { return _next; }
+  Entry** next_addr() { return &_next; }
+};
+
+template <typename T>
+class JfrHashtableBucket : public CHeapObj<mtTracing> {
+  template <typename>
+  friend class JfrBasicHashtable;
+ private:
+  typedef JfrBasicHashtableEntry<T> TableEntry;
+  TableEntry* _entry;
+
+  TableEntry* get_entry() const {
+    return (TableEntry*)OrderAccess::load_ptr_acquire(&_entry);
+  }
+  void set_entry(TableEntry* entry) { OrderAccess::release_store_ptr(&_entry, entry);}
+  TableEntry** entry_addr() { return &_entry; }
+};
+
+template <typename T>
+class JfrBasicHashtable : public CHeapObj<mtTracing> {
+ private:
+  typedef JfrHashtableBucket<T> Bucket;
+  typedef JfrBasicHashtableEntry<T> TableEntry;
+  Bucket* _buckets;
+  uintptr_t _table_size;
+  const size_t _entry_size;
+  size_t _number_of_entries;
+
+ protected:
+  JfrBasicHashtable(uintptr_t table_size, size_t entry_size) :
+    _buckets(NULL), _table_size(table_size), _entry_size(entry_size), _number_of_entries(0) {
+    _buckets = NEW_C_HEAP_ARRAY2(Bucket, table_size, mtTracing, CURRENT_PC);
+    memset((void*)_buckets, 0, table_size * sizeof(Bucket));
+  }
+
+  size_t hash_to_index(uintptr_t full_hash) const {
+    const uintptr_t h = full_hash % _table_size;
+    assert(h >= 0 && h < _table_size, "Illegal hash value");
+    return (size_t)h;
+  }
+  size_t entry_size() const { return _entry_size; }
+  void unlink_entry(TableEntry* entry) {
+    entry->set_next(NULL);
+    --_number_of_entries;
+  }
+  void free_buckets() {
+    if (NULL != _buckets) {
+      FREE_C_HEAP_ARRAY(Bucket, _buckets, mtTracing);
+      _buckets = NULL;
+    }
+  }
+  TableEntry* bucket(size_t i) { return _buckets[i].get_entry();}
+  TableEntry** bucket_addr(size_t i) { return _buckets[i].entry_addr(); }
+  uintptr_t table_size() const { return _table_size; }
+  size_t number_of_entries() const { return _number_of_entries; }
+  void add_entry(size_t index, TableEntry* entry) {
+    assert(entry != NULL, "invariant");
+    entry->set_next(bucket(index));
+    _buckets[index].set_entry(entry);
+    ++_number_of_entries;
+  }
+};
+
+template <typename IdType, typename Entry, typename T>
+class AscendingId : public CHeapObj<mtTracing>  {
+ private:
+  IdType _id;
+ public:
+  AscendingId() : _id(0) {}
+  // callbacks
+  void assign_id(Entry* entry) {
+    assert(entry != NULL, "invariant");
+    assert(entry->id() == 0, "invariant");
+    entry->set_id(++_id);
+  }
+  bool equals(const T& data, uintptr_t hash, const Entry* entry) {
+    assert(entry->hash() == hash, "invariant");
+    return true;
+  }
+};
+
+// IdType must be scalar
+template <typename T, typename IdType>
+class Entry : public JfrBasicHashtableEntry<T> {
+ public:
+  typedef IdType ID;
+  void init() { _id = 0; }
+  ID id() const { return _id; }
+  void set_id(ID id) { _id = id; }
+  void set_value(const T& value) { this->set_literal(value); }
+  T& value() const { return *const_cast<Entry*>(this)->literal_addr();}
+  const T* value_addr() const { return const_cast<Entry*>(this)->literal_addr(); }
+
+ private:
+  ID _id;
+};
+
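+// Chained hash table host: entries are allocated from the C heap (mtTracing), and a
+// Callback policy assigns ids to new entries and decides equality on lookup; the
+// default AscendingId callback hands out ids in increasing order.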
+template <typename T, typename IdType, template <typename, typename> class Entry,
+          typename Callback = AscendingId<IdType, Entry<T, IdType>, T> ,
+          size_t TABLE_SIZE = 1009>
+class HashTableHost : public JfrBasicHashtable<T> {
+ public:
+  typedef Entry<T, IdType> HashEntry;
+  HashTableHost() : JfrBasicHashtable<T>(TABLE_SIZE, sizeof(HashEntry)), _callback(new Callback()) {}
+  HashTableHost(Callback* cb) : JfrBasicHashtable<T>(TABLE_SIZE, sizeof(HashEntry)), _callback(cb) {}
+  ~HashTableHost() {
+    this->clear_entries();
+    this->free_buckets();
+  }
+
+  // direct insert assumes non-existing entry
+  HashEntry& put(const T& data, uintptr_t hash);
+
+  // lookup entry, will put if not found
+  HashEntry& lookup_put(const T& data, uintptr_t hash) {
+    HashEntry* entry = lookup_only(data, hash);
+    return entry == NULL ? put(data, hash) : *entry;
+  }
+
+  // read-only lookup
+  HashEntry* lookup_only(const T& query, uintptr_t hash);
+
+  // id retrieval
+  IdType id(const T& data, uintptr_t hash) {
+    assert(data != NULL, "invariant");
+    const HashEntry& entry = lookup_put(data, hash);
+    assert(entry.id() > 0, "invariant");
+    return entry.id();
+  }
+
+  template <typename Functor>
+  void iterate_value(Functor& f);
+
+  template <typename Functor>
+  void iterate_entry(Functor& f);
+
+  size_t cardinality() const { return this->number_of_entries(); }
+  bool has_entries() const { return this->cardinality() > 0; }
+  void clear_entries();
+
+  // removal and deallocation
+  void free_entry(HashEntry* entry) {
+    assert(entry != NULL, "invariant");
+    JfrBasicHashtable<T>::unlink_entry(entry);
+    FREE_C_HEAP_ARRAY(char, entry, mtTracing);
+  }
+
+ private:
+  Callback* _callback;
+  size_t index_for(uintptr_t hash) { return this->hash_to_index(hash); }
+  HashEntry* new_entry(const T& data, uintptr_t hash);
+  void add_entry(size_t index, HashEntry* new_entry) {
+    assert(new_entry != NULL, "invariant");
+    _callback->assign_id(new_entry);
+    assert(new_entry->id() > 0, "invariant");
+    JfrBasicHashtable<T>::add_entry(index, new_entry);
+  }
+};
+
+template <typename T, typename IdType, template <typename, typename> class Entry, typename Callback, size_t TABLE_SIZE>
+Entry<T, IdType>& HashTableHost<T, IdType, Entry, Callback, TABLE_SIZE>::put(const T& data, uintptr_t hash) {
+  assert(lookup_only(data, hash) == NULL, "use lookup_put()");
+  HashEntry* const entry = new_entry(data, hash);
+  add_entry(index_for(hash), entry);
+  return *entry;
+}
+
+template <typename T, typename IdType, template <typename, typename> class Entry, typename Callback, size_t TABLE_SIZE>
+Entry<T, IdType>* HashTableHost<T, IdType, Entry, Callback, TABLE_SIZE>::lookup_only(const T& query, uintptr_t hash) {
+  HashEntry* entry = (HashEntry*)this->bucket(index_for(hash));
+  while (entry != NULL) {
+    if (entry->hash() == hash && _callback->equals(query, hash, entry)) {
+      return entry;
+    }
+    entry = (HashEntry*)entry->next();
+  }
+  return NULL;
+}
+
+template <typename T, typename IdType, template <typename, typename> class Entry, typename Callback, size_t TABLE_SIZE>
+template <typename Functor>
+void HashTableHost<T, IdType, Entry, Callback, TABLE_SIZE>::iterate_value(Functor& f) {
+  for (size_t i = 0; i < this->table_size(); ++i) {
+    const HashEntry* entry = (const HashEntry*)this->bucket(i);
+    while (entry != NULL) {
+      if (!f(entry->value())) {
+        break;
+      }
+      entry = (HashEntry*)entry->next();
+    }
+  }
+}
+
+template <typename T, typename IdType, template <typename, typename> class Entry, typename Callback, size_t TABLE_SIZE>
+template <typename Functor>
+void HashTableHost<T, IdType, Entry, Callback, TABLE_SIZE>::iterate_entry(Functor& f) {
+  for (size_t i = 0; i < this->table_size(); ++i) {
+    const HashEntry* entry = (const HashEntry*)this->bucket(i);
+    while (entry != NULL) {
+      if (!f(entry)) {
+        break;
+      }
+      entry = (const HashEntry*)entry->next();
+    }
+  }
+}
+
+template <typename T, typename IdType, template <typename, typename> class Entry, typename Callback, size_t TABLE_SIZE>
+void HashTableHost<T, IdType, Entry, Callback, TABLE_SIZE>::clear_entries() {
+  for (size_t i = 0; i < this->table_size(); ++i) {
+    HashEntry** bucket = (HashEntry**)this->bucket_addr(i);
+    HashEntry* entry = *bucket;
+    while (entry != NULL) {
+      HashEntry* entry_to_remove = entry;
+      entry = (HashEntry*)entry->next();
+      this->free_entry(entry_to_remove);
+    }
+    *bucket = NULL;
+  }
+  assert(this->number_of_entries() == 0, "should have removed all entries");
+}
+
+template <typename T, typename IdType, template <typename, typename> class Entry, typename Callback, size_t TABLE_SIZE>
+Entry<T, IdType>* HashTableHost<T, IdType, Entry, Callback, TABLE_SIZE>::new_entry(const T& data, uintptr_t hash) {
+  assert(sizeof(HashEntry) == this->entry_size(), "invariant");
+  HashEntry* const entry = (HashEntry*) NEW_C_HEAP_ARRAY2(char, this->entry_size(), mtTracing, CURRENT_PC);
+  entry->init();
+  entry->set_hash(hash);
+  entry->set_value(data);
+  entry->set_next(NULL);
+  assert(0 == entry->id(), "invariant");
+  return entry;
+}
+
+#endif // SHARE_VM_JFR_UTILITIES_JFRHASHTABLE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/utilities/jfrIterator.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_UTILITIES_JFRLISTITERATOR_HPP
+#define SHARE_VM_JFR_UTILITIES_JFRLISTITERATOR_HPP
+
+#include "memory/allocation.hpp"
+
+enum jfr_iter_direction {
+  forward = 1,
+  backward
+};
+
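+// Iteration support for the JFR list types: a Navigator walks a list forward or
+// backward starting from its head or tail, and a ContinuationPredicate such as
+// StopOnNullCondition decides when traversal stops.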
+template <typename Node>
+class StopOnNullCondition : public AllStatic {
+ public:
+  static bool has_next(const Node* node) {
+    return node != NULL;
+  }
+};
+
+template <typename List, template <typename> class ContinuationPredicate>
+class Navigator {
+ public:
+  typedef typename List::Node Node;
+  typedef jfr_iter_direction Direction;
+  Navigator(List& list, Direction direction) :
+    _list(list), _node(direction == forward ? list.head() : list.tail()), _direction(direction) {}
+  bool has_next() const {
+    return ContinuationPredicate<Node>::has_next(_node);
+  }
+
+  bool direction_forward() const {
+    return _direction == forward;
+  }
+
+  Node* next() const {
+    assert(_node != NULL, "invariant");
+    Node* temp = _node;
+    _node = direction_forward() ? (Node*)_node->next() : (Node*)_node->prev();
+    return temp;
+  }
+
+  void set_direction(Direction direction) {
+    _direction = direction;
+  }
+
+  void reset(Direction direction) {
+    set_direction(direction);
+    _node = direction_forward() ? _list.head() : _list.tail();
+  }
+
+ private:
+  List& _list;
+  mutable Node* _node;
+  Direction _direction;
+};
+
+template <typename List>
+class NavigatorStopOnNull : public Navigator<List, StopOnNullCondition> {
+ public:
+  NavigatorStopOnNull(List& list, jfr_iter_direction direction = forward) : Navigator<List, StopOnNullCondition>(list, direction) {}
+};
+
+template<typename List, template <typename> class Navigator, typename AP = StackObj>
+class IteratorHost : public AP {
+ private:
+  Navigator<List> _navigator;
+
+ public:
+  typedef typename List::Node Node;
+  typedef jfr_iter_direction Direction;
+  IteratorHost(List& list, Direction direction = forward) : AP(), _navigator(list, direction) {}
+  void reset(Direction direction = forward) { _navigator.reset(direction); }
+  bool has_next() const { return _navigator.has_next(); }
+  Node* next() const { return _navigator.next(); }
+  void set_direction(Direction direction) { _navigator.set_direction(direction); }
+};
+
+template<typename List, typename AP = StackObj>
+class StopOnNullIterator : public IteratorHost<List, NavigatorStopOnNull, AP> {
+ public:
+  StopOnNullIterator(List& list, jfr_iter_direction direction = forward) : IteratorHost<List, NavigatorStopOnNull, AP>(list, direction) {}
+};
+
+#endif // SHARE_VM_JFR_UTILITIES_JFRLISTITERATOR_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/utilities/jfrJavaLog.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/jni/jfrJavaSupport.hpp"
+#include "jfr/utilities/jfrJavaLog.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/thread.inline.hpp"
+
+// #define JFR_LOG_TAGS_CONCATED(T0, T1, T2, T3, T4, T5, ...)  \
+//   T0 ## _ ## T1 ## _ ## T2 ## _ ## T3 ## _ ## T4 ## _ ## T5
+
+// enum JfrLogTagSetType {
+// #define JFR_LOG_TAG(...) \
+//     EXPAND_VARARGS(JFR_LOG_TAGS_CONCATED(__VA_ARGS__, _NO_TAG, _NO_TAG, _NO_TAG, _NO_TAG, _NO_TAG, _NO_TAG)),
+
+//     JFR_LOG_TAG_SET_LIST
+
+// #undef JFR_LOG_TAG
+//     JFR_LOG_TAG_SET_COUNT
+// };
+
+// struct jfrLogSubscriber
+// {
+//   jobject log_tag_enum_ref;
+//   LogTagSet* log_tag_set;
+// };
+
+// static jfrLogSubscriber log_tag_sets[JFR_LOG_TAG_SET_COUNT];
+
+// static void log_cfg_update(LogLevelType llt, JfrLogTagSetType jflt, TRAPS) {
+//   DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
+//   if (log_tag_sets[jflt].log_tag_enum_ref == NULL) {
+//     return;
+//   }
+//   jobject lt = log_tag_sets[jflt].log_tag_enum_ref;
+//   // set field tagSetLevel to llt value
+//   JavaValue result(T_VOID);
+//   JfrJavaArguments args(&result);
+//   args.set_klass(JfrJavaSupport::klass(lt));
+//   args.set_name("tagSetLevel", CHECK);
+//   args.set_signature("I", CHECK);
+//   args.set_receiver(JfrJavaSupport::resolve_non_null(lt));
+//   args.push_int(llt);
+//   JfrJavaSupport::set_field(&args, THREAD);
+// }
+
+// static LogLevelType highest_level(const LogTagSet& lts) {
+//   for (size_t i = 0; i < LogLevel::Count; i++) {
+//     if (lts.is_level((LogLevelType)i)) {
+//       return (LogLevelType)i;
+//     }
+//   }
+//   return LogLevel::Off;
+// }
+
+// static void log_config_change_internal(bool init, TRAPS) {
+//   LogLevelType llt;
+//   LogTagSet* lts;
+
+// #define JFR_LOG_TAG(...) \
+//   lts = &LogTagSetMapping<LOG_TAGS(__VA_ARGS__)>::tagset(); \
+//   if (init) { \
+//     JfrLogTagSetType tagSetType = \
+//       EXPAND_VARARGS(JFR_LOG_TAGS_CONCATED(__VA_ARGS__, _NO_TAG, _NO_TAG, _NO_TAG, _NO_TAG, _NO_TAG, _NO_TAG)); \
+//     assert(NULL == log_tag_sets[tagSetType].log_tag_set, "Init JFR LogTagSets twice"); \
+//     log_tag_sets[tagSetType].log_tag_set = lts; \
+//   } \
+//   llt = highest_level(*lts); \
+//   log_cfg_update(llt, \
+//   EXPAND_VARARGS(JFR_LOG_TAGS_CONCATED(__VA_ARGS__, _NO_TAG, _NO_TAG, _NO_TAG, _NO_TAG, _NO_TAG, _NO_TAG)), THREAD);
+//   JFR_LOG_TAG_SET_LIST
+// #undef JFR_LOG_TAG
+// }
+
+// static void log_config_change() {
+//   Thread* t = Thread::current();
+//   DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(t));
+//   log_config_change_internal(false, t);
+// }
+
+void JfrJavaLog::subscribe_log_level(jobject log_tag, jint id, TRAPS) {
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
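+  // This backport targets a JDK without Unified Logging, so there is nothing to
+  // subscribe to: the UL wiring above is retained only as commented-out reference
+  // code and this call is intentionally a no-op.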
+  // static bool subscribed_updates = true;
+  // assert(id < JFR_LOG_TAG_SET_COUNT,
+  //   "LogTag id, java and native not in synch, %d < %d", id, JFR_LOG_TAG_SET_COUNT);
+  // assert(NULL == log_tag_sets[id].log_tag_enum_ref, "Subscribing twice");
+  // log_tag_sets[id].log_tag_enum_ref = JfrJavaSupport::global_jni_handle(log_tag, THREAD);
+  // if (subscribed_updates) {
+  //   LogConfiguration::register_update_listener(&log_config_change);
+  //   log_config_change_internal(true, THREAD);
+  //   subscribed_updates = false;
+  // } else {
+  //   log_config_change_internal(false, THREAD);
+  // }
+}
+
+void JfrJavaLog::log(jint tag_set, jint level, jstring message, TRAPS) {
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
+  if (!LogJFR) {
+    return;
+  }
+  if (message == NULL) {
+    return;
+  }
+  // if (level < (jint)LogLevel::First || level > (jint)LogLevel::Last) {
+  //   JfrJavaSupport::throw_illegal_argument_exception("LogLevel passed is outside valid range", THREAD);
+  //   return;
+  // }
+  // if (tag_set < 0 || tag_set >= (jint)JFR_LOG_TAG_SET_COUNT) {
+  //   JfrJavaSupport::throw_illegal_argument_exception("LogTagSet id is outside valid range", THREAD);
+  //   return;
+  // }
+  ResourceMark rm(THREAD);
+  const char* const s = JfrJavaSupport::c_str(message, CHECK);
+  assert(s != NULL, "invariant");
+  // assert(log_tag_sets[tag_set].log_tag_set != NULL, "LogTagSet is not init");
+  // log_tag_sets[tag_set].log_tag_set->log((LogLevelType)level, s);
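+  // Without Unified Logging, route the message to the VM output stream instead,
+  // gated by the LogJFR flag checked above.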
+  tty->print_cr("JFR: %s", s);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/utilities/jfrJavaLog.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_UTILITIES_JFRJAVALOG_HPP
+#define SHARE_VM_JFR_UTILITIES_JFRJAVALOG_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/exceptions.hpp"
+
+/*
+ * A thin two-way "bridge" allowing our Java components to interface with Unified Logging (UL)
+ *
+ * Java can "subscribe" to be notified about UL configuration changes.
+ * On such a configuration change, if applicable, the passed in LogTag enum instance
+ * will be updated to reflect a new LogLevel.
+ *
+ * Log messages originating in Java are forwarded to UL for output.
+ *
+ */
+
+class JfrJavaLog : public AllStatic {
+ public:
+  static void subscribe_log_level(jobject log_tag, jint id, TRAPS);
+  static void log(jint tag_set, jint level, jstring message, TRAPS);
+};
+
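+// Illustrative only (not part of this change): the JNI layer forwards both operations
+// to this class; a call site running in a JavaThread (in VM state) might look roughly
+// like the following, where log_tag_enum, tag_set_id, level_id and message are
+// hypothetical placeholders:
+//
+//   JfrJavaLog::subscribe_log_level(log_tag_enum /* jobject */, tag_set_id, THREAD);
+//   JfrJavaLog::log(tag_set_id, level_id, message /* jstring */, THREAD);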
+#endif // SHARE_VM_JFR_UTILITIES_JFRJAVALOG_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/utilities/jfrRefCountPointer.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_UTILITIES_JFRREFCOUNTPOINTER_HPP
+#define SHARE_VM_JFR_UTILITIES_JFRREFCOUNTPOINTER_HPP
+
+#include "jfr/utilities/jfrAllocation.hpp"
+#include "runtime/atomic.hpp"
+
+template <typename T>
+class RefCountHandle {
+  template <typename, typename>
+  friend class RefCountPointer;
+ private:
+  const T* _ptr;
+
+  RefCountHandle(const T* ptr) : _ptr(ptr) {
+    assert(_ptr != NULL, "invariant");
+    _ptr->add_ref();
+  }
+
+ public:
+  RefCountHandle() : _ptr(NULL) {}
+
+  RefCountHandle(const RefCountHandle<T>& rhs) : _ptr(rhs._ptr) {
+    if (_ptr != NULL) {
+      _ptr->add_ref();
+    }
+  }
+
+  ~RefCountHandle() {
+    if (_ptr != NULL) {
+      const T* temp = _ptr;
+      _ptr = NULL;
+      temp->remove_ref();
+    }
+  }
+
+  // The copy-and-swap idiom upholds reference counting semantics
+  void operator=(RefCountHandle<T> rhs) {
+    const T* temp = rhs._ptr;
+    rhs._ptr = _ptr;
+    _ptr = temp;
+  }
+
+  bool operator==(const RefCountHandle<T>& rhs) const {
+    return _ptr == rhs._ptr;
+  }
+
+  bool operator!=(const RefCountHandle<T>& rhs) const {
+    return !operator==(rhs);
+  }
+
+  bool valid() const {
+    return _ptr != NULL;
+  }
+
+  const T& operator->() const {
+    return *_ptr;
+  }
+
+  T& operator->() {
+    return *const_cast<T*>(_ptr);
+  }
+};
+
+class MultiThreadedRefCounter {
+ private:
+  mutable volatile int _refs;
+ public:
+  MultiThreadedRefCounter() : _refs(0) {}
+
+  void inc() const {
+    Atomic::add(1, &_refs);
+  }
+
+  bool dec() const {
+    return 0 == Atomic::add((-1), &_refs);
+  }
+
+  int current() const {
+    return _refs;
+  }
+};
+
+template <typename T, typename RefCountImpl = MultiThreadedRefCounter>
+class RefCountPointer : public JfrCHeapObj {
+  template <typename>
+  friend class RefCountHandle;
+  typedef RefCountHandle<RefCountPointer<T, RefCountImpl> > RefHandle;
+ private:
+  const T* _ptr;
+  mutable RefCountImpl _refs;
+
+  // disallow multiple copies
+  RefCountPointer(const RefCountPointer<T, RefCountImpl>& rhs);
+  void operator=(const RefCountPointer<T, RefCountImpl>& rhs);
+
+  ~RefCountPointer() {
+    assert(_refs.current() == 0, "invariant");
+    delete const_cast<T*>(_ptr);
+  }
+
+  void add_ref() const {
+    _refs.inc();
+  }
+
+  void remove_ref() const {
+    if (_refs.dec()) {
+      delete this;
+    }
+  }
+
+  RefCountPointer(const T* ptr) : _ptr(ptr), _refs() {
+    assert(_ptr != NULL, "invariant");
+  }
+
+ public:
+  const T* operator->() const {
+    return _ptr;
+  }
+
+  T* operator->() {
+    return const_cast<T*>(_ptr);
+  }
+
+  static RefHandle make(const T* ptr) {
+    assert(ptr != NULL, "invariant");
+    return RefHandle(new RefCountPointer<T, RefCountImpl>(ptr));
+  }
+};
+
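+// Illustrative only (not part of this change): typical use is to wrap a heap-allocated
+// object once via make() and to pass handles around by value; the last handle going out
+// of scope deletes the wrapped object. MyPayload is a hypothetical type:
+//
+//   typedef RefCountPointer<MyPayload> PayloadPtr;
+//   typedef RefCountHandle<PayloadPtr> PayloadHandle;
+//
+//   PayloadHandle handle = PayloadPtr::make(new MyPayload());
+//   PayloadHandle copy = handle;  // copy constructor calls add_ref()
+//   handle->do_work();            // drills down through both operator->() overloads
+//   // both handles leaving scope drop the count to zero and delete MyPayload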
+#endif // SHARE_VM_JFR_UTILITIES_JFRREFCOUNTPOINTER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/utilities/jfrResourceManager.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_UTILITIES_RESOURCEMANAGER_HPP
+#define SHARE_VM_JFR_UTILITIES_RESOURCEMANAGER_HPP
+
+#include "memory/allocation.hpp"
+
+template <class T>
+class ResourceManager : public StackObj {
+ private:
+  T* const _resource;
+ public:
+  ResourceManager(T* resource) : _resource(resource) {}
+  ~ResourceManager() {
+    if (_resource != NULL) {
+      delete _resource;
+    }
+  }
+  operator T*() { return _resource; }
+  T* operator->() { return _resource; }
+};
+
+template <class T>
+class ResourceArrayManager : public StackObj {
+ private:
+  T* const _resource_array;
+ public:
+  ResourceArrayManager(T* resource_array) : _resource_array(resource_array) {}
+  ~ResourceArrayManager() {
+    if (_resource_array != NULL) {
+      delete [] _resource_array;
+    }
+  }
+  operator T*() { return _resource_array; }
+  T* operator->() const { return _resource_array; }
+};
+
+#endif // SHARE_VM_JFR_UTILITIES_RESOURCEMANAGER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/utilities/jfrSpinlockHelper.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_UTILITIES_JFRSPINLOCKHELPER_HPP
+#define SHARE_VM_JFR_UTILITIES_JFRSPINLOCKHELPER_HPP
+
+#include "runtime/thread.hpp"
+
+// this utility could be useful for non-cx8 platforms (i.e. without 64-bit atomic cmpxchg)
+
+class JfrSpinlockHelper {
+ private:
+  volatile int* const _lock;
+
+ public:
+  JfrSpinlockHelper(volatile int* lock) : _lock(lock) {
+    Thread::SpinAcquire(_lock, NULL);
+  }
+
+  JfrSpinlockHelper(volatile int* const lock, const char* name) : _lock(lock) {
+    Thread::SpinAcquire(_lock, name);
+  }
+
+  ~JfrSpinlockHelper() {
+    Thread::SpinRelease(_lock);
+  }
+};
+
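+// Illustrative only (not part of this change): the helper is a scoped guard around a
+// plain int-based spinlock word; names below are hypothetical:
+//
+//   static volatile int _buffer_lock = 0;
+//
+//   void update_buffer_exclusively() {
+//     JfrSpinlockHelper guard(&_buffer_lock);  // SpinAcquire in the constructor
+//     // ... critical section ...
+//   }                                          // SpinRelease in the destructor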
+#endif // SHARE_VM_JFR_UTILITIES_JFRSPINLOCKHELPER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/utilities/jfrTime.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/utilities/jfrTime.hpp"
+#include "runtime/os.hpp"
+#if defined(X86) && !defined(ZERO)
+#include "rdtsc_x86.hpp"
+#endif
+
+bool JfrTime::_ft_enabled = false;
+
+bool JfrTime::initialize() {
+  static bool initialized = false;
+  if (!initialized) {
+#if defined(X86) && !defined(ZERO)
+    _ft_enabled = Rdtsc::initialize();
+#else
+    _ft_enabled = false;
+#endif
+    initialized = true;
+  }
+  return initialized;
+}
+
+bool JfrTime::is_ft_supported() {
+#if defined(X86) && !defined(ZERO)
+  return Rdtsc::is_supported();
+#else
+  return false;
+#endif
+}
+
+
+const void* JfrTime::time_function() {
+#if defined(X86) && !defined(ZERO)
+  return _ft_enabled ? (const void*)Rdtsc::elapsed_counter : (const void*)os::elapsed_counter;
+#else
+  return (const void*)os::elapsed_counter;
+#endif
+}
+
+jlong JfrTime::frequency() {
+#if defined(X86) && !defined(ZERO)
+  return _ft_enabled ? Rdtsc::frequency() : os::elapsed_frequency();
+#else
+  return os::elapsed_frequency();
+#endif
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/utilities/jfrTime.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_UTILITIES_JFRTIME_HPP
+#define SHARE_VM_JFR_UTILITIES_JFRTIME_HPP
+
+#include "utilities/ticks.hpp"
+
+typedef TimeInstant<CounterRepresentation, FastUnorderedElapsedCounterSource> JfrTicks;
+typedef TimeInterval<CounterRepresentation, FastUnorderedElapsedCounterSource> JfrTickspan;
+
+class JfrTime {
+ private:
+  static bool _ft_enabled;
+ public:
+  static bool initialize();
+  static bool is_ft_enabled() { return _ft_enabled; }
+  static bool is_ft_supported();
+  static jlong frequency();
+  static const void* time_function();
+};
+
+#endif // SHARE_VM_JFR_UTILITIES_JFRTIME_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/utilities/jfrTimeConverter.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/utilities/jfrTimeConverter.hpp"
+#include "jfr/utilities/jfrTime.hpp"
+#include "runtime/os.hpp"
+
+static double ft_counter_to_nanos_factor = .0;
+static double nanos_to_ft_counter_factor = .0;
+static double os_counter_to_nanos_factor = .0;
+static double nanos_to_os_counter_factor = .0;
+
+const double JfrTimeConverter::NANOS_PER_SEC      = 1000000000.0;
+const double JfrTimeConverter::NANOS_PER_MILLISEC = 1000000.0;
+const double JfrTimeConverter::NANOS_PER_MICROSEC = 1000.0;
+
+static bool initialized = false;
+
+void JfrTimeConverter::initialize() {
+  if (!initialized) {
+    nanos_to_os_counter_factor = (double)os::elapsed_frequency() / NANOS_PER_SEC;
+    assert(nanos_to_os_counter_factor != .0, "error in conversion!");
+    os_counter_to_nanos_factor = (double)1.0 / nanos_to_os_counter_factor;
+    assert(os_counter_to_nanos_factor != .0, "error in conversion!");
+    if (JfrTime::is_ft_enabled()) {
+      nanos_to_ft_counter_factor = (double)JfrTime::frequency() / NANOS_PER_SEC;
+      assert(nanos_to_ft_counter_factor != .0, "error in conversion!");
+      ft_counter_to_nanos_factor = (double)1.0 / nanos_to_ft_counter_factor;
+      assert(ft_counter_to_nanos_factor != .0, "error in conversion!");
+    }
+    initialized = true;
+  }
+}
+
+double JfrTimeConverter::counter_to_nano_multiplier(bool is_os_time) {
+  if (!initialized) {
+    initialize();
+  }
+  return JfrTime::is_ft_enabled() && !is_os_time ? ft_counter_to_nanos_factor : os_counter_to_nanos_factor;
+}
+
+double JfrTimeConverter::nano_to_counter_multiplier(bool is_os_time) {
+  if (!initialized) {
+    initialize();
+  }
+  return JfrTime::is_ft_enabled() && !is_os_time ? nanos_to_ft_counter_factor : nanos_to_os_counter_factor;
+}
+
+double JfrTimeConverter::counter_to_nanos_internal(jlong c, bool is_os_time) {
+  return (double)c * counter_to_nano_multiplier(is_os_time);
+}
+
+double JfrTimeConverter::counter_to_millis_internal(jlong c, bool is_os_time) {
+  return (counter_to_nanos_internal(c, is_os_time) / NANOS_PER_MILLISEC);
+}
+
+jlong JfrTimeConverter::counter_to_nanos(jlong c, bool is_os_time) {
+  return (jlong)counter_to_nanos_internal(c, is_os_time);
+}
+
+jlong JfrTimeConverter::counter_to_millis(jlong c, bool is_os_time) {
+  return (jlong)counter_to_millis_internal(c, is_os_time);
+}
+
+jlong JfrTimeConverter::nanos_to_countertime(jlong nanos, bool as_os_time) {
+  return nanos <= 0 ? 0 : (jlong)((double)nanos * nano_to_counter_multiplier(as_os_time));
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/utilities/jfrTimeConverter.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_UTILITIES_JFRTIMECONVERTER_HPP
+#define SHARE_VM_JFR_UTILITIES_JFRTIMECONVERTER_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+class JfrTimeConverter : AllStatic {
+ private:
+  static double counter_to_nano_multiplier(bool is_os_time = false);
+  static double counter_to_nanos_internal(jlong c, bool is_os_time = false);
+  static double counter_to_millis_internal(jlong c, bool is_os_time = false);
+  static void initialize();
+
+ public:
+  static const double NANOS_PER_SEC;
+  static const double NANOS_PER_MILLISEC;
+  static const double NANOS_PER_MICROSEC;
+
+  // factors
+  static double nano_to_counter_multiplier(bool is_os_time = false);
+  // ticks to nanos
+  static jlong counter_to_nanos(jlong c, bool is_os_time = false);
+  // ticks to millis
+  static jlong counter_to_millis(jlong c, bool is_os_time = false);
+  // nanos to ticks
+  static jlong nanos_to_countertime(jlong c, bool as_os_time = false);
+};
+
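+// Worked example with hypothetical numbers: with an OS counter frequency of 1,000,000
+// ticks per second, os_counter_to_nanos_factor becomes 1e9 / 1e6 = 1000.0, so
+// counter_to_nanos(42, true) yields 42,000 ns and counter_to_millis(5000, true) yields 5 ms.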
+#endif // SHARE_VM_JFR_UTILITIES_JFRTIMECONVERTER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/utilities/jfrTryLock.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_UTILITIES_JFRTRYLOCK_HPP
+#define SHARE_VM_JFR_UTILITIES_JFRTRYLOCK_HPP
+
+#include "runtime/atomic.hpp"
+#include "runtime/orderAccess.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "utilities/debug.hpp"
+
+class JfrTryLock {
+ private:
+  volatile int* const _lock;
+  bool _has_lock;
+
+ public:
+  JfrTryLock(volatile int* lock) : _lock(lock), _has_lock(Atomic::cmpxchg(1, lock, 0) == 0) {}
+
+  ~JfrTryLock() {
+    if (_has_lock) {
+      OrderAccess::fence();
+      *_lock = 0;
+    }
+  }
+
+  bool has_lock() const {
+    return _has_lock;
+  }
+};
+
+class JfrMonitorTryLock : public StackObj {
+ private:
+  Monitor* _lock;
+  bool _acquired;
+
+ public:
+  JfrMonitorTryLock(Monitor* lock) : _lock(lock), _acquired(lock->try_lock()) {}
+
+  ~JfrMonitorTryLock() {
+    if (_acquired) {
+      assert(_lock->owned_by_self(), "invariant");
+      _lock->unlock();
+    }
+  }
+
+  bool acquired() const {
+    return _acquired;
+  }
+
+};
+
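+// Illustrative only (not part of this change): both guards are "try" variants, so callers
+// must check for acquisition and provide a fallback path; names below are hypothetical:
+//
+//   static volatile int _flush_lock = 0;
+//
+//   void try_flush() {
+//     JfrTryLock lock(&_flush_lock);
+//     if (!lock.has_lock()) {
+//       return;  // contended: another thread holds the lock, nothing to release
+//     }
+//     // ... exclusive work; the destructor releases the lock ...
+//   }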
+#endif // SHARE_VM_JFR_UTILITIES_JFRTRYLOCK_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/utilities/jfrTypes.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_UTILITIES_JFRTYPES_HPP
+#define SHARE_VM_JFR_UTILITIES_JFRTYPES_HPP
+
+#include "jfrfiles/jfrEventIds.hpp"
+#include "memory/allocation.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+typedef u8 traceid;
+typedef int fio_fd;
+const int invalid_fd = -1;
+const jlong invalid_offset = -1;
+
+enum EventStartTime {
+  UNTIMED,
+  TIMED
+};
+
+jlong atomic_add_jlong(jlong value, jlong volatile* const dest);
+
+#endif // SHARE_VM_JFR_UTILITIES_JFRTYPES_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/writers/jfrBigEndianWriter.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_WRITERS_JFRBIGENDIANWRITER_HPP
+#define SHARE_VM_JFR_WRITERS_JFRBIGENDIANWRITER_HPP
+
+#include "jfr/writers/jfrEncoding.hpp"
+#include "jfr/writers/jfrMemoryWriterHost.inline.hpp"
+#include "jfr/writers/jfrStorageAdapter.hpp"
+#include "jfr/writers/jfrWriterHost.inline.hpp"
+
+typedef MemoryWriterHost<NoOwnershipAdapter, StackObj > MemoryWriter;
+typedef WriterHost<BigEndianEncoder, BigEndianEncoder, MemoryWriter> BigEndianWriterBase;
+
+class JfrBigEndianWriter : public BigEndianWriterBase {
+ public:
+  template <typename StorageType>
+  JfrBigEndianWriter(StorageType* storage, size_t size) : BigEndianWriterBase(storage, size + size_safety_cushion) {}
+};
+
+#endif // SHARE_VM_JFR_WRITERS_JFRBIGENDIANWRITER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/writers/jfrEncoders.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,312 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_WRITERS_JFRENCODERS_HPP
+#define SHARE_VM_JFR_WRITERS_JFRENCODERS_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+#ifdef TARGET_ARCH_x86
+# include "bytes_x86.hpp"
+#endif
+#ifdef TARGET_ARCH_sparc
+# include "bytes_sparc.hpp"
+#endif
+#ifdef TARGET_ARCH_zero
+# include "bytes_zero.hpp"
+#endif
+#ifdef TARGET_ARCH_arm
+# include "bytes_arm.hpp"
+#endif
+#ifdef TARGET_ARCH_ppc
+# include "bytes_ppc.hpp"
+#endif
+
+//
+// The Encoding policy prescribes a template
+// method taking a first parameter of type T.
+// This is the value to be encoded. The second
+// parameter is a memory address - where to write
+// the encoded value.
+// The encoder method(s) should return the
+// number of bytes encoded into that memory address.
+//
+// template <typename T>
+// size_t encoder(T value, u1* dest);
+//
+// The caller ensures the destination
+// address is not null and that T can be fitted
+// in encoded form.
+//
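+// For example, a minimal policy satisfying this contract could simply copy the value
+// in native byte order (illustrative only; not one of the policies defined below):
+//
+//   class NativeOrderEncoderImpl {
+//    public:
+//     template <typename T>
+//     static size_t encode(T value, u1* dest) {
+//       memcpy(dest, &value, sizeof(T));
+//       return sizeof(T);
+//     }
+//   };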
+
+// Encoding policy classes
+
+class BigEndianEncoderImpl {
+ public:
+  template <typename T>
+  static size_t encode(T value, u1* dest);
+
+  template <typename T>
+  static size_t encode(const T* src, size_t len, u1* dest);
+
+  template <typename T>
+  static size_t encode_padded(T value, u1* dest);
+
+  template <typename T>
+  static size_t encode_padded(const T* src, size_t len, u1* dest);
+
+};
+
+template <typename T>
+inline size_t BigEndianEncoderImpl::encode(T value, u1* dest) {
+  assert(dest != NULL, "invariant");
+  switch (sizeof(T)) {
+    case 1: {
+      ShouldNotReachHere();
+      return 0;
+    }
+    case 2: {
+      Bytes::put_Java_u2(dest, value);
+      return 2;
+    }
+    case 4: {
+      Bytes::put_Java_u4(dest, value);
+      return 4;
+    }
+    case 8: {
+      Bytes::put_Java_u8(dest, value);
+      return 8;
+    }
+  }
+  ShouldNotReachHere();
+  return 0;
+}
+
+template <typename T>
+inline size_t BigEndianEncoderImpl::encode(const T* src, size_t len, u1* dest) {
+  assert(dest != NULL, "invariant");
+  assert(len >= 1, "invariant");
+  if (1 == sizeof(T)) {
+    memcpy(dest, src, len);
+    return len;
+  }
+  size_t size = encode(*src, dest);
+  if (len > 1) {
+    for (size_t i = 1; i < len; ++i) {
+      size += encode(*(src + i), dest + size);
+    }
+  }
+  return size;
+}
+
+template <typename T>
+inline size_t BigEndianEncoderImpl::encode_padded(T value, u1* dest) {
+  return encode(value, dest);
+}
+
+template <typename T>
+inline size_t BigEndianEncoderImpl::encode_padded(const T* src, size_t len, u1* dest) {
+  assert(dest != NULL, "invariant");
+  assert(len >= 1, "invariant");
+  if (1 == sizeof(T)) {
+    memcpy(dest, src, len);
+    return len;
+  }
+  size_t size = encode_padded(*src, dest);
+  if (len > 1) {
+    for (size_t i = 1; i < len; ++i) {
+      size += encode_padded(*(src + i), dest + size);
+    }
+  }
+  return size;
+}
+
+
+// The Varint128 encoder implements variable-length integer encoding
+// using base-128 groups (1 extension bit | 7 value bits per byte),
+// emitting the least significant group first.
+//
+// Example (little endian platform):
+// Value: 25674
+// Binary: 00000000 00000000 01100100 01001010
+// Varint encoded (3 bytes, emitted as 0xca 0xc8 0x01):
+// Bytes viewed as a single value: 13289473
+// Varint encoded: 11001010 11001000 00000001
+//
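+// Derivation of the encoded bytes above: 25674 needs 15 value bits, so it is emitted
+// as three 7-bit groups, least significant group first, setting the high bit of a byte
+// whenever more groups follow:
+//   25674         & 0x7f = 0x4a -> emit 0xca (0x4a | 0x80, more to come)
+//   (25674 >> 7)  & 0x7f = 0x48 -> emit 0xc8 (0x48 | 0x80, more to come)
+//   (25674 >> 14)        = 0x01 -> emit 0x01 (final byte, no extension bit)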
+
+class Varint128EncoderImpl {
+ private:
+  template <typename T>
+  static u8 to_u8(T value);
+
+ public:
+  template <typename T>
+  static size_t encode(T value, u1* dest);
+
+  template <typename T>
+  static size_t encode(const T* src, size_t len, u1* dest);
+
+  template <typename T>
+  static size_t encode_padded(T value, u1* dest);
+
+  template <typename T>
+  static size_t encode_padded(const T* src, size_t len, u1* dest);
+
+};
+
+template <typename T>
+inline u8 Varint128EncoderImpl::to_u8(T value) {
+  switch (sizeof(T)) {
+    case 1:
+      return static_cast<u8>(static_cast<u1>(value) & static_cast<u1>(0xff));
+    case 2:
+      return static_cast<u8>(static_cast<u2>(value) & static_cast<u2>(0xffff));
+    case 4:
+      return static_cast<u8>(static_cast<u4>(value) & static_cast<u4>(0xffffffff));
+    case 8:
+      return static_cast<u8>(value);
+    default:
+      fatal("unsupported type");
+  }
+  return 0;
+}
+
+static const u1 ext_bit = 0x80;
+#define GREATER_THAN_OR_EQUAL_TO_128(v) (((u8)(~(ext_bit - 1)) & (v)))
+#define LESS_THAN_128(v) !GREATER_THAN_OR_EQUAL_TO_128(v)
+
+template <typename T>
+inline size_t Varint128EncoderImpl::encode(T value, u1* dest) {
+  assert(dest != NULL, "invariant");
+
+  const u8 v = to_u8(value);
+
+  if (LESS_THAN_128(v)) {
+    *dest = static_cast<u1>(v); // set bit 0-6, no extension
+    return 1;
+  }
+  *dest = static_cast<u1>(v | ext_bit); // set bit 0-6, with extension
+  if (LESS_THAN_128(v >> 7)) {
+    *(dest + 1) = static_cast<u1>(v >> 7); // set bit 7-13, no extension
+    return 2;
+  }
+  *(dest + 1) = static_cast<u1>((v >> 7) | ext_bit); // set bit 7-13, with extension
+  if (LESS_THAN_128(v >> 14)) {
+    *(dest + 2) = static_cast<u1>(v >> 14); // set bit 14-20, no extension
+    return 3;
+  }
+  *(dest + 2) = static_cast<u1>((v >> 14) | ext_bit); // set bit 14-20, with extension
+  if (LESS_THAN_128(v >> 21)) {
+    *(dest + 3) = static_cast<u1>(v >> 21); // set bit 21-27, no extension
+    return 4;
+  }
+  *(dest + 3) = static_cast<u1>((v >> 21) | ext_bit); // set bit 21-27, with extension
+  if (LESS_THAN_128(v >> 28)) {
+    *(dest + 4) = static_cast<u1>(v >> 28); // set bit 28-34, no extension
+    return 5;
+  }
+  *(dest + 4) = static_cast<u1>((v >> 28) | ext_bit); // set bit 28-34, with extension
+  if (LESS_THAN_128(v >> 35)) {
+    *(dest + 5) = static_cast<u1>(v >> 35); // set bit 35-41, no extension
+    return 6;
+  }
+  *(dest + 5) = static_cast<u1>((v >> 35) | ext_bit); // set bit 35-41, with extension
+  if (LESS_THAN_128(v >> 42)) {
+    *(dest + 6) = static_cast<u1>(v >> 42); // set bit 42-48, no extension
+    return 7;
+  }
+  *(dest + 6) = static_cast<u1>((v >> 42) | ext_bit); // set bit 42-48, with extension
+  if (LESS_THAN_128(v >> 49)) {
+    *(dest + 7) = static_cast<u1>(v >> 49); // set bit 49-55, no extension
+    return 8;
+  }
+  *(dest + 7) = static_cast<u1>((v >> 49) | ext_bit); // set bit 49-55, with extension
+  // no need to extend since only 64 bits allowed.
+  *(dest + 8) = static_cast<u1>(v >> 56);  // set bit 56-63
+  return 9;
+}
+
+template <typename T>
+inline size_t Varint128EncoderImpl::encode(const T* src, size_t len, u1* dest) {
+  assert(dest != NULL, "invariant");
+  assert(len >= 1, "invariant");
+  size_t size = encode(*src, dest);
+  if (len > 1) {
+    for (size_t i = 1; i < len; ++i) {
+      size += encode(*(src + i), dest + size);
+    }
+  }
+  return size;
+}
+
+template <typename T>
+inline size_t Varint128EncoderImpl::encode_padded(T value, u1* dest) {
+  assert(dest != NULL, "invariant");
+  const u8 v = to_u8(value);
+  switch (sizeof(T)) {
+    case 1:
+      dest[0] = static_cast<u1>(v);
+      return 1;
+    case 2:
+      dest[0] = static_cast<u1>(v | 0x80);
+      dest[1] = static_cast<u1>(v >> 7);
+      return 2;
+    case 4:
+      dest[0] = static_cast<u1>(v | 0x80);
+      dest[1] = static_cast<u1>(v >> 7 | 0x80);
+      dest[2] = static_cast<u1>(v >> 14 | 0x80);
+      dest[3] = static_cast<u1>(v >> 21);
+      return 4;
+    case 8:
+      dest[0] = static_cast<u1>(v | 0x80);
+      dest[1] = static_cast<u1>(v >> 7 | 0x80);
+      dest[2] = static_cast<u1>(v >> 14 | 0x80);
+      dest[3] = static_cast<u1>(v >> 21 | 0x80);
+      dest[4] = static_cast<u1>(v >> 28 | 0x80);
+      dest[5] = static_cast<u1>(v >> 35 | 0x80);
+      dest[6] = static_cast<u1>(v >> 42 | 0x80);
+      dest[7] = static_cast<u1>(v >> 49);
+      return 8;
+    default:
+      ShouldNotReachHere();
+    }
+  return 0;
+}
+
+
+template <typename T>
+inline size_t Varint128EncoderImpl::encode_padded(const T* src, size_t len, u1* dest) {
+  assert(dest != NULL, "invariant");
+  assert(len >= 1, "invariant");
+  size_t size = encode_padded(*src, dest);
+  if (len > 1) {
+    for (size_t i = 1; i < len; ++i) {
+      size += encode_padded(*(src + i), dest + size);
+    }
+  }
+  return size;
+}
+
+#endif // SHARE_VM_JFR_WRITERS_JFRENCODERS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/writers/jfrEncoding.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_WRITERS_JFRENCODING_HPP
+#define SHARE_VM_JFR_WRITERS_JFRENCODING_HPP
+
+#include "jfr/writers/jfrEncoders.hpp"
+#include "memory/allocation.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+enum JfrStringEncoding {
+  NULL_STRING = 0,
+  EMPTY_STRING,
+  STRING_CONSTANT,
+  UTF8,
+  UTF16,
+  LATIN1,
+  NOF_STRING_ENCODINGS
+};
+
+template <typename IntegerEncoder, typename BaseEncoder>
+class EncoderHost : public AllStatic {
+ public:
+  template <typename T>
+  static u1* be_write(T value, u1* pos) {
+    return be_write(&value, 1, pos);
+  }
+
+  template <typename T>
+  static u1* be_write(const T* value, size_t len, u1* pos) {
+    assert(value != NULL, "invariant");
+    assert(pos != NULL, "invariant");
+    assert(len > 0, "invariant");
+    return pos + BaseEncoder::encode(value, len, pos);
+  }
+
+  template <typename T>
+  static u1* write_padded(T value, u1* pos) {
+    assert(pos != NULL, "invariant");
+    return write_padded(&value, 1, pos);
+  }
+
+  template <typename T>
+  static u1* write_padded(const T* value, size_t len, u1* pos) {
+    assert(value != NULL, "invariant");
+    assert(pos != NULL, "invariant");
+    assert(len > 0, "invariant");
+    return pos + IntegerEncoder::encode_padded(value, len, pos);
+  }
+
+  template <typename T>
+  static u1* write(T value, u1* pos) {
+    return write(&value, 1, pos);
+  }
+
+  template <typename T>
+  static u1* write(const T* value, size_t len, u1* pos) {
+    assert(value != NULL, "invariant");
+    assert(pos != NULL, "invariant");
+    assert(len > 0, "invariant");
+    return pos + IntegerEncoder::encode(value, len, pos);
+  }
+
+  static u1* write(bool value, u1* pos) {
+    return be_write((u1)value, pos);
+  }
+
+  static u1* write(float value, u1* pos) {
+    return be_write(*(u4*)&(value), pos);
+  }
+
+  static u1* write(double value, u1* pos) {
+    return be_write(*(u8*)&(value), pos);
+  }
+
+  static u1* write(const char* value, u1* pos) {
+    u2 len = 0;
+    if (value != NULL) {
+      len = MIN2<u2>(max_jushort, (jushort)strlen(value));
+    }
+    pos = write(len, pos);
+    if (len > 0) {
+      pos = be_write(value, len, pos);
+    }
+    return pos;
+  }
+
+  static u1* write(char* value, u1* pos) {
+    return write(const_cast<const char*>(value), pos);
+  }
+};
+
+typedef EncoderHost<BigEndianEncoderImpl, BigEndianEncoderImpl> BigEndianEncoder;
+typedef EncoderHost<Varint128EncoderImpl, BigEndianEncoderImpl> CompressedIntegerEncoder;
+
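+// Illustrative only (not part of this change): writers advance a raw position pointer by
+// chaining these helpers; the buffer and values below are hypothetical:
+//
+//   u1 buf[64];
+//   u1* pos = buf;
+//   pos = CompressedIntegerEncoder::write<u4>(1234567, pos);  // varint-encoded integer
+//   pos = BigEndianEncoder::be_write<u2>(42, pos);            // fixed-width big-endian u2
+//   const size_t bytes_written = pos - buf;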
+#endif // SHARE_VM_JFR_WRITERS_JFRENCODING_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/writers/jfrEventWriterHost.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_WRITERS_JFREVENTWRITERHOST_HPP
+#define SHARE_VM_JFR_WRITERS_JFREVENTWRITERHOST_HPP
+
+#include "jfr/writers/jfrWriterHost.inline.hpp"
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+class EventWriterHost : public WriterHost<BE, IE, WriterPolicyImpl> {
+ public:
+  template <typename StorageType>
+  EventWriterHost(StorageType* storage, Thread* thread);
+  EventWriterHost(Thread* thread);
+  void begin_write();
+  intptr_t end_write();
+  void begin_event_write();
+  intptr_t end_event_write();
+};
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+class StackEventWriterHost : public EventWriterHost<BE, IE, WriterPolicyImpl> {
+ public:
+  template <typename StorageType>
+  StackEventWriterHost(StorageType* storage, Thread* thread);
+  StackEventWriterHost(Thread* thread);
+  ~StackEventWriterHost();
+};
+
+#endif // SHARE_VM_JFR_WRITERS_JFREVENTWRITERHOST_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/writers/jfrEventWriterHost.inline.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_WRITERS_JFREVENTWRITERHOST_INLINE_HPP
+#define SHARE_VM_JFR_WRITERS_JFREVENTWRITERHOST_INLINE_HPP
+
+#include "jfr/writers/jfrEventWriterHost.hpp"
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+template <typename StorageType>
+inline EventWriterHost<BE, IE, WriterPolicyImpl>::
+EventWriterHost(StorageType* storage, Thread* thread) : WriterHost<BE, IE, WriterPolicyImpl>(storage, thread) {}
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+inline EventWriterHost<BE, IE, WriterPolicyImpl>::EventWriterHost(Thread* thread) : WriterHost<BE, IE, WriterPolicyImpl>(thread) {
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+inline void EventWriterHost<BE, IE, WriterPolicyImpl>::begin_write() {
+  assert(this->is_valid(), "invariant");
+  assert(!this->is_acquired(), "calling begin with writer already in acquired state!");
+  this->acquire();
+  assert(this->used_offset() == 0, "invariant");
+  assert(this->is_acquired(), "invariant");
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+inline intptr_t EventWriterHost<BE, IE, WriterPolicyImpl>::end_write() {
+  assert(this->is_acquired(),
+    "state corruption, calling end with writer with non-acquired state!");
+  return this->is_valid() ? this->used_offset() : 0;
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+inline void EventWriterHost<BE, IE, WriterPolicyImpl>::begin_event_write() {
+  assert(this->is_valid(), "invariant");
+  assert(!this->is_acquired(), "calling begin with writer already in acquired state!");
+  this->begin_write();
+  this->reserve(sizeof(u4)); // reserve the event size slot
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+inline intptr_t EventWriterHost<BE, IE, WriterPolicyImpl>::end_event_write() {
+  assert(this->is_acquired(), "invariant");
+  if (!this->is_valid()) {
+    this->release();
+    return 0;
+  }
+  const u4 written = (u4)end_write();
+  if (written > sizeof(u4)) { // larger than header reserve
+    this->write_padded_at_offset(written, 0);
+    this->commit();
+  }
+  this->release();
+  assert(!this->is_acquired(), "invariant");
+  return written;
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+template <typename StorageType>
+inline StackEventWriterHost<BE, IE, WriterPolicyImpl>::
+StackEventWriterHost(StorageType* storage, Thread* thread) : EventWriterHost<BE, IE, WriterPolicyImpl>(storage, thread) {
+  this->begin_event_write();
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+inline StackEventWriterHost<BE, IE, WriterPolicyImpl>::StackEventWriterHost(Thread* thread) : EventWriterHost<BE, IE, WriterPolicyImpl>(thread) {
+  this->begin_event_write();
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+inline StackEventWriterHost<BE, IE, WriterPolicyImpl>::~StackEventWriterHost() {
+  this->end_event_write();
+}
+
+#endif // SHARE_VM_JFR_WRITERS_JFREVENTWRITERHOST_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/writers/jfrJavaEventWriter.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jni.h"
+#include "classfile/symbolTable.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "classfile/vmSymbols.hpp"
+#include "jfr/jni/jfrJavaSupport.hpp"
+#include "jfr/recorder/storage/jfrStorage.hpp"
+#include "jfr/support/jfrThreadId.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+#include "jfr/writers/jfrJavaEventWriter.hpp"
+#include "oops/instanceKlass.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/fieldDescriptor.hpp"
+#include "runtime/handles.hpp"
+#include "runtime/jniHandles.hpp"
+#include "runtime/thread.inline.hpp"
+
+static int start_pos_offset = invalid_offset;
+static int start_pos_address_offset = invalid_offset;
+static int current_pos_offset = invalid_offset;
+static int max_pos_offset = invalid_offset;
+static int max_event_size_offset = invalid_offset;
+static int notified_offset = invalid_offset;
+static int thread_id_offset = invalid_offset;
+static int valid_offset = invalid_offset;
+
+static bool find_field(InstanceKlass* ik,
+                       Symbol* name_symbol,
+                       Symbol* signature_symbol,
+                       fieldDescriptor* fd,
+                       bool is_static = false,
+                       bool allow_super = false) {
+  if (allow_super || is_static) {
+    return ik->find_field(name_symbol, signature_symbol, is_static, fd) != NULL;
+  } else {
+    return ik->find_local_field(name_symbol, signature_symbol, fd);
+  }
+}
+
+static void compute_offset(int &dest_offset,
+                           Klass* klass,
+                           Symbol* name_symbol,
+                           Symbol* signature_symbol,
+                           bool is_static = false, bool allow_super = false) {
+  fieldDescriptor fd;
+  InstanceKlass* ik = InstanceKlass::cast(klass);
+  if (!find_field(ik, name_symbol, signature_symbol, &fd, is_static, allow_super)) {
+    assert(false, "invariant");
+  }
+  dest_offset = fd.offset();
+}
+
+static bool setup_event_writer_offsets(TRAPS) {
+  const char class_name[] = "jdk/jfr/internal/EventWriter";
+  Symbol* const k_sym = SymbolTable::lookup(class_name, sizeof class_name - 1, CHECK_false);
+  assert(k_sym != NULL, "invariant");
+  Klass* klass = SystemDictionary::resolve_or_fail(k_sym, true, CHECK_false);
+  assert(klass != NULL, "invariant");
+
+  const char start_pos_name[] = "startPosition";
+  Symbol* const start_pos_sym = SymbolTable::lookup(start_pos_name, sizeof start_pos_name - 1, CHECK_false);
+  assert(start_pos_sym != NULL, "invariant");
+  assert(invalid_offset == start_pos_offset, "invariant");
+  compute_offset(start_pos_offset, klass, start_pos_sym, vmSymbols::long_signature());
+  assert(start_pos_offset != invalid_offset, "invariant");
+
+  const char start_pos_address_name[] = "startPositionAddress";
+  Symbol* const start_pos_address_sym = SymbolTable::lookup(start_pos_address_name, sizeof start_pos_address_name - 1, CHECK_false);
+  assert(start_pos_address_sym != NULL, "invariant");
+  assert(invalid_offset == start_pos_address_offset, "invariant");
+  compute_offset(start_pos_address_offset, klass, start_pos_address_sym, vmSymbols::long_signature());
+  assert(start_pos_address_offset != invalid_offset, "invariant");
+
+  const char event_pos_name[] = "currentPosition";
+  Symbol* const event_pos_sym = SymbolTable::lookup(event_pos_name, sizeof event_pos_name - 1, CHECK_false);
+  assert(event_pos_sym != NULL, "invariant");
+  assert(invalid_offset == current_pos_offset, "invariant");
+  compute_offset(current_pos_offset, klass, event_pos_sym, vmSymbols::long_signature());
+  assert(current_pos_offset != invalid_offset, "invariant");
+
+  const char max_pos_name[] = "maxPosition";
+  Symbol* const max_pos_sym = SymbolTable::lookup(max_pos_name, sizeof max_pos_name - 1, CHECK_false);
+  assert(max_pos_sym != NULL, "invariant");
+  assert(invalid_offset == max_pos_offset, "invariant");
+  compute_offset(max_pos_offset, klass, max_pos_sym, vmSymbols::long_signature());
+  assert(max_pos_offset != invalid_offset, "invariant");
+
+  const char max_event_size_name[] = "maxEventSize";
+  Symbol* const max_event_size_sym = SymbolTable::lookup(max_event_size_name, sizeof max_event_size_name - 1, CHECK_false);
+  assert(max_event_size_sym != NULL, "invariant");
+  assert(invalid_offset == max_event_size_offset, "invariant");
+  compute_offset(max_event_size_offset, klass, max_event_size_sym, vmSymbols::int_signature());
+  assert(max_event_size_offset != invalid_offset, "invariant");
+
+  const char notified_name[] = "notified";
+  Symbol* const notified_sym = SymbolTable::lookup(notified_name, sizeof notified_name - 1, CHECK_false);
+  assert(notified_sym != NULL, "invariant");
+  assert(invalid_offset == notified_offset, "invariant");
+  compute_offset(notified_offset, klass, notified_sym, vmSymbols::bool_signature());
+  assert(notified_offset != invalid_offset, "invariant");
+
+  const char valid_name[] = "valid";
+  Symbol* const valid_sym = SymbolTable::lookup(valid_name, sizeof valid_name - 1, CHECK_false);
+  assert(valid_sym != NULL, "invariant");
+  assert(invalid_offset == valid_offset, "invariant");
+  compute_offset(valid_offset, klass, valid_sym, vmSymbols::bool_signature());
+  assert(valid_offset != invalid_offset, "invariant");
+  return true;
+}
+
+bool JfrJavaEventWriter::has_required_classes(TRAPS) {
+  const char class_name[] = "jdk/jfr/internal/EventWriter";
+  Symbol* const k_sym = SymbolTable::lookup(class_name, sizeof class_name - 1, CHECK_false);
+  Klass* klass = SystemDictionary::resolve_or_null(k_sym, CHECK_false);
+  return (klass != NULL);
+}
+
+bool JfrJavaEventWriter::initialize() {
+  static bool initialized = false;
+  if (!initialized) {
+    Thread* thread = Thread::current();
+    initialized = setup_event_writer_offsets(thread);
+  }
+  return initialized;
+}
+
+jboolean JfrJavaEventWriter::flush(jobject writer, jint used, jint requested, JavaThread* jt) {
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(jt));
+  assert(writer != NULL, "invariant");
+  oop const w = JNIHandles::resolve_non_null(writer);
+  assert(w != NULL, "invariant");
+  JfrBuffer* const current = jt->jfr_thread_local()->java_buffer();
+  assert(current != NULL, "invariant");
+  JfrBuffer* const buffer = JfrStorage::flush(current, used, requested, false, jt);
+  assert(buffer != NULL, "invariant");
+  // "validity" is contextually defined here to mean
+  // that some memory location was provided that is
+  // large enough to accommodate the "requested size".
+  const bool is_valid = buffer->free_size() >= (size_t)(used + requested);
+  u1* const new_current_position = is_valid ? buffer->pos() + used : buffer->pos();
+  w->long_field_put(start_pos_offset, (jlong)buffer->pos());
+  w->long_field_put(current_pos_offset, (jlong)new_current_position);
+  // only update java writer if underlying memory changed
+  if (buffer != current) {
+    w->long_field_put(start_pos_address_offset, (jlong)buffer->pos_address());
+    w->long_field_put(max_pos_offset, (jlong)buffer->end());
+  }
+  if (!is_valid) {
+    // mark writer as invalid for this write attempt
+    w->release_bool_field_put(valid_offset, JNI_FALSE);
+    return JNI_FALSE;
+  }
+  // An exclusive use of a leased buffer is treated as equivalent to
+  // holding a system resource. As such, it should be released as soon as possible.
+  // Returning true here signals that the thread will need to call flush again
+  // on EventWriter.endEvent() and that flush will return the lease.
+  return buffer->lease() ? JNI_TRUE : JNI_FALSE;
+}
+
+class JfrJavaEventWriterNotificationClosure : public ThreadClosure {
+ public:
+   void do_thread(Thread* t) {
+     if (t->is_Java_thread()) {
+       JfrJavaEventWriter::notify((JavaThread*)t);
+     }
+   }
+};
+
+void JfrJavaEventWriter::notify() {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  JfrJavaEventWriterNotificationClosure closure;
+  Threads::threads_do(&closure);
+}
+
+void JfrJavaEventWriter::notify(JavaThread* jt) {
+  assert(jt != NULL, "invariant");
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  if (jt->jfr_thread_local()->has_java_event_writer()) {
+    oop buffer_writer = JNIHandles::resolve_non_null(jt->jfr_thread_local()->java_event_writer());
+    assert(buffer_writer != NULL, "invariant");
+    buffer_writer->release_bool_field_put(notified_offset, JNI_TRUE);
+  }
+}
+
+static jobject create_new_event_writer(JfrBuffer* buffer, TRAPS) {
+  assert(buffer != NULL, "invariant");
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
+  HandleMark hm(THREAD);
+  static const char klass[] = "jdk/jfr/internal/EventWriter";
+  static const char method[] = "<init>";
+  static const char signature[] = "(JJJJZ)V";
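+  // (JJJJZ)V: four longs and a boolean, matching the arguments pushed below
+  // (buffer start position, end position, position address, thread id, valid flag).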
+  JavaValue result(T_OBJECT);
+  JfrJavaArguments args(&result, klass, method, signature, CHECK_NULL);
+  // parameters
+  args.push_long((jlong)buffer->pos());
+  args.push_long((jlong)buffer->end());
+  args.push_long((jlong)buffer->pos_address());
+  args.push_long((jlong)JFR_THREAD_ID(THREAD));
+  args.push_int((int)JNI_TRUE);
+  JfrJavaSupport::new_object_global_ref(&args, CHECK_NULL);
+  return result.get_jobject();
+}
+
+jobject JfrJavaEventWriter::event_writer(Thread* t) {
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(t));
+  JfrThreadLocal* const tl = t->jfr_thread_local();
+  assert(tl->shelved_buffer() == NULL, "invariant");
+  return tl->java_event_writer();
+}
+
+jobject JfrJavaEventWriter::new_event_writer(TRAPS) {
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
+  assert(event_writer(THREAD) == NULL, "invariant");
+  JfrThreadLocal* const tl = THREAD->jfr_thread_local();
+  assert(!tl->has_java_buffer(), "invariant");
+  JfrBuffer* const buffer = tl->java_buffer();
+  if (buffer == NULL) {
+    JfrJavaSupport::throw_out_of_memory_error("OOME for thread local buffer", THREAD);
+    return NULL;
+  }
+  jobject java_event_writer = create_new_event_writer(buffer, CHECK_NULL);
+  tl->set_java_event_writer(java_event_writer);
+  assert(tl->has_java_event_writer(), "invariant");
+  return java_event_writer;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/writers/jfrJavaEventWriter.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_WRITERS_JFRJAVAEVENTWRITER_HPP
+#define SHARE_VM_JFR_WRITERS_JFRJAVAEVENTWRITER_HPP
+
+#include "jni.h"
+#include "memory/allocation.hpp"
+
+class JavaThread;
+class Thread;
+
+class JfrJavaEventWriter : AllStatic {
+  friend class JfrCheckpointThreadClosure;
+  friend class JfrJavaEventWriterNotifyOperation;
+  friend class JfrJavaEventWriterNotificationClosure;
+ private:
+  static void notify(JavaThread* jt);
+
+ public:
+  static bool has_required_classes(TRAPS);
+  static bool initialize();
+  static void notify();
+  static jobject event_writer(Thread* t);
+  static jobject new_event_writer(TRAPS);
+  static jboolean flush(jobject writer, jint used, jint requested, JavaThread* jt);
+};
+
+#endif // SHARE_VM_JFR_WRITERS_JFRJAVAEVENTWRITER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/writers/jfrMemoryWriterHost.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_WRITERS_JFRMEMORYWRITERHOST_HPP
+#define SHARE_VM_JFR_WRITERS_JFRMEMORYWRITERHOST_HPP
+
+#include "jfr/writers/jfrStorageHost.inline.hpp"
+
+#ifdef ASSERT
+class ExclusiveAccessAssert {
+ private:
+  bool _acquired;
+ public:
+  ExclusiveAccessAssert() : _acquired(false) {}
+  void acquire() { assert_non_acquired(); _acquired = true; }
+  void release() { assert_acquired(); _acquired = false; }
+  bool is_acquired() const { return _acquired; }
+  void assert_acquired() const { assert(_acquired, "Not acquired!"); }
+  void assert_non_acquired() const { assert(!_acquired, "Already acquired!"); }
+};
+#else
+class ExclusiveAccessAssert {};
+#endif
+
+template <typename Adapter, typename AP, typename AccessAssert = ExclusiveAccessAssert>
+class MemoryWriterHost : public StorageHost<Adapter, AP> {
+  debug_only(AccessAssert _access;)
+ public:
+  typedef typename Adapter::StorageType StorageType;
+ protected:
+  void bytes(void* dest, const void* buf, size_t len);
+  MemoryWriterHost(StorageType* storage, Thread* thread);
+  MemoryWriterHost(StorageType* storage, size_t size);
+  MemoryWriterHost(Thread* thread);
+  debug_only(bool is_acquired() const;)
+ public:
+  void acquire();
+  void release();
+};
+
+template <typename Adapter, typename AP>
+class AcquireReleaseMemoryWriterHost : public MemoryWriterHost<Adapter, AP> {
+ public:
+  typedef typename Adapter::StorageType StorageType;
+  AcquireReleaseMemoryWriterHost(StorageType* storage, Thread* thread);
+  AcquireReleaseMemoryWriterHost(StorageType* storage, size_t size);
+  AcquireReleaseMemoryWriterHost(Thread* thread);
+  ~AcquireReleaseMemoryWriterHost();
+};
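+
+// The Acquire/Release variant above is a scoped (RAII) convenience: every
+// constructor acquires exclusive access to the underlying storage and the
+// destructor releases it, with ExclusiveAccessAssert verifying the pairing in
+// debug builds. A minimal usage sketch, assuming a hypothetical adapter type
+// and an already obtained storage pointer:
+//
+//   {
+//     AcquireReleaseMemoryWriterHost<SomeAdapter, StackObj> writer(storage, thread);
+//     // ... write through the host ...
+//   } // destructor asserts is_acquired() and releases the storage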
+
+#endif // SHARE_VM_JFR_WRITERS_JFRMEMORYWRITERHOST_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/writers/jfrMemoryWriterHost.inline.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_WRITERS_JFRMEMORYWRITERHOST_INLINE_HPP
+#define SHARE_VM_JFR_WRITERS_JFRMEMORYWRITERHOST_INLINE_HPP
+
+#include "jfr/writers/jfrMemoryWriterHost.hpp"
+
+template <typename Adapter, typename AP, typename AccessAssert>
+inline void MemoryWriterHost<Adapter, AP, AccessAssert>::bytes(void* dest, const void* buf, size_t len) {
+  assert(dest != NULL, "invariant");
+  memcpy(dest, buf, len); // no encoding
+  this->set_current_pos(len);
+}
+
+template <typename Adapter, typename AP, typename AccessAssert>
+inline MemoryWriterHost<Adapter, AP, AccessAssert>::MemoryWriterHost(typename Adapter::StorageType* storage, Thread* thread) :
+  StorageHost<Adapter, AP>(storage, thread) {
+}
+
+template <typename Adapter, typename AP, typename AccessAssert>
+inline MemoryWriterHost<Adapter, AP, AccessAssert>::MemoryWriterHost(typename Adapter::StorageType* storage, size_t size) :
+  StorageHost<Adapter, AP>(storage, size) {
+}
+
+template <typename Adapter, typename AP, typename AccessAssert>
+inline MemoryWriterHost<Adapter, AP, AccessAssert>::MemoryWriterHost(Thread* thread) :
+  StorageHost<Adapter, AP>(thread) {
+}
+
+template <typename Adapter, typename AP, typename AccessAssert>
+inline void MemoryWriterHost<Adapter, AP, AccessAssert>::acquire() {
+  debug_only(_access.acquire();)
+  if (!this->is_valid()) {
+    this->flush();
+  }
+  debug_only(is_acquired();)
+}
+
+template <typename Adapter, typename AP, typename AccessAssert>
+inline void MemoryWriterHost<Adapter, AP, AccessAssert>::release() {
+  debug_only(is_acquired();)
+  StorageHost<Adapter, AP>::release();
+  debug_only(_access.release();)
+}
+
+#ifdef ASSERT
+template <typename Adapter, typename AP, typename AccessAssert>
+inline bool MemoryWriterHost<Adapter, AP, AccessAssert>::is_acquired() const {
+  return _access.is_acquired();
+}
+#endif
+
+template <typename Adapter, typename AP>
+inline AcquireReleaseMemoryWriterHost<Adapter, AP>::AcquireReleaseMemoryWriterHost(typename Adapter::StorageType* storage, Thread* thread) :
+  MemoryWriterHost<Adapter, AP>(storage, thread) {
+  this->acquire();
+}
+
+template <typename Adapter, typename AP>
+inline AcquireReleaseMemoryWriterHost<Adapter, AP>::AcquireReleaseMemoryWriterHost(typename Adapter::StorageType* storage, size_t size) :
+  MemoryWriterHost<Adapter, AP>(storage, size) {
+  this->acquire();
+}
+
+template <typename Adapter, typename AP>
+inline AcquireReleaseMemoryWriterHost<Adapter, AP>::AcquireReleaseMemoryWriterHost(Thread* thread) :
+  MemoryWriterHost<Adapter, AP>(thread) {
+  this->acquire();
+}
+
+template <typename Adapter, typename AP>
+inline AcquireReleaseMemoryWriterHost<Adapter, AP>::~AcquireReleaseMemoryWriterHost() {
+  assert(this->is_acquired(), "invariant");
+  this->release();
+}
+
+#endif // SHARE_VM_JFR_WRITERS_JFRMEMORYWRITERHOST_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/writers/jfrNativeEventWriter.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_WRITERS_JFRNATIVEEVENTWRITER_HPP
+#define SHARE_VM_JFR_WRITERS_JFRNATIVEEVENTWRITER_HPP
+
+#include "jfr/support/jfrFlush.hpp"
+#include "jfr/writers/jfrEncoding.hpp"
+#include "jfr/writers/jfrEventWriterHost.inline.hpp"
+#include "jfr/writers/jfrMemoryWriterHost.inline.hpp"
+#include "jfr/writers/jfrStorageAdapter.hpp"
+
+typedef Adapter<JfrFlush> JfrNativeEventAdapter;
+typedef MemoryWriterHost<JfrNativeEventAdapter, StackObj> JfrNativeEventWriterImpl;
+typedef StackEventWriterHost<BigEndianEncoder, CompressedIntegerEncoder, JfrNativeEventWriterImpl> JfrNativeEventWriter;
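+
+// Read bottom-up: the Adapter is parameterized by JfrFlush and supplies the
+// storage, MemoryWriterHost layers stack-allocated buffered writing over that
+// storage, and StackEventWriterHost adds the big-endian and compressed integer
+// encoders on top to form the native event writer.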
+
+#endif // SHARE_VM_JFR_WRITERS_JFRNATIVEEVENTWRITER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/writers/jfrPosition.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_WRITERS_JFRPOSITION_HPP
+#define SHARE_VM_JFR_WRITERS_JFRPOSITION_HPP
+
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+template <typename AP> // AllocationPolicy
+class Position : public AP {
+ private:
+  const u1* _start_pos; // logical start
+  u1* _current_pos;
+  const u1* _end_pos;
+
+ protected:
+  const u1* start_pos() const;
+  void set_start_pos(const u1* position);
+  u1* current_pos();
+  void set_current_pos(const u1* new_position);
+  void set_current_pos(size_t size);
+  const u1* end_pos() const;
+  void set_end_pos(const u1* position);
+  Position(const u1* start_pos, size_t size);
+  Position();
+
+ public:
+  size_t available_size() const;
+  intptr_t used_offset() const;
+  intptr_t current_offset() const;
+  size_t used_size() const;
+  void reset();
+};
+
+#endif // SHARE_VM_JFR_WRITERS_JFRPOSITION_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/writers/jfrPosition.inline.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_WRITERS_JFRPOSITION_INLINE_HPP
+#define SHARE_VM_JFR_WRITERS_JFRPOSITION_INLINE_HPP
+
+#include "jfr/writers/jfrPosition.hpp"
+
+template <typename AP>
+inline const u1* Position<AP>::start_pos() const {
+  return _start_pos;
+}
+
+template <typename AP>
+inline void Position<AP>::set_start_pos(const u1* position) {
+  _start_pos = position;
+}
+
+template <typename AP>
+inline u1* Position<AP>::current_pos() {
+  return _current_pos;
+}
+
+template <typename AP>
+inline void Position<AP>::set_current_pos(const u1* new_position) {
+  _current_pos = const_cast<u1*>(new_position);
+}
+
+template <typename AP>
+inline void Position<AP>::set_current_pos(size_t size) {
+  _current_pos += size;
+}
+
+template <typename AP>
+inline const u1* Position<AP>::end_pos() const {
+  return _end_pos;
+}
+
+template <typename AP>
+inline void Position<AP>::set_end_pos(const u1* position) {
+  _end_pos = position;
+}
+
+template <typename AP>
+inline Position<AP>::Position(const u1* start_pos, size_t size) :
+  AP(),
+  _start_pos(start_pos),
+  _current_pos(const_cast<u1*>(start_pos)),
+  _end_pos(start_pos + size) {
+}
+
+template <typename AP>
+inline Position<AP>::Position() : _start_pos(NULL), _current_pos(NULL), _end_pos(NULL) {
+}
+
+template <typename AP>
+inline size_t Position<AP>::available_size() const {
+  return _end_pos - _current_pos;
+}
+
+template <typename AP>
+inline intptr_t Position<AP>::used_offset() const {
+  return _current_pos - _start_pos;
+}
+
+template <typename AP>
+inline intptr_t Position<AP>::current_offset() const {
+  return this->used_offset();
+}
+
+template <typename AP>
+inline size_t Position<AP>::used_size() const {
+  return (size_t)used_offset();
+}
+
+template <typename AP>
+inline void Position<AP>::reset() {
+  set_current_pos(_start_pos);
+}
+
+#endif // SHARE_VM_JFR_WRITERS_JFRPOSITION_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/writers/jfrStorageAdapter.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_WRITERS_JFRSTORAGEADAPTER_HPP
+#define SHARE_VM_JFR_WRITERS_JFRSTORAGEADAPTER_HPP
+
+#include "jfr/utilities/jfrAllocation.hpp"
+
+class Thread;
+
+//
+// The adapters present writers with a uniform interface over storage.
+//
+// Adapter policy
+//
+// StorageType* storage();
+// const u1* start() const;
+// const u1* pos();
+// const u1* end() const;
+// void commit(u1* position);
+// bool flush(size_t used, size_t requested);
+// void release();
+//
+
+template <typename Flush>
+class Adapter {
+ public:
+  typedef typename Flush::Type StorageType;
+  Adapter(StorageType* storage, Thread* thread) : _storage(storage), _thread(thread) {}
+  Adapter(Thread* thread) : _storage(NULL), _thread(thread) {}
+
+  void set_storage(StorageType* storage) {
+    _storage = storage;
+  }
+
+  StorageType* storage() {
+    return _storage;
+  }
+
+  const u1* start() const {
+    assert(_storage != NULL, "invariant");
+    return _storage->start();
+  }
+
+  u1* pos() {
+    assert(_storage != NULL, "invariant");
+    return _storage->pos();
+  }
+
+  const u1* end() const {
+    assert(_storage != NULL, "invariant");
+    return _storage->end();
+  }
+
+  void commit(u1* position) {
+    assert(_storage != NULL, "invariant");
+    _storage->set_pos(position);
+  }
+
+  bool flush(size_t used, size_t requested) {
+    assert(_thread != NULL, "invariant");
+    Flush f(_storage, used, requested, _thread);
+    _storage = f.result();
+    return _storage != NULL;
+  }
+
+  void release() {
+    if (_storage != NULL && _storage->lease()) {
+      // This flush call will return the lease of a temporary storage area.
+      // Since the requested size is 0, the flush implementation will
+      // accommodate that 'size' request in the original thread local
+      // storage, by implication restoring the original in the process
+      // of returning the lease.
+      flush(0, 0);
+    }
+  }
+
+ private:
+  StorageType* _storage;
+  Thread* _thread;
+};
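+
+// A minimal sketch of the Flush policy assumed by Adapter<Flush>, inferred
+// from the calls above: the policy exposes a nested ::Type for its storage
+// and, constructed from the old storage together with the used and requested
+// byte counts, hands back a (possibly renewed) storage via result(). The
+// names below are hypothetical and for illustration only:
+//
+//   class ExampleFlush {
+//    public:
+//     typedef JfrBuffer Type; // assumed storage type
+//     ExampleFlush(Type* old, size_t used, size_t requested, Thread* thread)
+//       : _result(old) {}     // a real policy would renew or release a lease here
+//     Type* result() const { return _result; }
+//    private:
+//     Type* _result;
+//   };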
+
+template <size_t DEFAULT_SIZE = K>
+class MallocAdapter {
+ private:
+  u1* _start;
+  u1* _pos;
+  u1* _end;
+  size_t _initial_size;
+  bool _has_ownership;
+
+  bool allocate(size_t size);
+  void deallocate();
+
+ public:
+  typedef u1 StorageType;
+  MallocAdapter(u1* storage, Thread* thread);
+  MallocAdapter(u1* storage, size_t size);
+  MallocAdapter(Thread* thread);
+  ~MallocAdapter();
+
+  StorageType* storage() { return _start; }
+  const u1* start() const { return _start; }
+  u1* pos() { return _pos; }
+  void commit(u1* position) { _pos = position; }
+  const u1* end() const { return _end; }
+  void release() {}
+  bool flush(size_t used, size_t requested);
+};
+
+template <size_t DEFAULT_SIZE>
+MallocAdapter<DEFAULT_SIZE>::MallocAdapter(u1* storage, size_t size) :
+  _start(storage),
+  _pos(storage),
+  _end(storage + size),
+  _initial_size(size),
+  _has_ownership(false) {
+}
+
+template <size_t DEFAULT_SIZE>
+MallocAdapter<DEFAULT_SIZE>::MallocAdapter(u1* storage, Thread* thread) :
+  _start(storage),
+  _pos(storage),
+  _end(storage),
+  _initial_size(0),
+  _has_ownership(false) {
+}
+
+template <size_t DEFAULT_SIZE>
+MallocAdapter<DEFAULT_SIZE>::MallocAdapter(Thread* thread) :
+  _start(NULL),
+  _pos(NULL),
+  _end(NULL),
+  _initial_size(DEFAULT_SIZE),
+  _has_ownership(true) {
+  allocate(DEFAULT_SIZE);
+}
+
+template <size_t DEFAULT_SIZE>
+MallocAdapter<DEFAULT_SIZE>::~MallocAdapter() {
+  if (_has_ownership) {
+    deallocate();
+  }
+}
+
+template <size_t DEFAULT_SIZE>
+bool MallocAdapter<DEFAULT_SIZE>::allocate(size_t size) {
+  if (NULL == _start) {
+    _start = JfrCHeapObj::new_array<u1>(size);
+    if (_start) {
+      _pos = _start;
+      _end = _start + size;
+      _initial_size = size;
+    }
+  }
+  return _start != NULL;
+}
+
+template <size_t DEFAULT_SIZE>
+void MallocAdapter<DEFAULT_SIZE>::deallocate() {
+  if (_start != NULL) {
+    JfrCHeapObj::free(_start, (size_t)(_end - _start));
+  }
+}
+
+template <size_t DEFAULT_SIZE>
+bool MallocAdapter<DEFAULT_SIZE>::flush(size_t used, size_t requested) {
+  if (!_has_ownership) {
+    // can't just realloc a storage that we don't own
+    return false;
+  }
+  assert(_start != NULL, "invariant");
+  assert(used <= (size_t)(_end - _pos), "invariant");
+  assert(_pos + used <= _end, "invariant");
+  const size_t previous_storage_size = _end - _start;
+  const size_t new_storage_size = used + requested + (previous_storage_size * 2);
+  u1* const new_storage = JfrCHeapObj::new_array<u1>(new_storage_size);
+  if (!new_storage) {
+    return false;
+  }
+  const size_t previous_pos_offset = _pos - _start;
+  // migrate in-flight data
+  memcpy(new_storage, _start, previous_pos_offset + used);
+  JfrCHeapObj::free(_start, previous_storage_size);
+  _start = new_storage;
+  _pos = _start + previous_pos_offset;
+  _end = _start + new_storage_size;
+  return true;
+}
+
+class NoOwnershipAdapter {
+ private:
+  u1* _start;
+  u1* _pos;
+  u1* _end;
+  size_t _size;
+
+ public:
+  typedef u1 StorageType;
+  NoOwnershipAdapter(u1* storage, size_t size) : _start(storage), _pos(storage), _end(storage + size), _size(size) {}
+  NoOwnershipAdapter(u1* storage, Thread* thread) : _start(storage), _pos(storage), _end(storage), _size(0) {
+    ShouldNotCallThis();
+  }
+  NoOwnershipAdapter(Thread* thread) : _start(NULL), _pos(NULL), _end(NULL), _size(0) {
+    ShouldNotCallThis();
+  }
+  StorageType* storage() { return _start; }
+  const u1* start() const { return _start; }
+  u1* pos() { return _pos; }
+  void commit(u1* position) { _pos = position; }
+  const u1* end() const { return _end; }
+  void release() {}
+  bool flush(size_t used, size_t requested) {
+    // don't flush/expand a buffer that is not our own
+    return false;
+  }
+};
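+
+// Hedged usage sketch: NoOwnershipAdapter wraps a caller-owned, fixed-size
+// buffer; because flush() always fails, a write that would overflow the
+// buffer invalidates the writer instead of reallocating. For example
+// (hypothetical scratch buffer):
+//
+//   u1 scratch[256];
+//   NoOwnershipAdapter adapter(scratch, sizeof scratch);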
+
+#endif // SHARE_VM_JFR_WRITERS_JFRSTORAGEADAPTER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/writers/jfrStorageHost.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_WRITERS_JFRSTORAGEHOST_HPP
+#define SHARE_VM_JFR_WRITERS_JFRSTORAGEHOST_HPP
+
+#include "jfr/writers/jfrPosition.inline.hpp"
+
+template <typename Adapter, typename AP> // Adapter and AllocationPolicy
+class StorageHost : public Position<AP> {
+ public:
+  typedef typename Adapter::StorageType StorageType;
+ private:
+  Adapter _adapter;
+
+ protected:
+  void bind();
+  void soft_reset();
+  void hard_reset();
+  void cancel();
+  bool is_backed();
+  bool accommodate(size_t used, size_t requested);
+  void commit();
+  void release();
+  StorageHost(StorageType* storage, Thread* thread);
+  StorageHost(StorageType* storage, size_t size);
+  StorageHost(Thread* thread);
+
+ public:
+  StorageType* storage();
+  bool is_valid() const;
+  void set_storage(StorageType* storage);
+  void flush();
+  void seek(intptr_t offset);
+};
+
+#endif // SHARE_VM_JFR_WRITERS_JFRSTORAGEHOST_HPP
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/writers/jfrStorageHost.inline.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_WRITERS_JFRSTORAGEHOST_INLINE_HPP
+#define SHARE_VM_JFR_WRITERS_JFRSTORAGEHOST_INLINE_HPP
+
+#include "jfr/writers/jfrStorageHost.hpp"
+
+template <typename Adapter, typename AP>
+inline void StorageHost<Adapter, AP>::bind() {
+  if (is_backed()) {
+    this->hard_reset();
+    assert(is_valid(), "invariant");
+    return;
+  }
+  this->set_start_pos(NULL);
+  this->set_current_pos((const u1*)NULL);
+  this->set_end_pos(NULL);
+}
+
+template <typename Adapter, typename AP>
+inline void StorageHost<Adapter, AP>::soft_reset() {
+  this->set_start_pos(this->current_pos());
+}
+
+template <typename Adapter, typename AP>
+inline void StorageHost<Adapter, AP>::hard_reset() {
+  this->set_start_pos(_adapter.pos());
+  this->set_current_pos(_adapter.pos());
+  this->set_end_pos(_adapter.end());
+}
+
+template <typename Adapter, typename AP>
+inline void StorageHost<Adapter, AP>::cancel() {
+  this->set_end_pos(NULL);
+}
+
+template <typename Adapter, typename AP>
+inline bool StorageHost<Adapter, AP>::is_backed() {
+  return _adapter.storage() != NULL;
+}
+
+template <typename Adapter, typename AP>
+inline bool StorageHost<Adapter, AP>::accommodate(size_t used, size_t requested) {
+  if (!_adapter.flush(used, requested)) {
+    this->cancel();
+    return false;
+  }
+  assert(is_backed(), "invariant");
+  this->hard_reset();
+  this->set_current_pos(used);
+  return true;
+}
+
+template <typename Adapter, typename AP>
+inline void StorageHost<Adapter, AP>::commit() {
+  if (this->is_valid()) {
+    assert(_adapter.pos() == this->start_pos(), "invariant");
+    assert(_adapter.end() == this->end_pos(), "invariant");
+    u1* new_position = this->current_pos();
+    _adapter.commit(new_position);
+    this->set_start_pos(new_position);
+  }
+}
+
+template <typename Adapter, typename AP>
+inline void StorageHost<Adapter, AP>::release() {
+  _adapter.release();
+}
+
+template <typename Adapter, typename AP>
+inline StorageHost<Adapter, AP>::StorageHost(typename Adapter::StorageType* storage, Thread* thread) : Position<AP>(), _adapter(storage, thread) {
+  bind();
+}
+
+template <typename Adapter, typename AP>
+inline StorageHost<Adapter, AP>::StorageHost(typename Adapter::StorageType* storage, size_t size) : Position<AP>(), _adapter(storage, size) {
+  bind();
+}
+
+template <typename Adapter, typename AP>
+inline StorageHost<Adapter, AP>::StorageHost(Thread* thread) : Position<AP>(), _adapter(thread) {
+  bind();
+}
+
+template <typename Adapter, typename AP>
+inline bool StorageHost<Adapter, AP>::is_valid() const {
+  return this->end_pos() != NULL;
+}
+
+template <typename Adapter, typename AP>
+inline typename Adapter::StorageType* StorageHost<Adapter, AP>::storage() {
+  return _adapter.storage();
+}
+
+template <typename Adapter, typename AP>
+inline void StorageHost<Adapter, AP>::set_storage(typename Adapter::StorageType* storage) {
+  _adapter.set_storage(storage);
+  bind();
+}
+
+template <typename Adapter, typename AP>
+inline void StorageHost<Adapter, AP>::flush() {
+  this->accommodate(this->is_valid() ? this->used_size() : 0, 0);
+}
+
+template <typename Adapter, typename AP>
+inline void StorageHost<Adapter, AP>::seek(intptr_t offset) {
+  if (this->is_valid()) {
+    assert(offset >= 0, "negative offsets not supported");
+    assert(this->start_pos() + offset <= this->end_pos(), "invariant");
+    assert(this->start_pos() + offset >= this->start_pos(), "invariant");
+    this->set_current_pos(this->start_pos() + offset);
+  }
+}
+
+#endif // SHARE_VM_JFR_WRITERS_JFRSTORAGEHOST_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/writers/jfrStreamWriterHost.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_WRITERS_JFRSTREAMWRITERHOST_HPP
+#define SHARE_VM_JFR_WRITERS_JFRSTREAMWRITERHOST_HPP
+
+#include "jfr/utilities/jfrTypes.hpp"
+#include "jfr/writers/jfrMemoryWriterHost.inline.hpp"
+
+template <typename Adapter, typename AP> // Adapter and AllocationPolicy
+class StreamWriterHost : public MemoryWriterHost<Adapter, AP> {
+ public:
+  typedef typename Adapter::StorageType StorageType;
+ private:
+  intptr_t _stream_pos;
+  fio_fd _fd;
+  intptr_t current_stream_position() const;
+
+ protected:
+  StreamWriterHost(StorageType* storage, Thread* thread);
+  StreamWriterHost(StorageType* storage, size_t size);
+  StreamWriterHost(Thread* thread);
+  bool accommodate(size_t used, size_t requested);
+  void bytes(void* dest, const void* src, size_t len);
+  void flush(size_t size);
+  bool has_valid_fd() const;
+
+ public:
+  intptr_t current_offset() const;
+  void seek(intptr_t offset);
+  void flush();
+  void write_unbuffered(const void* src, size_t len);
+  bool is_valid() const;
+  void close_fd();
+  void reset(fio_fd fd);
+};
+
+#endif // SHARE_VM_JFR_WRITERS_JFRSTREAMWRITERHOST_HPP
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/writers/jfrStreamWriterHost.inline.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_WRITERS_JFRSTREAMWRITERHOST_INLINE_HPP
+#define SHARE_VM_JFR_WRITERS_JFRSTREAMWRITERHOST_INLINE_HPP
+
+#include "jfr/writers/jfrStreamWriterHost.hpp"
+#include "runtime/os.hpp"
+
+template <typename Adapter, typename AP>
+StreamWriterHost<Adapter, AP>::StreamWriterHost(typename Adapter::StorageType* storage, Thread* thread) :
+  MemoryWriterHost<Adapter, AP>(storage, thread), _stream_pos(0), _fd(invalid_fd) {
+}
+
+template <typename Adapter, typename AP>
+StreamWriterHost<Adapter, AP>::StreamWriterHost(typename Adapter::StorageType* storage, size_t size) :
+  MemoryWriterHost<Adapter, AP>(storage, size), _stream_pos(0), _fd(invalid_fd) {
+}
+
+template <typename Adapter, typename AP>
+StreamWriterHost<Adapter, AP>::StreamWriterHost(Thread* thread) :
+  MemoryWriterHost<Adapter, AP>(thread), _stream_pos(0), _fd(invalid_fd) {
+}
+
+template <typename Adapter, typename AP>
+inline intptr_t StreamWriterHost<Adapter, AP>::current_stream_position() const {
+  return this->used_offset() + _stream_pos;
+}
+
+template <typename Adapter, typename AP>
+inline bool StreamWriterHost<Adapter, AP>::accommodate(size_t used, size_t requested) {
+  if (used > 0) {
+    this->flush(used);
+  }
+  assert(this->used_size() == 0, "invariant");
+  if (this->available_size() >= requested) {
+    return true;
+  }
+  return StorageHost<Adapter, AP>::accommodate(0, requested);
+}
+
+template <typename Adapter, typename AP>
+inline void StreamWriterHost<Adapter, AP>::bytes(void* dest, const void* buf, size_t len) {
+  if (len > this->available_size()) {
+    this->write_unbuffered(buf, len);
+    return;
+  }
+  MemoryWriterHost<Adapter, AP>::bytes(dest, buf, len);
+}
+
+template <typename Adapter, typename AP>
+inline void StreamWriterHost<Adapter, AP>::flush(size_t size) {
+  assert(size > 0, "invariant");
+  assert(this->is_valid(), "invariant");
+  _stream_pos += os::write(_fd, this->start_pos(), (int)size);
+  StorageHost<Adapter, AP>::reset();
+  assert(0 == this->used_offset(), "invariant");
+}
+
+template <typename Adapter, typename AP>
+inline bool StreamWriterHost<Adapter, AP>::has_valid_fd() const {
+  return invalid_fd != _fd;
+}
+
+template <typename Adapter, typename AP>
+inline intptr_t StreamWriterHost<Adapter, AP>::current_offset() const {
+  return current_stream_position();
+}
+
+template <typename Adapter, typename AP>
+void StreamWriterHost<Adapter, AP>::seek(intptr_t offset) {
+  this->flush();
+  assert(0 == this->used_offset(), "can only seek from beginning");
+  _stream_pos = os::seek_to_file_offset(_fd, offset);
+}
+
+template <typename Adapter, typename AP>
+void StreamWriterHost<Adapter, AP>::flush() {
+  if (this->is_valid()) {
+    const size_t used = this->used_size();
+    if (used > 0) {
+      this->flush(used);
+    }
+  }
+}
+
+template <typename Adapter, typename AP>
+void StreamWriterHost<Adapter, AP>::write_unbuffered(const void* buf, size_t len) {
+  this->flush();
+  assert(0 == this->used_offset(), "buffer should be empty after flush");
+  const u1* position = (const u1*)buf;
+  while (len > 0) {
+    // write in chunks of at most INT_MAX bytes and advance the source pointer,
+    // so inputs larger than INT_MAX are neither truncated nor rewritten
+    const size_t chunk = MIN2<size_t>(len, (size_t)INT_MAX);
+    _stream_pos += os::write(_fd, position, (int)chunk);
+    position += chunk;
+    len -= chunk;
+  }
+}
+
+template <typename Adapter, typename AP>
+inline bool StreamWriterHost<Adapter, AP>::is_valid() const {
+  return has_valid_fd();
+}
+
+template <typename Adapter, typename AP>
+inline void StreamWriterHost<Adapter, AP>::close_fd() {
+  assert(this->has_valid_fd(), "closing invalid fd!");
+  os::close(_fd);
+  _fd = invalid_fd;
+}
+
+template <typename Adapter, typename AP>
+inline void StreamWriterHost<Adapter, AP>::reset(fio_fd fd) {
+  assert(!this->has_valid_fd(), "invariant");
+  _fd = fd;
+  _stream_pos = 0;
+  this->hard_reset();
+}
+
+#endif // SHARE_VM_JFR_WRITERS_JFRSTREAMWRITERHOST_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/writers/jfrWriterHost.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_WRITERS_JFRWRITERHOST_HPP
+#define SHARE_VM_JFR_WRITERS_JFRWRITERHOST_HPP
+
+#include "jni.h"
+#include "utilities/globalDefinitions.hpp"
+#include "jfr/utilities/jfrTime.hpp"
+
+class ClassLoaderData;
+class Klass;
+class Method;
+// XXX class PackageEntry;
+class Symbol;
+class Thread;
+
+// BE == Base Encoder
+// IE == Integer Encoder
+template <typename BE, typename IE, typename WriterPolicyImpl >
+class WriterHost : public WriterPolicyImpl {
+ private:
+  const bool _compressed_integers;
+
+  template <typename T>
+  void write_padded(T value);
+  template <typename T>
+  void write_padded(const T* value, size_t len);
+  template <typename T>
+  u1* write_padded(const T* value, size_t len, u1* pos);
+  template <typename T>
+  void write(const T* value, size_t len);
+  template <typename T>
+  u1* write(const T* value, size_t len, u1* pos);
+  void write_utf8(const char* value);
+  void write_utf16(const jchar* value, jint len);
+
+ protected:
+  template <typename T>
+  void be_write(T value);
+  template <typename T>
+  void be_write(const T* value, size_t len);
+  template <typename StorageType>
+  WriterHost(StorageType* storage, Thread* thread);
+  template <typename StorageType>
+  WriterHost(StorageType* storage, size_t size);
+  WriterHost(Thread* thread);
+  u1* ensure_size(size_t requested_size);
+
+ public:
+  template <typename T>
+  void write(T value);
+  void write(bool value);
+  void write(float value);
+  void write(double value);
+  void write(const char* value);
+  void write(char* value);
+  void write(jstring value);
+  void write(const ClassLoaderData* cld);
+  void write(const Klass* klass);
+  void write(const Method* method);
+// XXX  void write(const PackageEntry* package);
+  void write(const Symbol* symbol);
+  void write(const Ticks& time);
+  void write(const Tickspan& time);
+  void write(const JfrTicks& time);
+  void write(const JfrTickspan& time);
+  void bytes(const void* buf, size_t len);
+  void write_utf8_u2_len(const char* value);
+  template <typename T>
+  void write_padded_at_offset(T value, intptr_t offset);
+  template <typename T>
+  void write_at_offset(T value, intptr_t offset);
+  template <typename T>
+  void write_be_at_offset(T value, intptr_t offset);
+  intptr_t reserve(size_t size);
+};
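+
+// reserve() and the write_*_at_offset() members enable a back-patching
+// pattern: reserve space for a not-yet-known value, emit the payload, then
+// patch the value at the saved offset. A hedged sketch (hypothetical field
+// meaning and sizes):
+//
+//   const intptr_t size_offset = writer.reserve(sizeof(u4));
+//   // ... write the event payload ...
+//   writer.write_padded_at_offset((u4)(writer.current_offset() - size_offset), size_offset);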
+
+#endif // SHARE_VM_JFR_WRITERS_JFRWRITERHOST_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/writers/jfrWriterHost.inline.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,360 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_WRITERS_JFRWRITERHOST_INLINE_HPP
+#define SHARE_VM_JFR_WRITERS_JFRWRITERHOST_INLINE_HPP
+
+#include "classfile/javaClasses.hpp"
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
+#include "jfr/recorder/service/jfrOptionSet.hpp"
+#include "jfr/writers/jfrEncoding.hpp"
+#include "jfr/writers/jfrWriterHost.hpp"
+#include "memory/resourceArea.hpp"
+#include "oops/oop.hpp"
+#include "oops/symbol.hpp"
+//#include "oops/typeArrayOop.inline.hpp" - XXX
+
+inline bool compressed_integers() {
+  static const bool comp_integers = JfrOptionSet::compressed_integers();
+  return comp_integers;
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl >
+template <typename T>
+inline void WriterHost<BE, IE, WriterPolicyImpl>::write_padded(T value) {
+  write_padded(&value, 1);
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl >
+template <typename T>
+inline void WriterHost<BE, IE, WriterPolicyImpl>::write_padded(const T* value, size_t len) {
+  assert(value != NULL, "invariant");
+  assert(len > 0, "invariant");
+  u1* const pos = ensure_size(sizeof(T) * len);
+  if (pos) {
+    this->set_current_pos(write_padded(value, len, pos));
+  }
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl >
+template <typename T>
+inline u1* WriterHost<BE, IE, WriterPolicyImpl>::write_padded(const T* value, size_t len, u1* pos) {
+  assert(value != NULL, "invariant");
+  assert(len > 0, "invariant");
+  assert(pos != NULL, "invariant");
+  return _compressed_integers ? IE::write_padded(value, len, pos) : BE::write_padded(value, len, pos);
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl >
+template <typename T>
+inline void WriterHost<BE, IE, WriterPolicyImpl>::write(const T* value, size_t len) {
+  assert(value != NULL, "invariant");
+  assert(len > 0, "invariant");
+  u1* const pos = ensure_size(sizeof(T) * len);
+  if (pos) {
+    this->set_current_pos(write(value, len, pos));
+  }
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl >
+template <typename T>
+inline u1* WriterHost<BE, IE, WriterPolicyImpl>::write(const T* value, size_t len, u1* pos) {
+  assert(value != NULL, "invariant");
+  assert(len > 0, "invariant");
+  assert(pos != NULL, "invariant");
+  return _compressed_integers ? IE::write(value, len, pos) : BE::write(value, len, pos);
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+void WriterHost<BE, IE, WriterPolicyImpl>::write_utf8(const char* value) {
+  if (NULL == value) {
+    // only write encoding byte indicating NULL string
+    write<u1>(NULL_STRING);
+    return;
+  }
+  write<u1>(UTF8); // designate encoding
+  const jint len = MIN2<jint>(max_jint, (jint)strlen(value));
+  write(len);
+  if (len > 0) {
+    be_write(value, len);
+  }
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+void WriterHost<BE, IE, WriterPolicyImpl>::write_utf16(const jchar* value, jint len) {
+  assert(value != NULL, "invariant");
+  write((u1)UTF16); // designate encoding
+  write(len);
+  if (len > 0) {
+    write(value, len);
+  }
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl >
+template <typename T>
+inline void WriterHost<BE, IE, WriterPolicyImpl>::be_write(T value) {
+  u1* const pos = ensure_size(sizeof(T));
+  if (pos) {
+    this->set_current_pos(BE::be_write(&value, 1, pos));
+  }
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl >
+template <typename T>
+inline void WriterHost<BE, IE, WriterPolicyImpl>::be_write(const T* value, size_t len) {
+  assert(value != NULL, "invariant");
+  assert(len > 0, "invariant");
+  u1* const pos = ensure_size(sizeof(T) * len);
+  if (pos) {
+    this->set_current_pos(BE::be_write(value, len, pos));
+  }
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl >
+template <typename StorageType>
+inline WriterHost<BE, IE, WriterPolicyImpl>::WriterHost(StorageType* storage, Thread* thread) :
+  WriterPolicyImpl(storage, thread),
+  _compressed_integers(compressed_integers()) {
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl >
+template <typename StorageType>
+inline WriterHost<BE, IE, WriterPolicyImpl>::WriterHost(StorageType* storage, size_t size) :
+  WriterPolicyImpl(storage, size),
+  _compressed_integers(compressed_integers()) {
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl >
+inline WriterHost<BE, IE, WriterPolicyImpl>::WriterHost(Thread* thread) :
+  WriterPolicyImpl(thread),
+  _compressed_integers(compressed_integers()) {
+}
+
+// Extra size added as a safety cushion when dimensioning memory.
+// With varint encoding, the worst case is
+// associated with writing negative values.
+// For example, writing a negative s1 (-1)
+// will encode as 0xff 0x0f (2 bytes).
+// In this example, the sizeof(T) == 1 and length == 1,
+// but the implementation will need to dimension
+// 2 bytes for the encoding.
+// Hopefully, negative values should be relatively rare.
+static const size_t size_safety_cushion = 1;
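+// ensure_size() below always dimensions requested + size_safety_cushion bytes,
+// so the single negative s1 in the example above gets 1 + 1 = 2 bytes,
+// matching its 2-byte worst-case encoding.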
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+inline u1* WriterHost<BE, IE, WriterPolicyImpl>::ensure_size(size_t requested) {
+  if (!this->is_valid()) {
+    // cancelled
+    return NULL;
+  }
+  if (this->available_size() < requested + size_safety_cushion) {
+    if (!this->accommodate(this->used_size(), requested + size_safety_cushion)) {
+      this->cancel();
+      return NULL;
+    }
+  }
+  assert(requested + size_safety_cushion <= this->available_size(), "invariant");
+  return this->current_pos();
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+template <typename T>
+inline void WriterHost<BE, IE, WriterPolicyImpl>::write(T value) {
+  write(&value, 1);
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+inline void WriterHost<BE, IE, WriterPolicyImpl>::write(bool value) {
+  be_write((u1)value);
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+inline void WriterHost<BE, IE, WriterPolicyImpl>::write(float value) {
+  be_write(*(u4*)&(value));
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+inline void WriterHost<BE, IE, WriterPolicyImpl>::write(double value) {
+  be_write(*(u8*)&(value));
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+inline void WriterHost<BE, IE, WriterPolicyImpl>::write(const char* value) {
+  // UTF-8, max_jint len
+  write_utf8(value);
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+inline void WriterHost<BE, IE, WriterPolicyImpl>::write(char* value) {
+  write(const_cast<const char*>(value));
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+inline void WriterHost<BE, IE, WriterPolicyImpl>::write(jstring string) {
+  if (string == NULL) {
+    write<u1>(NULL_STRING);
+    return;
+  }
+  const oop string_oop = JNIHandles::resolve_external_guard(string);
+  assert(string_oop != NULL, "invariant");
+  const size_t length = (size_t)java_lang_String::length(string_oop);
+  if (0 == length) {
+    write<u1>(EMPTY_STRING);
+    return;
+  }
+  const bool is_latin1_encoded = false;
+  const typeArrayOop value = java_lang_String::value(string_oop);
+  assert(value != NULL, "invariant");
+  if (is_latin1_encoded) {
+    write<u1>(LATIN1);
+    write<u4>((u4)length);
+    be_write(value->byte_at_addr(0), length);
+  } else {
+    write<u1>(UTF16);
+    write<u4>((u4)length);
+    write(value->char_at_addr(0), length);
+  }
+}
+
+template <typename Writer, typename T>
+inline void tag_write(Writer* w, const T* t) {
+  assert(w != NULL, "invariant");
+  const traceid id = t == NULL ? 0 : JfrTraceId::use(t);
+  w->write(id);
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+void WriterHost<BE, IE, WriterPolicyImpl>::write(const ClassLoaderData* cld) {
+  tag_write(this, cld);
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+void WriterHost<BE, IE, WriterPolicyImpl>::write(const Klass* klass) {
+  tag_write(this, klass);
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+void WriterHost<BE, IE, WriterPolicyImpl>::write(const Method* method) {
+  tag_write(this, method);
+}
+
+// XXX
+// template <typename BE, typename IE, typename WriterPolicyImpl>
+// void WriterHost<BE, IE, WriterPolicyImpl>::write(const PackageEntry* package) {
+//   tag_write(this, package);
+// }
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+void WriterHost<BE, IE, WriterPolicyImpl>::write(const Symbol* symbol) {
+  ResourceMark rm;
+  write_utf8(symbol != NULL ? symbol->as_C_string() : NULL);
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+void WriterHost<BE, IE, WriterPolicyImpl>::write(const Ticks& time) {
+  write((u8)JfrTime::is_ft_enabled() ? time.ft_value() : time.value());
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+void WriterHost<BE, IE, WriterPolicyImpl>::write(const Tickspan& time) {
+  write((u8)JfrTime::is_ft_enabled() ? time.ft_value() : time.value());
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+void WriterHost<BE, IE, WriterPolicyImpl>::write(const JfrTicks& time) {
+  write((u8)time.value());
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+void WriterHost<BE, IE, WriterPolicyImpl>::write(const JfrTickspan& time) {
+  write((u8)time.value());
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+void WriterHost<BE, IE, WriterPolicyImpl>::bytes(const void* buf, size_t len) {
+  u1* const pos = this->ensure_size(len);
+  if (pos != NULL) {
+    WriterPolicyImpl::bytes(pos, buf, len); // WriterPolicyImpl responsible for position update
+  }
+}
+
+// UTF-8 for use with classfile/bytecodes
+template <typename BE, typename IE, typename WriterPolicyImpl>
+inline void WriterHost<BE, IE, WriterPolicyImpl>::write_utf8_u2_len(const char* value) {
+  u2 len = 0;
+  if (value != NULL) {
+    len = MIN2<u2>(max_jushort, (u2)strlen(value));
+  }
+  write(len);
+  if (len > 0) {
+    be_write(value, len);
+  }
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+inline intptr_t WriterHost<BE, IE, WriterPolicyImpl>::reserve(size_t size) {
+  if (ensure_size(size) != NULL) {
+    intptr_t reserved_offset = this->current_offset();
+    this->set_current_pos(size);
+    return reserved_offset;
+  }
+  this->cancel();
+  return 0;
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+template <typename T>
+inline void WriterHost<BE, IE, WriterPolicyImpl>::write_padded_at_offset(T value, intptr_t offset) {
+  if (this->is_valid()) {
+    const intptr_t current = this->current_offset();
+    this->seek(offset);
+    write_padded(value);
+    this->seek(current); // restore
+  }
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+template <typename T>
+inline void WriterHost<BE, IE, WriterPolicyImpl>::write_at_offset(T value, intptr_t offset) {
+  if (this->is_valid()) {
+    const intptr_t current = this->current_offset();
+    this->seek(offset);
+    write(value);
+    this->seek(current); // restore
+  }
+}
+
+template <typename BE, typename IE, typename WriterPolicyImpl>
+template <typename T>
+inline void WriterHost<BE, IE, WriterPolicyImpl>::write_be_at_offset(T value, intptr_t offset) {
+  if (this->is_valid()) {
+    const intptr_t current = this->current_offset();
+    this->seek(offset);
+    be_write(value);
+    this->seek(current); // restore
+  }
+}
+
+#endif // SHARE_VM_JFR_WRITERS_JFRWRITERHOST_INLINE_HPP
+
--- a/src/share/vm/memory/defNewGeneration.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/memory/defNewGeneration.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -551,10 +551,10 @@
   return allocate(size, is_tlab);
 }
 
-void DefNewGeneration::adjust_desired_tenuring_threshold() {
+void DefNewGeneration::adjust_desired_tenuring_threshold(GCTracer& tracer) {
   // Set the desired survivor size to half the real survivor space
   _tenuring_threshold =
-    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
+    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize, tracer);
 }
 
 void DefNewGeneration::collect(bool   full,
@@ -664,7 +664,7 @@
 
     assert(to()->is_empty(), "to space should be empty now");
 
-    adjust_desired_tenuring_threshold();
+    adjust_desired_tenuring_threshold(gc_tracer);
 
     // A successful scavenge should restart the GC time limit count which is
     // for full GC's.
--- a/src/share/vm/memory/defNewGeneration.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/memory/defNewGeneration.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -129,7 +129,7 @@
   }
 
   // Tenuring
-  void adjust_desired_tenuring_threshold();
+  void adjust_desired_tenuring_threshold(GCTracer &tracer);
 
   // Spaces
   EdenSpace*       _eden_space;
--- a/src/share/vm/memory/genCollectedHeap.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -59,6 +59,9 @@
 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
 #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
 #endif // INCLUDE_ALL_GCS
+#if INCLUDE_JFR
+#include "jfr/jfr.hpp"
+#endif // INCLUDE_JFR
 
 GenCollectedHeap* GenCollectedHeap::_gch;
 NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)
@@ -747,6 +750,7 @@
 
 void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
   JNIHandles::weak_oops_do(root_closure);
+  JFR_ONLY(Jfr::weak_oops_do(root_closure));
   for (int i = 0; i < _n_gens; i++) {
     _gens[i]->ref_processor()->weak_oops_do(root_closure);
   }
--- a/src/share/vm/memory/metaspaceTracer.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/memory/metaspaceTracer.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -24,10 +24,9 @@
 
 #include "precompiled.hpp"
 #include "classfile/classLoaderData.hpp"
+#include "jfr/jfrEvents.hpp"
 #include "memory/metaspaceTracer.hpp"
 #include "oops/oop.inline.hpp"
-#include "trace/tracing.hpp"
-#include "trace/traceBackend.hpp"
 
 void MetaspaceTracer::report_gc_threshold(size_t old_val,
                                           size_t new_val,
@@ -67,9 +66,9 @@
       event.set_anonymousClassLoader(true);
     } else {
       if (cld->is_the_null_class_loader_data()) {
-        event.set_classLoader((Klass*) NULL);
+        event.set_classLoader(NULL);
       } else {
-        event.set_classLoader(cld->class_loader()->klass());
+        event.set_classLoader(cld);
       }
       event.set_anonymousClassLoader(false);
     }
--- a/src/share/vm/memory/referenceProcessor.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/memory/referenceProcessor.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -34,6 +34,9 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
 #include "runtime/jniHandles.hpp"
+#if INCLUDE_JFR
+#include "jfr/jfr.hpp"
+#endif // INCLUDE_JFR
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
@@ -300,6 +303,7 @@
   }
 #endif
   JNIHandles::weak_oops_do(is_alive, keep_alive);
+  JFR_ONLY(Jfr::weak_oops_do(is_alive, keep_alive));
   complete_gc->do_void();
 }
 
--- a/src/share/vm/oops/arrayKlass.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/oops/arrayKlass.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -94,6 +94,7 @@
   int vtable_size = Universe::base_vtable_size();
   set_vtable_length(vtable_size);
   set_is_cloneable(); // All arrays are considered to be cloneable (See JLS 20.1.5)
+  JFR_ONLY(INIT_ID(this);)
 }
 
 
--- a/src/share/vm/oops/instanceKlass.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/oops/instanceKlass.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -38,7 +38,9 @@
 #include "utilities/accessFlags.hpp"
 #include "utilities/bitMap.inline.hpp"
 #include "utilities/macros.hpp"
-#include "trace/traceMacros.hpp"
+#if INCLUDE_JFR
+#include "jfr/support/jfrKlassExtension.hpp"
+#endif
 
 // An InstanceKlass is the VM level representation of a Java class.
 // It contains all information needed for a class at execution runtime.
@@ -833,7 +835,7 @@
 
   // support for stub routines
   static ByteSize init_state_offset()  { return in_ByteSize(offset_of(InstanceKlass, _init_state)); }
-  TRACE_DEFINE_OFFSET;
+  JFR_ONLY(DEFINE_KLASS_TRACE_ID_OFFSET;)
   static ByteSize init_thread_offset() { return in_ByteSize(offset_of(InstanceKlass, _init_thread)); }
 
   // subclass/subinterface checks
--- a/src/share/vm/oops/klass.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/oops/klass.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -38,7 +38,6 @@
 #include "oops/oop.inline2.hpp"
 #include "runtime/atomic.inline.hpp"
 #include "runtime/orderAccess.inline.hpp"
-#include "trace/traceMacros.hpp"
 #include "utilities/stack.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
@@ -47,6 +46,9 @@
 #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
 #endif // INCLUDE_ALL_GCS
+#if INCLUDE_JFR
+#include "jfr/support/jfrTraceIdExtension.hpp"
+#endif
 
 bool Klass::is_cloneable() const {
   return _access_flags.is_cloneable() ||
@@ -197,7 +199,6 @@
   set_subklass(NULL);
   set_next_sibling(NULL);
   set_next_link(NULL);
-  TRACE_INIT_ID(this);
 
   set_prototype_header(markOopDesc::prototype());
   set_biased_lock_revocation_count(0);
@@ -526,6 +527,7 @@
 void Klass::remove_unshareable_info() {
   assert (DumpSharedSpaces, "only called for DumpSharedSpaces");
 
+  JFR_ONLY(REMOVE_ID(this);)
   set_subklass(NULL);
   set_next_sibling(NULL);
   // Clear the java mirror
@@ -537,7 +539,7 @@
 }
 
 void Klass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS) {
-  TRACE_INIT_ID(this);
+  JFR_ONLY(RESTORE_ID(this);)
   // If an exception happened during CDS restore, some of these fields may already be
   // set.  We leave the class on the CLD list, even if incomplete so that we don't
   // modify the CLD list outside a safepoint.
--- a/src/share/vm/oops/klass.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/oops/klass.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -32,7 +32,6 @@
 #include "oops/klassPS.hpp"
 #include "oops/metadata.hpp"
 #include "oops/oop.hpp"
-#include "trace/traceMacros.hpp"
 #include "utilities/accessFlags.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
@@ -40,6 +39,9 @@
 #include "gc_implementation/g1/g1OopClosures.hpp"
 #include "gc_implementation/parNew/parOopClosures.hpp"
 #endif // INCLUDE_ALL_GCS
+#if INCLUDE_JFR
+#include "jfr/support/jfrTraceIdExtension.hpp"
+#endif
 
 //
 // A Klass provides:
@@ -170,7 +172,7 @@
   markOop  _prototype_header;   // Used when biased locking is both enabled and disabled for this type
   jint     _biased_lock_revocation_count;
 
-  TRACE_DEFINE_KLASS_TRACE_ID;
+  JFR_ONLY(DEFINE_TRACE_ID_FIELD;)
 
   // Remembered sets support for the oops in the klasses.
   jbyte _modified_oops;             // Card Table Equivalent (YC/CMS support)
@@ -613,7 +615,7 @@
   jlong last_biased_lock_bulk_revocation_time() { return _last_biased_lock_bulk_revocation_time; }
   void  set_last_biased_lock_bulk_revocation_time(jlong cur_time) { _last_biased_lock_bulk_revocation_time = cur_time; }
 
-  TRACE_DEFINE_KLASS_METHODS;
+  JFR_ONLY(DEFINE_TRACE_ID_METHODS;)
 
   // garbage collection support
   virtual void oops_do(OopClosure* cl);
--- a/src/share/vm/oops/method.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/oops/method.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -37,6 +37,10 @@
 #include "oops/typeArrayOop.hpp"
 #include "utilities/accessFlags.hpp"
 #include "utilities/growableArray.hpp"
+#include "utilities/macros.hpp"
+#if INCLUDE_JFR
+#include "jfr/support/jfrTraceIdExtension.hpp"
+#endif
 
 // A Method* represents a Java method.
 //
@@ -116,6 +120,8 @@
                     _has_injected_profile : 1,
                                           : 2;
 
+  JFR_ONLY(DEFINE_TRACE_FLAG;)
+
 #ifndef PRODUCT
   int               _compiled_invocation_count;  // Number of nmethod invocations so far (for perf. debugging)
 #endif
@@ -805,6 +811,8 @@
   bool     has_injected_profile()       { return _has_injected_profile;     }
   void set_has_injected_profile(bool x) {        _has_injected_profile = x; }
 
+  JFR_ONLY(DEFINE_TRACE_FLAG_ACCESSOR;)
+
   ConstMethod::MethodType method_type() const {
       return _constMethod->method_type();
   }
--- a/src/share/vm/opto/bytecodeInfo.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/opto/bytecodeInfo.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -29,6 +29,7 @@
 #include "compiler/compileBroker.hpp"
 #include "compiler/compileLog.hpp"
 #include "interpreter/linkResolver.hpp"
+#include "jfr/jfrEvents.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "opto/callGenerator.hpp"
 #include "opto/parse.hpp"
@@ -479,9 +480,28 @@
   return NULL;
 }
 
+static void post_inlining_event(int compile_id, const char* msg, bool success, int bci, ciMethod* caller, ciMethod* callee) {
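+  // Post a JFR CompilerInlining event describing a single inlining decision (caller, callee, bci, outcome).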
+  assert(caller != NULL, "invariant");
+  assert(callee != NULL, "invariant");
+  EventCompilerInlining event;
+  if (event.should_commit()) {
+    JfrStructCalleeMethod callee_struct;
+    callee_struct.set_type(callee->holder()->name()->as_utf8());
+    callee_struct.set_name(callee->name()->as_utf8());
+    callee_struct.set_descriptor(callee->signature()->as_symbol()->as_utf8());
+    event.set_compileId(compile_id);
+    event.set_message(msg);
+    event.set_succeeded(success);
+    event.set_bci(bci);
+    event.set_caller(caller->get_Method());
+    event.set_callee(callee_struct);
+    event.commit();
+  }
+}
+
 //------------------------------print_inlining---------------------------------
 void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci,
-                                bool success) const {
+                                ciMethod* caller_method, bool success) const {
   const char* inline_msg = msg();
   assert(inline_msg != NULL, "just checking");
   if (C->log() != NULL) {
@@ -500,6 +520,7 @@
       //tty->print("  bcs: %d+%d  invoked: %d", top->count_inline_bcs(), callee_method->code_size(), callee_method->interpreter_invocation_count());
     }
   }
+  post_inlining_event(C->compile_id(), inline_msg, success, caller_bci, caller_method, callee_method);
 }
 
 //------------------------------ok_to_inline-----------------------------------
@@ -522,14 +543,14 @@
   // Do some initial checks.
   if (!pass_initial_checks(caller_method, caller_bci, callee_method)) {
     set_msg("failed initial checks");
-    print_inlining(callee_method, caller_bci, false /* !success */);
+    print_inlining(callee_method, caller_bci, caller_method, false /* !success */);
     return NULL;
   }
 
   // Do some parse checks.
   set_msg(check_can_parse(callee_method));
   if (msg() != NULL) {
-    print_inlining(callee_method, caller_bci, false /* !success */);
+    print_inlining(callee_method, caller_bci, caller_method, false /* !success */);
     return NULL;
   }
 
@@ -571,7 +592,7 @@
     if (msg() == NULL) {
       set_msg("inline (hot)");
     }
-    print_inlining(callee_method, caller_bci, true /* success */);
+    print_inlining(callee_method, caller_bci, caller_method, true /* success */);
     build_inline_tree_for_callee(callee_method, jvms, caller_bci);
     if (InlineWarmCalls && !wci.is_hot())
       return new (C) WarmCallInfo(wci);  // copy to heap
@@ -582,7 +603,7 @@
   if (msg() == NULL) {
     set_msg("too cold to inline");
   }
-  print_inlining(callee_method, caller_bci, false /* !success */ );
+  print_inlining(callee_method, caller_bci, caller_method, false /* !success */ );
   return NULL;
 }
 
--- a/src/share/vm/opto/compile.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/opto/compile.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -32,6 +32,7 @@
 #include "compiler/compileLog.hpp"
 #include "compiler/disassembler.hpp"
 #include "compiler/oopMap.hpp"
+#include "jfr/jfrEvents.hpp"
 #include "opto/addnode.hpp"
 #include "opto/block.hpp"
 #include "opto/c2compiler.hpp"
@@ -65,7 +66,6 @@
 #include "runtime/signature.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/timer.hpp"
-#include "trace/tracing.hpp"
 #include "utilities/copy.hpp"
 #if defined AD_MD_HPP
 # include AD_MD_HPP
@@ -81,7 +81,6 @@
 # include "adfiles/ad_ppc_64.hpp"
 #endif
 
-
 // -------------------- Compile::mach_constant_base_node -----------------------
 // Constant table base node singleton.
 MachConstantBaseNode* Compile::mach_constant_base_node() {
@@ -3595,13 +3594,6 @@
     _failure_reason = reason;
   }
 
-  EventCompilerFailure event;
-  if (event.should_commit()) {
-    event.set_compileID(Compile::compile_id());
-    event.set_failure(reason);
-    event.commit();
-  }
-
   if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
     C->print_method(PHASE_FAILURE);
   }
--- a/src/share/vm/opto/compile.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/opto/compile.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -31,6 +31,7 @@
 #include "code/exceptionHandlerTable.hpp"
 #include "compiler/compilerOracle.hpp"
 #include "compiler/compileBroker.hpp"
+#include "jfr/jfrEvents.hpp"
 #include "libadt/dict.hpp"
 #include "libadt/port.hpp"
 #include "libadt/vectset.hpp"
@@ -41,7 +42,6 @@
 #include "opto/regmask.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/vmThread.hpp"
-#include "trace/tracing.hpp"
 #include "utilities/ticks.hpp"
 
 class Block;
@@ -637,7 +637,7 @@
     if (event.should_commit()) {
       event.set_starttime(C->_latest_stage_start_counter);
       event.set_phase((u1) cpt);
-      event.set_compileID(C->_compile_id);
+      event.set_compileId(C->_compile_id);
       event.set_phaseLevel(level);
       event.commit();
     }
@@ -654,7 +654,7 @@
     if (event.should_commit()) {
       event.set_starttime(C->_latest_stage_start_counter);
       event.set_phase((u1) PHASE_END);
-      event.set_compileID(C->_compile_id);
+      event.set_compileId(C->_compile_id);
       event.set_phaseLevel(level);
       event.commit();
     }
--- a/src/share/vm/opto/library_call.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/opto/library_call.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -27,6 +27,7 @@
 #include "classfile/vmSymbols.hpp"
 #include "compiler/compileBroker.hpp"
 #include "compiler/compileLog.hpp"
+#include "jfr/support/jfrIntrinsics.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "opto/addnode.hpp"
 #include "opto/callGenerator.hpp"
@@ -40,7 +41,7 @@
 #include "opto/subnode.hpp"
 #include "prims/nativeLookup.hpp"
 #include "runtime/sharedRuntime.hpp"
-#include "trace/traceMacros.hpp"
+#include "utilities/macros.hpp"
 
 class LibraryIntrinsic : public InlineCallGenerator {
   // Extend the set of intrinsics known to the runtime:
@@ -236,9 +237,9 @@
   bool inline_unsafe_allocate();
   bool inline_unsafe_copyMemory();
   bool inline_native_currentThread();
-#ifdef TRACE_HAVE_INTRINSICS
+#ifdef JFR_HAVE_INTRINSICS
   bool inline_native_classID();
-  bool inline_native_threadID();
+  bool inline_native_getEventWriter();
 #endif
   bool inline_native_time_funcs(address method, const char* funcName);
   bool inline_native_isInterrupted();
@@ -879,10 +880,10 @@
   case vmIntrinsics::_currentThread:            return inline_native_currentThread();
   case vmIntrinsics::_isInterrupted:            return inline_native_isInterrupted();
 
-#ifdef TRACE_HAVE_INTRINSICS
-  case vmIntrinsics::_classID:                  return inline_native_classID();
-  case vmIntrinsics::_threadID:                 return inline_native_threadID();
-  case vmIntrinsics::_counterTime:              return inline_native_time_funcs(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), "counterTime");
+#ifdef JFR_HAVE_INTRINSICS
+  case vmIntrinsics::_counterTime:              return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JFR_TIME_FUNCTION), "counterTime");
+  case vmIntrinsics::_getClassId:               return inline_native_classID();
+  case vmIntrinsics::_getEventWriter:           return inline_native_getEventWriter();
 #endif
   case vmIntrinsics::_currentTimeMillis:        return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
   case vmIntrinsics::_nanoTime:                 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
@@ -3243,51 +3244,76 @@
   return true;
 }
 
-#ifdef TRACE_HAVE_INTRINSICS
+#ifdef JFR_HAVE_INTRINSICS
 /*
  * oop -> myklass
  * myklass->trace_id |= USED
  * return myklass->trace_id & ~0x3
  */
 bool LibraryCallKit::inline_native_classID() {
-  null_check_receiver();  // null-check, then ignore
-  Node* cls = null_check(argument(1), T_OBJECT);
+  Node* cls = null_check(argument(0), T_OBJECT);
   Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
   kls = null_check(kls, T_OBJECT);
-  ByteSize offset = TRACE_ID_OFFSET;
+
+  ByteSize offset = KLASS_TRACE_ID_OFFSET;
   Node* insp = basic_plus_adr(kls, in_bytes(offset));
   Node* tvalue = make_load(NULL, insp, TypeLong::LONG, T_LONG, MemNode::unordered);
-  Node* bits = longcon(~0x03l); // ignore bit 0 & 1
-  Node* andl = _gvn.transform(new (C) AndLNode(tvalue, bits));
+
   Node* clsused = longcon(0x01l); // set the class bit
   Node* orl = _gvn.transform(new (C) OrLNode(tvalue, clsused));
-
   const TypePtr *adr_type = _gvn.type(insp)->isa_ptr();
   store_to_memory(control(), insp, orl, T_LONG, adr_type, MemNode::unordered);
-  set_result(andl);
+
+#ifdef TRACE_ID_META_BITS
+  Node* mbits = longcon(~TRACE_ID_META_BITS);
+  tvalue = _gvn.transform(new (C) AndLNode(tvalue, mbits));
+#endif
+#ifdef TRACE_ID_SHIFT
+  Node* cbits = intcon(TRACE_ID_SHIFT);
+  tvalue = _gvn.transform(new (C) URShiftLNode(tvalue, cbits));
+#endif
+
+  set_result(tvalue);
   return true;
 }
 
-bool LibraryCallKit::inline_native_threadID() {
-  Node* tls_ptr = NULL;
-  Node* cur_thr = generate_current_thread(tls_ptr);
-  Node* p = basic_plus_adr(top()/*!oop*/, tls_ptr, in_bytes(JavaThread::osthread_offset()));
-  Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
-  p = basic_plus_adr(top()/*!oop*/, osthread, in_bytes(OSThread::thread_id_offset()));
-
-  Node* threadid = NULL;
-  size_t thread_id_size = OSThread::thread_id_size();
-  if (thread_id_size == (size_t) BytesPerLong) {
-    threadid = ConvL2I(make_load(control(), p, TypeLong::LONG, T_LONG, MemNode::unordered));
-  } else if (thread_id_size == (size_t) BytesPerInt) {
-    threadid = make_load(control(), p, TypeInt::INT, T_INT, MemNode::unordered);
-  } else {
-    ShouldNotReachHere();
-  }
-  set_result(threadid);
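+// Intrinsic for the JFR event writer lookup: loads the thread-local event writer handle from the
+// current thread and returns the writer object, or NULL if no writer has been installed yet.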
+bool LibraryCallKit::inline_native_getEventWriter() {
+  Node* tls_ptr = _gvn.transform(new (C) ThreadLocalNode());
+
+  Node* jobj_ptr = basic_plus_adr(top(), tls_ptr,
+                                  in_bytes(THREAD_LOCAL_WRITER_OFFSET_JFR)
+                                  );
+
+  Node* jobj = make_load(control(), jobj_ptr, TypeRawPtr::BOTTOM, T_ADDRESS, MemNode::unordered);
+
+  Node* jobj_cmp_null = _gvn.transform( new (C) CmpPNode(jobj, null()) );
+  Node* test_jobj_eq_null  = _gvn.transform( new (C) BoolNode(jobj_cmp_null, BoolTest::eq) );
+
+  IfNode* iff_jobj_null =
+    create_and_map_if(control(), test_jobj_eq_null, PROB_MIN, COUNT_UNKNOWN);
+
+  enum { _normal_path = 1,
+         _null_path = 2,
+         PATH_LIMIT };
+
+  RegionNode* result_rgn = new (C) RegionNode(PATH_LIMIT);
+  PhiNode*    result_val = new (C) PhiNode(result_rgn, TypePtr::BOTTOM);
+
+  Node* jobj_is_null = _gvn.transform(new (C) IfTrueNode(iff_jobj_null));
+  result_rgn->init_req(_null_path, jobj_is_null);
+  result_val->init_req(_null_path, null());
+
+  Node* jobj_is_not_null = _gvn.transform(new (C) IfFalseNode(iff_jobj_null));
+  result_rgn->init_req(_normal_path, jobj_is_not_null);
+
+  Node* res = make_load(jobj_is_not_null, jobj, TypeInstPtr::NOTNULL, T_OBJECT, MemNode::unordered);
+  result_val->init_req(_normal_path, res);
+
+  set_result(result_rgn, result_val);
+
   return true;
 }
-#endif
+#endif // JFR_HAVE_INTRINSICS
 
 //------------------------inline_native_time_funcs--------------
 // inline code for System.currentTimeMillis() and System.nanoTime()
--- a/src/share/vm/opto/parse.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/opto/parse.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -87,7 +87,7 @@
                                 JVMState* jvms,
                                 WarmCallInfo* wci_result);
   void        print_inlining(ciMethod* callee_method, int caller_bci,
-                             bool success) const;
+                             ciMethod* caller_method, bool success) const;
 
   InlineTree* caller_tree()       const { return _caller_tree;  }
   InlineTree* callee_at(int bci, ciMethod* m) const;
--- a/src/share/vm/opto/superword.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/opto/superword.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -201,6 +201,32 @@
   static const SWNodeInfo initial;
 };
 
+
+// JVMCI: OrderedPair is moved up to deal with compilation issues on Windows
+//------------------------------OrderedPair---------------------------
+// Ordered pair of Node*.
+class OrderedPair VALUE_OBJ_CLASS_SPEC {
+ protected:
+  Node* _p1;
+  Node* _p2;
+ public:
+  OrderedPair() : _p1(NULL), _p2(NULL) {}
+  OrderedPair(Node* p1, Node* p2) {
+    if (p1->_idx < p2->_idx) {
+      _p1 = p1; _p2 = p2;
+    } else {
+      _p1 = p2; _p2 = p1;
+    }
+  }
+
+  bool operator==(const OrderedPair &rhs) {
+    return _p1 == rhs._p1 && _p2 == rhs._p2;
+  }
+  void print() { tty->print("  (%d, %d)", _p1->_idx, _p2->_idx); }
+
+  static const OrderedPair initial;
+};
+
 // -----------------------------SuperWord---------------------------------
 // Transforms scalar operations into packed (superword) operations.
 class SuperWord : public ResourceObj {
@@ -505,29 +531,4 @@
   void print();
 };
 
-
-//------------------------------OrderedPair---------------------------
-// Ordered pair of Node*.
-class OrderedPair VALUE_OBJ_CLASS_SPEC {
- protected:
-  Node* _p1;
-  Node* _p2;
- public:
-  OrderedPair() : _p1(NULL), _p2(NULL) {}
-  OrderedPair(Node* p1, Node* p2) {
-    if (p1->_idx < p2->_idx) {
-      _p1 = p1; _p2 = p2;
-    } else {
-      _p1 = p2; _p2 = p1;
-    }
-  }
-
-  bool operator==(const OrderedPair &rhs) {
-    return _p1 == rhs._p1 && _p2 == rhs._p2;
-  }
-  void print() { tty->print("  (%d, %d)", _p1->_idx, _p2->_idx); }
-
-  static const OrderedPair initial;
-};
-
 #endif // SHARE_VM_OPTO_SUPERWORD_HPP
--- a/src/share/vm/prims/jni.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/prims/jni.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -32,6 +32,8 @@
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "interpreter/linkResolver.hpp"
+#include "jfr/jfrEvents.hpp"
+#include "jfr/support/jfrThreadId.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/ostream.hpp"
 #if INCLUDE_ALL_GCS
@@ -76,7 +78,6 @@
 #include "runtime/vm_operations.hpp"
 #include "services/memTracker.hpp"
 #include "services/runtimeService.hpp"
-#include "trace/tracing.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/dtrace.hpp"
 #include "utilities/events.hpp"
@@ -5019,6 +5020,14 @@
   return &jni_NativeInterface;
 }
 
+static void post_thread_start_event(const JavaThread* jt) {
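+  // Post a JFR ThreadStart event for the given thread; shared by the call sites below.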
+  assert(jt != NULL, "invariant");
+  EventThreadStart event;
+  if (event.should_commit()) {
+    event.set_thread(JFR_THREAD_ID(jt));
+    event.commit();
+  }
+}
 
 // Invocation API
 
@@ -5241,11 +5250,7 @@
        JvmtiExport::post_thread_start(thread);
     }
 
-    EventThreadStart event;
-    if (event.should_commit()) {
-      event.set_javalangthread(java_lang_Thread::thread_id(thread->threadObj()));
-      event.commit();
-    }
+    post_thread_start_event(thread);
 
 #ifndef PRODUCT
   #ifndef CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED
@@ -5456,11 +5461,7 @@
     JvmtiExport::post_thread_start(thread);
   }
 
-  EventThreadStart event;
-  if (event.should_commit()) {
-    event.set_javalangthread(java_lang_Thread::thread_id(thread->threadObj()));
-    event.commit();
-  }
+  post_thread_start_event(thread);
 
   *(JNIEnv**)penv = thread->jni_environment();
 
--- a/src/share/vm/prims/jvm.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/prims/jvm.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -37,6 +37,7 @@
 #include "classfile/vmSymbols.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
 #include "interpreter/bytecode.hpp"
+#include "jfr/jfrEvents.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/referenceType.hpp"
 #include "memory/universe.inline.hpp"
@@ -67,7 +68,6 @@
 #include "services/attachListener.hpp"
 #include "services/management.hpp"
 #include "services/threadService.hpp"
-#include "trace/tracing.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/dtrace.hpp"
@@ -3276,6 +3276,12 @@
   }
 JVM_END
 
+static void post_thread_sleep_event(EventThreadSleep* event, jlong millis) {
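+  // Complete and commit a pending JFR ThreadSleep event with the requested sleep time.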
+  assert(event != NULL, "invariant");
+  assert(event->should_commit(), "invariant");
+  event->set_time(millis);
+  event->commit();
+}
 
 JVM_ENTRY(void, JVM_Sleep(JNIEnv* env, jclass threadClass, jlong millis))
   JVMWrapper("JVM_Sleep");
@@ -3322,8 +3328,7 @@
       // us while we were sleeping. We do not overwrite those.
       if (!HAS_PENDING_EXCEPTION) {
         if (event.should_commit()) {
-          event.set_time(millis);
-          event.commit();
+          post_thread_sleep_event(&event, millis);
         }
 #ifndef USDT2
         HS_DTRACE_PROBE1(hotspot, thread__sleep__end,1);
@@ -3339,8 +3344,7 @@
     thread->osthread()->set_state(old_state);
   }
   if (event.should_commit()) {
-    event.set_time(millis);
-    event.commit();
+    post_thread_sleep_event(&event, millis);
   }
 #ifndef USDT2
   HS_DTRACE_PROBE1(hotspot, thread__sleep__end,0);
--- a/src/share/vm/prims/nativeLookup.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/prims/nativeLookup.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -41,6 +41,9 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/signature.hpp"
 #include "utilities/macros.hpp"
+#if INCLUDE_JFR
+#include "jfr/jfr.hpp"
+#endif
 #ifdef TARGET_OS_FAMILY_linux
 # include "os_linux.inline.hpp"
 #endif
@@ -136,6 +139,9 @@
   { CC"Java_java_lang_invoke_MethodHandleNatives_registerNatives", NULL, FN_PTR(JVM_RegisterMethodHandleMethods) },
   { CC"Java_sun_misc_Perf_registerNatives",                        NULL, FN_PTR(JVM_RegisterPerfMethods)         },
   { CC"Java_sun_hotspot_WhiteBox_registerNatives",                 NULL, FN_PTR(JVM_RegisterWhiteBoxMethods)     },
+#if INCLUDE_JFR
+  { CC"Java_jdk_jfr_internal_JVM_registerNatives",                 NULL, FN_PTR(jfr_register_natives)            },
+#endif
 };
 
 static address lookup_special_native(char* jni_name) {
--- a/src/share/vm/prims/unsafe.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/prims/unsafe.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -28,6 +28,7 @@
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #endif // INCLUDE_ALL_GCS
+#include "jfr/jfrEvents.hpp"
 #include "memory/allocation.inline.hpp"
 #include "prims/jni.h"
 #include "prims/jvm.h"
@@ -38,7 +39,6 @@
 #include "runtime/reflection.hpp"
 #include "runtime/synchronizer.hpp"
 #include "services/threadService.hpp"
-#include "trace/tracing.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/dtrace.hpp"
 
@@ -1236,6 +1236,15 @@
 #endif
 UNSAFE_END
 
+static void post_thread_park_event(EventThreadPark* event, const oop obj, jlong timeout) {
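+  // Complete and commit a pending JFR ThreadPark event with the blocker's class, the timeout and the blocker's address.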
+  assert(event != NULL, "invariant");
+  assert(event->should_commit(), "invariant");
+  event->set_parkedClass((obj != NULL) ? obj->klass() : NULL);
+  event->set_timeout(timeout);
+  event->set_address((obj != NULL) ? (u8)cast_from_oop<uintptr_t>(obj) : 0);
+  event->commit();
+}
+
 UNSAFE_ENTRY(void, Unsafe_Park(JNIEnv *env, jobject unsafe, jboolean isAbsolute, jlong time))
   UnsafeWrapper("Unsafe_Park");
   EventThreadPark event;
@@ -1254,11 +1263,7 @@
                           (uintptr_t) thread->parker());
 #endif /* USDT2 */
   if (event.should_commit()) {
-    oop obj = thread->current_park_blocker();
-    event.set_klass((obj != NULL) ? obj->klass() : NULL);
-    event.set_timeout(time);
-    event.set_address((obj != NULL) ? (TYPE_ADDRESS) cast_from_oop<uintptr_t>(obj) : 0);
-    event.commit();
+    post_thread_park_event(&event, thread->current_park_blocker(), time);
   }
 UNSAFE_END
 
--- a/src/share/vm/runtime/arguments.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/runtime/arguments.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -44,6 +44,9 @@
 #include "utilities/macros.hpp"
 #include "utilities/stringUtils.hpp"
 #include "utilities/taskqueue.hpp"
+#if INCLUDE_JFR
+#include "jfr/jfr.hpp"
+#endif
 #ifdef TARGET_OS_FAMILY_linux
 # include "os_linux.inline.hpp"
 #endif
@@ -155,6 +158,20 @@
   }
 }
 
+#if INCLUDE_JFR
+// return true on failure
+static bool match_jfr_option(const JavaVMOption** option) {
+  assert((*option)->optionString != NULL, "invariant");
+  char* tail = NULL;
+  if (match_option(*option, "-XX:StartFlightRecording", (const char**)&tail)) {
+    return Jfr::on_start_flight_recording_option(option, tail);
+  } else if (match_option(*option, "-XX:FlightRecorderOptions", (const char**)&tail)) {
+    return Jfr::on_flight_recorder_option(option, tail);
+  }
+  return false;
+}
+#endif
+
 static void logOption(const char* opt) {
   if (PrintVMOptions) {
     jio_fprintf(defaultStream::output_stream(), "VM option '%s'\n", opt);
@@ -3399,6 +3416,10 @@
           "ManagementServer is not supported in this VM.\n");
         return JNI_ERR;
 #endif // INCLUDE_MANAGEMENT
+#if INCLUDE_JFR
+    } else if (match_jfr_option(&option)) {
+      return JNI_EINVAL;
+#endif
     } else if (match_option(option, "-XX:", &tail)) { // -XX:xxxx
       // Skip -XX:Flags= since that case has already been handled
       if (strncmp(tail, "Flags=", strlen("Flags=")) != 0) {
--- a/src/share/vm/runtime/frame.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/runtime/frame.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -239,6 +239,19 @@
   return NULL;
 }
 
+bool frame::is_entry_frame_valid(JavaThread* thread) const {
+  // Validate the JavaCallWrapper an entry frame must have
+  address jcw = (address)entry_frame_call_wrapper();
+  bool jcw_safe = (jcw < thread->stack_base()) && (jcw > (address)fp()); // less than stack base
+  if (!jcw_safe) {
+    return false;
+  }
+
+  // Validate sp saved in the java frame anchor
+  JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
+  return (jfa->last_Java_sp() > sp());
+}
+
 bool frame::should_be_deoptimized() const {
   if (_deopt_state == is_deoptimized ||
       !is_compiled_frame() ) return false;
--- a/src/share/vm/runtime/frame.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/runtime/frame.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -181,6 +181,8 @@
   frame sender_for_interpreter_frame(RegisterMap* map) const;
   frame sender_for_native_frame(RegisterMap* map) const;
 
+  bool is_entry_frame_valid(JavaThread* thread) const;
+
   // All frames:
 
   // A low-level interface for vframes:
--- a/src/share/vm/runtime/globals.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/runtime/globals.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "jfr/jfrEvents.hpp"
 #include "memory/allocation.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/arguments.hpp"
@@ -31,7 +32,6 @@
 #include "utilities/ostream.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/top.hpp"
-#include "trace/tracing.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/g1_globals.hpp"
 #endif // INCLUDE_ALL_GCS
@@ -610,8 +610,8 @@
 {
   E e;
   e.set_name(name);
-  e.set_old_value(old_value);
-  e.set_new_value(new_value);
+  e.set_oldValue(old_value);
+  e.set_newValue(new_value);
   e.set_origin(origin);
   e.commit();
 }
--- a/src/share/vm/runtime/globals.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/runtime/globals.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -204,6 +204,10 @@
 
 #endif // no compilers
 
+#if !INCLUDE_JFR
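+// JFR is excluded from this build: define LogJFR as a constant so code guarded by it still compiles.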
+#define LogJFR false
+#endif
+
 // string type aliases used only in this file
 typedef const char* ccstr;
 typedef const char* ccstrlist;   // represents string arguments which accumulate
@@ -3982,15 +3986,27 @@
           "Allocation less than this value will be allocated "              \
           "using malloc. Larger allocations will use mmap.")                \
                                                                             \
-  product(bool, EnableTracing, false,                                       \
-          "Enable event-based tracing")                                     \
-                                                                            \
-  product(bool, UseLockedTracing, false,                                    \
-          "Use locked-tracing when doing event-based tracing")              \
-                                                                            \
   product_pd(bool, PreserveFramePointer,                                    \
              "Use the FP register for holding the frame pointer "           \
-             "and not as a general purpose register.")
+             "and not as a general purpose register.")                      \
+                                                                            \
+  JFR_ONLY(product(bool, FlightRecorder, false,                             \
+          "Enable Flight Recorder"))                                        \
+                                                                            \
+  JFR_ONLY(product(ccstr, FlightRecorderOptions, NULL,                      \
+          "Flight Recorder options"))                                       \
+                                                                            \
+  JFR_ONLY(product(ccstr, StartFlightRecording, NULL,                       \
+          "Start flight recording with options"))                           \
+                                                                            \
+  JFR_ONLY(product(bool, UnlockCommercialFeatures, false,                   \
+          "This flag is ignored. Left for compatibility"))                  \
+                                                                            \
+  experimental(bool, UseFastUnorderedTimeStamps, false,                     \
+          "Use platform unstable time where supported for timestamps only") \
+                                                                            \
+  JFR_ONLY(product(bool, LogJFR, false,                                     \
+          "Enable JFR logging (consider +Verbose)"))                        \
 
 /*
  *  Macros for factoring of globals
--- a/src/share/vm/runtime/handles.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/runtime/handles.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -72,7 +72,8 @@
   while (bottom < top) {
     // This test can be moved up but for now check every oop.
 
-    assert((*bottom)->is_oop(), "handle should point to oop");
+    // JFR is known to set the mark word to 0 for the duration of the leak analysis VM operation
+    assert((*bottom)->is_oop(INCLUDE_JFR), "handle should point to oop");
 
     f->do_oop(bottom++);
   }
--- a/src/share/vm/runtime/java.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/runtime/java.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -30,6 +30,8 @@
 #include "compiler/compileBroker.hpp"
 #include "compiler/compilerOracle.hpp"
 #include "interpreter/bytecodeHistogram.hpp"
+#include "jfr/jfrEvents.hpp"
+#include "jfr/support/jfrThreadId.hpp"
 #include "memory/genCollectedHeap.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/universe.hpp"
@@ -58,7 +60,6 @@
 #include "runtime/timer.hpp"
 #include "runtime/vm_operations.hpp"
 #include "services/memTracker.hpp"
-#include "trace/tracing.hpp"
 #include "utilities/dtrace.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/histogram.hpp"
@@ -95,6 +96,9 @@
 #include "opto/indexSet.hpp"
 #include "opto/runtime.hpp"
 #endif
+#if INCLUDE_JFR
+#include "jfr/jfr.hpp"
+#endif
 
 #ifndef USDT2
 HS_DTRACE_PROBE_DECL(hotspot, vm__shutdown);
@@ -523,10 +527,12 @@
 
   EventThreadEnd event;
   if (event.should_commit()) {
-      event.set_javalangthread(java_lang_Thread::thread_id(thread->threadObj()));
-      event.commit();
+    event.set_thread(JFR_THREAD_ID(thread));
+    event.commit();
   }
 
+  JFR_ONLY(Jfr::on_vm_shutdown();)
+
   // Always call even when there are no JVMTI environments yet, since environments
   // may be attached late and JVMTI must track phases of VM execution
   JvmtiExport::post_vm_death();
--- a/src/share/vm/runtime/mutexLocker.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/runtime/mutexLocker.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -126,12 +126,16 @@
 Monitor* Service_lock                 = NULL;
 Monitor* PeriodicTask_lock            = NULL;
 
-#ifdef INCLUDE_TRACE
+#ifdef INCLUDE_JFR
 Mutex*   JfrStacktrace_lock           = NULL;
 Monitor* JfrMsg_lock                  = NULL;
 Mutex*   JfrBuffer_lock               = NULL;
 Mutex*   JfrStream_lock               = NULL;
 Mutex*   JfrThreadGroups_lock         = NULL;
+
+#ifndef SUPPORTS_NATIVE_CX8
+Mutex*   JfrCounters_lock             = NULL;
+#endif
 #endif
 
 #ifndef SUPPORTS_NATIVE_CX8
@@ -280,12 +284,16 @@
   def(CompileThread_lock           , Monitor, nonleaf+5,   false );
   def(PeriodicTask_lock            , Monitor, nonleaf+5,   true);
 
-#ifdef INCLUDE_TRACE
+#ifdef INCLUDE_JFR
   def(JfrMsg_lock                  , Monitor, leaf,        true);
   def(JfrBuffer_lock               , Mutex,   leaf,        true);
   def(JfrThreadGroups_lock         , Mutex,   leaf,        true);
   def(JfrStream_lock               , Mutex,   nonleaf,     true);
   def(JfrStacktrace_lock           , Mutex,   special,     true);
+
+#ifndef SUPPORTS_NATIVE_CX8
+  def(JfrCounters_lock             , Mutex,   special,     false);
+#endif
 #endif
 
 #ifndef SUPPORTS_NATIVE_CX8
--- a/src/share/vm/runtime/mutexLocker.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/runtime/mutexLocker.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -142,12 +142,17 @@
 extern Monitor* Service_lock;                    // a lock used for service thread operation
 extern Monitor* PeriodicTask_lock;               // protects the periodic task structure
 
-#ifdef INCLUDE_TRACE
+#ifdef INCLUDE_JFR
 extern Mutex*   JfrStacktrace_lock;              // used to guard access to the JFR stacktrace table
 extern Monitor* JfrMsg_lock;                     // protects JFR messaging
 extern Mutex*   JfrBuffer_lock;                  // protects JFR buffer operations
 extern Mutex*   JfrStream_lock;                  // protects JFR stream access
 extern Mutex*   JfrThreadGroups_lock;            // protects JFR access to Thread Groups
+
+#ifndef SUPPORTS_NATIVE_CX8
+extern Mutex*   JfrCounters_lock;                // provides atomic updates of JFR counters
+#endif
+
 #endif
 
 #ifndef SUPPORTS_NATIVE_CX8
--- a/src/share/vm/runtime/objectMonitor.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/runtime/objectMonitor.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -24,6 +24,8 @@
 
 #include "precompiled.hpp"
 #include "classfile/vmSymbols.hpp"
+#include "jfr/jfrEvents.hpp"
+#include "jfr/support/jfrThreadId.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/markOop.hpp"
 #include "oops/oop.inline.hpp"
@@ -37,8 +39,6 @@
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.inline.hpp"
 #include "services/threadService.hpp"
-#include "trace/tracing.hpp"
-#include "trace/traceMacros.hpp"
 #include "utilities/dtrace.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/preserveException.hpp"
@@ -54,6 +54,9 @@
 #ifdef TARGET_OS_FAMILY_bsd
 # include "os_bsd.inline.hpp"
 #endif
+#if INCLUDE_JFR
+#include "jfr/support/jfrFlush.hpp"
+#endif
 
 #if defined(__GNUC__) && !defined(IA64) && !defined(PPC64)
   // Need to inhibit inlining for older versions of GCC to avoid build-time failures
@@ -376,7 +379,12 @@
   // Ensure the object-monitor relationship remains stable while there's contention.
   Atomic::inc_ptr(&_count);
 
+  JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
   EventJavaMonitorEnter event;
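+  // Capture the monitor's class and address up front; previousOwner is filled in and the event committed after the contended enter completes.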
+  if (event.should_commit()) {
+    event.set_monitorClass(((oop)this->object())->klass());
+    event.set_address((uintptr_t)(this->object_addr()));
+  }
 
   { // Change java thread status to indicate blocked on monitor enter.
     JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
@@ -465,9 +473,7 @@
   }
 
   if (event.should_commit()) {
-    event.set_klass(((oop)this->object())->klass());
-    event.set_previousOwner((TYPE_JAVALANGTHREAD)_previous_owner_tid);
-    event.set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
+    event.set_previousOwner((uintptr_t)_previous_owner_tid);
     event.commit();
   }
 
@@ -990,11 +996,11 @@
       _Responsible = NULL ;
    }
 
-#if INCLUDE_TRACE
+#if INCLUDE_JFR
    // get the owner's thread id for the MonitorEnter event
    // if it is enabled and the thread isn't suspended
-   if (not_suspended && Tracing::is_event_enabled(TraceJavaMonitorEnterEvent)) {
-     _previous_owner_tid = SharedRuntime::get_java_tid(Self);
+   if (not_suspended && EventJavaMonitorEnter::is_enabled()) {
+    _previous_owner_tid = JFR_THREAD_ID(Self);
    }
 #endif
 
@@ -1443,15 +1449,17 @@
 }
 
 // helper method for posting a monitor wait event
-void ObjectMonitor::post_monitor_wait_event(EventJavaMonitorWait* event,
-                                                           jlong notifier_tid,
-                                                           jlong timeout,
-                                                           bool timedout) {
-  event->set_klass(((oop)this->object())->klass());
-  event->set_timeout((TYPE_ULONG)timeout);
-  event->set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
-  event->set_notifier((TYPE_OSTHREAD)notifier_tid);
-  event->set_timedOut((TYPE_BOOLEAN)timedout);
+static void post_monitor_wait_event(EventJavaMonitorWait* event,
+                                    ObjectMonitor* monitor,
+                                    jlong notifier_tid,
+                                    jlong timeout,
+                                    bool timedout) {
+  assert(monitor != NULL, "invariant");
+  event->set_monitorClass(((oop)monitor->object())->klass());
+  event->set_timeout(timeout);
+  event->set_address((uintptr_t)monitor->object_addr());
+  event->set_notifier((u8)notifier_tid);
+  event->set_timedOut(timedout);
   event->commit();
 }
 
@@ -1489,7 +1497,7 @@
         // this ObjectMonitor.
      }
      if (event.should_commit()) {
-       post_monitor_wait_event(&event, 0, millis, false);
+       post_monitor_wait_event(&event, this, 0, millis, false);
      }
      TEVENT (Wait - Throw IEX) ;
      THROW(vmSymbols::java_lang_InterruptedException());
@@ -1633,7 +1641,7 @@
      }
 
      if (event.should_commit()) {
-       post_monitor_wait_event(&event, node._notifier_tid, millis, ret == OS_TIMEOUT);
+       post_monitor_wait_event(&event, this, node._notifier_tid, millis, ret == OS_TIMEOUT);
      }
 
      OrderAccess::fence() ;
@@ -1716,7 +1724,7 @@
      }
      iterator->_notified = 1 ;
      Thread * Self = THREAD;
-     iterator->_notifier_tid = Self->osthread()->thread_id();
+     iterator->_notifier_tid = JFR_THREAD_ID(Self);
 
      ObjectWaiter * List = _EntryList ;
      if (List != NULL) {
@@ -1842,7 +1850,7 @@
      guarantee (iterator->_notified == 0, "invariant") ;
      iterator->_notified = 1 ;
      Thread * Self = THREAD;
-     iterator->_notifier_tid = Self->osthread()->thread_id();
+     iterator->_notifier_tid = JFR_THREAD_ID(Self);
      if (Policy != 4) {
         iterator->TState = ObjectWaiter::TS_ENTER ;
      }
--- a/src/share/vm/runtime/objectMonitor.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/runtime/objectMonitor.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -55,9 +55,6 @@
   void wait_reenter_end(ObjectMonitor *mon);
 };
 
-// forward declaration to avoid include tracing.hpp
-class EventJavaMonitorWait;
-
 // WARNING:
 //   This is a very sensitive and fragile class. DO NOT make any
 // change unless you are fully aware of the underlying semantics.
@@ -224,10 +221,6 @@
   void      ctAsserts () ;
   void      ExitEpilog (Thread * Self, ObjectWaiter * Wakee) ;
   bool      ExitSuspendEquivalent (JavaThread * Self) ;
-  void      post_monitor_wait_event(EventJavaMonitorWait * event,
-                                                   jlong notifier_tid,
-                                                   jlong timeout,
-                                                   bool timedout);
 
  private:
   friend class ObjectSynchronizer;
--- a/src/share/vm/runtime/os.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/runtime/os.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -556,6 +556,7 @@
   //File i/o operations
 
   static size_t read(int fd, void *buf, unsigned int nBytes);
+  static size_t read_at(int fd, void *buf, unsigned int nBytes, jlong offset);
   static size_t restartable_read(int fd, void *buf, unsigned int nBytes);
   static size_t write(int fd, const void *buf, unsigned int nBytes);
 
@@ -604,6 +605,16 @@
   // Unload library
   static void  dll_unload(void *lib);
 
+  // Callback for loaded module information
+  // Input parameters:
+  //    char*     module_file_name,
+  //    address   module_base_addr,
+  //    address   module_top_addr,
+  //    void*     param
+  typedef int (*LoadedModulesCallbackFunc)(const char *, address, address, void *);
+
+  static int get_loaded_modules_info(LoadedModulesCallbackFunc callback, void *param);
+
   // Return the handle of this process
   static void* get_default_process_handle();
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/runtime/os_perf.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,292 @@
+/*
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_OS_PERF_HPP
+#define SHARE_VM_RUNTIME_OS_PERF_HPP
+
+#include "utilities/macros.hpp"
+#include "memory/allocation.inline.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+#define FUNCTIONALITY_NOT_IMPLEMENTED -8
+
+// XXX
+//class EnvironmentVariable : public CHeapObj<mtInternal> {
+// public:
+//  char* _key;
+//  char* _value;
+//
+//  EnvironmentVariable() {
+//    _key = NULL;
+//    _value = NULL;
+//  }
+//
+//  ~EnvironmentVariable() {
+//    if (_key != NULL) {
+//      FREE_C_HEAP_ARRAY(char, _key);
+//    }
+//    if (_value != NULL) {
+//      FREE_C_HEAP_ARRAY(char, _value);
+//    }
+//  }
+//
+//  EnvironmentVariable(char* key, char* value) {
+//    _key = key;
+//    _value = value;
+//  }
+//
+//};
+
+
+class CPUInformation : public CHeapObj<mtInternal> {
+ private:
+  int   _no_of_sockets;
+  int   _no_of_cores;
+  int   _no_of_hw_threads;
+  const char* _description;
+  const char* _name;
+
+ public:
+  CPUInformation() {
+    _no_of_sockets = 0;
+    _no_of_cores = 0;
+    _no_of_hw_threads = 0;
+    _description = NULL;
+    _name = NULL;
+  }
+
+  int number_of_sockets(void) const {
+    return _no_of_sockets;
+  }
+
+  void set_number_of_sockets(int no_of_sockets) {
+    _no_of_sockets = no_of_sockets;
+  }
+
+  int number_of_cores(void) const {
+    return _no_of_cores;
+  }
+
+  void set_number_of_cores(int no_of_cores) {
+    _no_of_cores = no_of_cores;
+  }
+
+  int number_of_hardware_threads(void) const {
+    return _no_of_hw_threads;
+  }
+
+  void set_number_of_hardware_threads(int no_of_hw_threads) {
+    _no_of_hw_threads = no_of_hw_threads;
+  }
+
+  const char* cpu_name(void)  const {
+    return _name;
+  }
+
+  void set_cpu_name(const char* cpu_name) {
+    _name = cpu_name;
+  }
+
+  const char* cpu_description(void) const {
+    return _description;
+  }
+
+  void set_cpu_description(const char* cpu_description) {
+    _description = cpu_description;
+  }
+};
+
+class SystemProcess : public CHeapObj<mtInternal> {
+ private:
+  int   _pid;
+  char* _name;
+  char* _path;
+  char* _command_line;
+  SystemProcess* _next;
+
+ public:
+  SystemProcess() {
+    _pid  = 0;
+    _name = NULL;
+    _path = NULL;
+    _command_line = NULL;
+    _next = NULL;
+  }
+
+  SystemProcess(int pid, char* name, char* path, char* command_line, SystemProcess* next) {
+    _pid = pid;
+    _name = name;
+    _path = path;
+    _command_line = command_line;
+    _next = next;
+  }
+
+  void set_next(SystemProcess* sys_process) {
+    _next = sys_process;
+  }
+
+  SystemProcess* next(void) const {
+    return _next;
+  }
+
+  int pid(void) const {
+    return _pid;
+  }
+
+  void set_pid(int pid) {
+    _pid = pid;
+  }
+
+  const char* name(void) const {
+    return _name;
+  }
+
+  void set_name(char* name) {
+    _name = name;
+  }
+
+  const char* path(void) const {
+    return _path;
+  }
+
+  void set_path(char* path) {
+    _path = path;
+  }
+
+  const char* command_line(void) const {
+    return _command_line;
+  }
+
+  void set_command_line(char* command_line) {
+    _command_line = command_line;
+  }
+
+  virtual ~SystemProcess(void) {
+    if (_name != NULL) {
+      FREE_C_HEAP_ARRAY(char, _name, mtInternal);
+    }
+    if (_path != NULL) {
+      FREE_C_HEAP_ARRAY(char, _path, mtInternal);
+    }
+    if (_command_line != NULL) {
+      FREE_C_HEAP_ARRAY(char, _command_line, mtInternal);
+    }
+  }
+};
+
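+// A node in a singly linked, resource-allocated list of per-interface byte counters.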
+class NetworkInterface : public ResourceObj {
+ private:
+  char* _name;
+  uint64_t _bytes_in;
+  uint64_t _bytes_out;
+  NetworkInterface* _next;
+
+  NetworkInterface(); // no impl
+  NetworkInterface(const NetworkInterface& rhs); // no impl
+  NetworkInterface& operator=(const NetworkInterface& rhs); // no impl
+ public:
+  NetworkInterface(const char* name, uint64_t bytes_in, uint64_t bytes_out, NetworkInterface* next) :
+  _name(NULL),
+  _bytes_in(bytes_in),
+  _bytes_out(bytes_out),
+  _next(next) {
+    assert(name != NULL, "invariant");
+    const size_t length = strlen(name);
+    assert(allocated_on_res_area(), "invariant");
+    _name = NEW_RESOURCE_ARRAY(char, length + 1);
+    strncpy(_name, name, length + 1);
+    assert(strncmp(_name, name, length) == 0, "invariant");
+  }
+
+  NetworkInterface* next() const {
+    return _next;
+  }
+
+  const char* get_name() const {
+    return _name;
+  }
+
+  uint64_t get_bytes_out() const {
+    return _bytes_out;
+  }
+
+  uint64_t get_bytes_in() const {
+    return _bytes_in;
+  }
+};
+
+class CPUInformationInterface : public CHeapObj<mtInternal> {
+ private:
+  CPUInformation* _cpu_info;
+ public:
+  CPUInformationInterface();
+  bool initialize();
+  ~CPUInformationInterface();
+  int cpu_information(CPUInformation& cpu_info);
+};
+
+class CPUPerformanceInterface : public CHeapObj<mtInternal> {
+ private:
+  class CPUPerformance;
+  CPUPerformance* _impl;
+ public:
+  CPUPerformanceInterface();
+  ~CPUPerformanceInterface();
+  bool initialize();
+
+  int cpu_load(int which_logical_cpu, double* const cpu_load) const;
+  int context_switch_rate(double* const rate) const;
+  int cpu_load_total_process(double* const cpu_load) const;
+  int cpu_loads_process(double* const pjvmUserLoad,
+                        double* const pjvmKernelLoad,
+                        double* const psystemTotalLoad) const;
+};
+
+class SystemProcessInterface : public CHeapObj<mtInternal> {
+ private:
+   class SystemProcesses;
+   SystemProcesses* _impl;
+ public:
+   SystemProcessInterface();
+   ~SystemProcessInterface();
+   bool initialize();
+
+  // information about system processes
+  int system_processes(SystemProcess** system_procs, int* const no_of_sys_processes) const;
+};
+
+class NetworkPerformanceInterface : public CHeapObj<mtInternal> {
+ private:
+  class NetworkPerformance;
+  NetworkPerformance* _impl;
+  NetworkPerformanceInterface(const NetworkPerformanceInterface& rhs); // no impl
+  NetworkPerformanceInterface& operator=(const NetworkPerformanceInterface& rhs); // no impl
+ public:
+  NetworkPerformanceInterface();
+  bool initialize();
+  ~NetworkPerformanceInterface();
+  int network_utilization(NetworkInterface** network_interfaces) const;
+};
+
+#endif // SHARE_VM_RUNTIME_OS_PERF_HPP
--- a/src/share/vm/runtime/safepoint.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/runtime/safepoint.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -32,6 +32,7 @@
 #include "code/scopeDesc.hpp"
 #include "gc_interface/collectedHeap.hpp"
 #include "interpreter/interpreter.hpp"
+#include "jfr/jfrEvents.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.inline.hpp"
 #include "oops/oop.inline.hpp"
@@ -83,6 +84,73 @@
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
+template <typename E>
+static void set_current_safepoint_id(E* event, int adjustment = 0) {
+  assert(event != NULL, "invariant");
+  event->set_safepointId(SafepointSynchronize::safepoint_counter() + adjustment);
+}
+
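+// The helpers below post the JFR safepoint events; the safepointId field groups events belonging to the same safepoint.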
+static void post_safepoint_begin_event(EventSafepointBegin* event,
+                                       int thread_count,
+                                       int critical_thread_count) {
+  assert(event != NULL, "invariant");
+  assert(event->should_commit(), "invariant");
+  set_current_safepoint_id(event);
+  event->set_totalThreadCount(thread_count);
+  event->set_jniCriticalThreadCount(critical_thread_count);
+  event->commit();
+}
+
+static void post_safepoint_cleanup_event(EventSafepointCleanup* event) {
+  assert(event != NULL, "invariant");
+  assert(event->should_commit(), "invariant");
+  set_current_safepoint_id(event);
+  event->commit();
+}
+
+static void post_safepoint_synchronize_event(EventSafepointStateSynchronization* event,
+                                             int initial_number_of_threads,
+                                             int threads_waiting_to_block,
+                                             unsigned int iterations) {
+  assert(event != NULL, "invariant");
+  if (event->should_commit()) {
+    // Group this event together with the ones committed after the counter is increased
+    set_current_safepoint_id(event, 1);
+    event->set_initialThreadCount(initial_number_of_threads);
+    event->set_runningThreadCount(threads_waiting_to_block);
+    event->set_iterations(iterations);
+    event->commit();
+  }
+}
+
+static void post_safepoint_wait_blocked_event(EventSafepointWaitBlocked* event,
+                                              int initial_threads_waiting_to_block) {
+  assert(event != NULL, "invariant");
+  assert(event->should_commit(), "invariant");
+  set_current_safepoint_id(event);
+  event->set_runningThreadCount(initial_threads_waiting_to_block);
+  event->commit();
+}
+
+static void post_safepoint_cleanup_task_event(EventSafepointCleanupTask* event,
+                                              const char* name) {
+  assert(event != NULL, "invariant");
+  if (event->should_commit()) {
+    set_current_safepoint_id(event);
+    event->set_name(name);
+    event->commit();
+  }
+}
+
+static void post_safepoint_end_event(EventSafepointEnd* event) {
+  assert(event != NULL, "invariant");
+  if (event->should_commit()) {
+    // Group this event together with the ones committed before the counter increased
+    set_current_safepoint_id(event, -1);
+    event->commit();
+  }
+}
+
 // --------------------------------------------------------------------------------------------------
 // Implementation of Safepoint begin/end
 
@@ -97,7 +165,7 @@
 
 // Roll all threads forward to a safepoint and suspend them all
 void SafepointSynchronize::begin() {
-
+  EventSafepointBegin begin_event;
   Thread* myThread = Thread::current();
   assert(myThread->is_VM_thread(), "Only VM thread may execute a safepoint");
 
@@ -189,6 +257,9 @@
   //     between states, the safepointing code will wait for the thread to
   //     block itself when it attempts transitions to a new state.
   //
+  EventSafepointStateSynchronization sync_event;
+  int initial_running = 0;
+
   _state            = _synchronizing;
   OrderAccess::fence();
 
@@ -243,8 +314,11 @@
       }
     }
 
-    if (PrintSafepointStatistics && iterations == 0) {
-      begin_statistics(nof_threads, still_running);
+    if (iterations == 0) {
+      initial_running = still_running;
+      if (PrintSafepointStatistics) {
+        begin_statistics(nof_threads, still_running);
+      }
     }
 
     if (still_running > 0) {
@@ -336,43 +410,56 @@
     update_statistics_on_spin_end();
   }
 
+  if (sync_event.should_commit()) {
+    post_safepoint_synchronize_event(&sync_event, initial_running, _waiting_to_block, iterations);
+  }
+
   // wait until all threads are stopped
-  while (_waiting_to_block > 0) {
-    if (TraceSafepoint) tty->print_cr("Waiting for %d thread(s) to block", _waiting_to_block);
-    if (!SafepointTimeout || timeout_error_printed) {
-      Safepoint_lock->wait(true);  // true, means with no safepoint checks
-    } else {
-      // Compute remaining time
-      jlong remaining_time = safepoint_limit_time - os::javaTimeNanos();
+  {
+    EventSafepointWaitBlocked wait_blocked_event;
+    int initial_waiting_to_block = _waiting_to_block;
 
-      // If there is no remaining time, then there is an error
-      if (remaining_time < 0 || Safepoint_lock->wait(true, remaining_time / MICROUNITS)) {
-        print_safepoint_timeout(_blocking_timeout);
+    while (_waiting_to_block > 0) {
+      if (TraceSafepoint) tty->print_cr("Waiting for %d thread(s) to block", _waiting_to_block);
+      if (!SafepointTimeout || timeout_error_printed) {
+        Safepoint_lock->wait(true);  // true means no safepoint checks
+      } else {
+        // Compute remaining time
+        jlong remaining_time = safepoint_limit_time - os::javaTimeNanos();
+
+        // If there is no remaining time, then there is an error
+        if (remaining_time < 0 || Safepoint_lock->wait(true, remaining_time / MICROUNITS)) {
+          print_safepoint_timeout(_blocking_timeout);
+        }
       }
     }
-  }
-  assert(_waiting_to_block == 0, "sanity check");
+    assert(_waiting_to_block == 0, "sanity check");
 
 #ifndef PRODUCT
-  if (SafepointTimeout) {
-    jlong current_time = os::javaTimeNanos();
-    if (safepoint_limit_time < current_time) {
-      tty->print_cr("# SafepointSynchronize: Finished after "
-                    INT64_FORMAT_W(6) " ms",
-                    ((current_time - safepoint_limit_time) / MICROUNITS +
-                     SafepointTimeoutDelay));
+    if (SafepointTimeout) {
+      jlong current_time = os::javaTimeNanos();
+      if (safepoint_limit_time < current_time) {
+        tty->print_cr("# SafepointSynchronize: Finished after "
+                      INT64_FORMAT_W(6) " ms",
+                      ((current_time - safepoint_limit_time) / MICROUNITS +
+                       SafepointTimeoutDelay));
+      }
+    }
+#endif
+
+    assert((_safepoint_counter & 0x1) == 0, "must be even");
+    assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
+    _safepoint_counter ++;
+
+    // Record state
+    _state = _synchronized;
+
+    OrderAccess::fence();
+
+    if (wait_blocked_event.should_commit()) {
+      post_safepoint_wait_blocked_event(&wait_blocked_event, initial_waiting_to_block);
     }
   }
-#endif
-
-  assert((_safepoint_counter & 0x1) == 0, "must be even");
-  assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
-  _safepoint_counter ++;
-
-  // Record state
-  _state = _synchronized;
-
-  OrderAccess::fence();
 
 #ifdef ASSERT
   for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
@@ -395,12 +482,22 @@
   }
 
   // Call stuff that needs to be run when a safepoint is just about to be completed
-  do_cleanup_tasks();
+  {
+    EventSafepointCleanup cleanup_event;
+    do_cleanup_tasks();
+    if (cleanup_event.should_commit()) {
+      post_safepoint_cleanup_event(&cleanup_event);
+    }
+  }
 
   if (PrintSafepointStatistics) {
     // Record how much time spend on the above cleanup tasks
     update_statistics_on_cleanup_end(os::javaTimeNanos());
   }
+
+  if (begin_event.should_commit()) {
+    post_safepoint_begin_event(&begin_event, nof_threads, _current_jni_active_count);
+  }
 }
 
 // Wake up all threads, so they are ready to resume execution after the safepoint
@@ -409,6 +506,7 @@
 
   assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
   assert((_safepoint_counter & 0x1) == 1, "must be odd");
+  EventSafepointEnd event;
   _safepoint_counter ++;
   // memory fence isn't required here since an odd _safepoint_counter
   // value can do no harm and a fence is issued below anyway.
@@ -494,6 +592,9 @@
   // record this time so VMThread can keep track of how much time has elapsed
   // since last safepoint.
   _end_of_last_safepoint = os::javaTimeMillis();
+  if (event.should_commit()) {
+    post_safepoint_end_event(&event);
+  }
 }
 
 bool SafepointSynchronize::is_cleanup_needed() {
@@ -507,32 +608,62 @@
 // Various cleaning tasks that should be done periodically at safepoints
 void SafepointSynchronize::do_cleanup_tasks() {
   {
-    TraceTime t1("deflating idle monitors", TraceSafepointCleanupTime);
+    const char* name = "deflating idle monitors";
+    EventSafepointCleanupTask event;
+    TraceTime t1(name, TraceSafepointCleanupTime);
     ObjectSynchronizer::deflate_idle_monitors();
+    if (event.should_commit()) {
+      post_safepoint_cleanup_task_event(&event, name);
+    }
   }
 
   {
-    TraceTime t2("updating inline caches", TraceSafepointCleanupTime);
+    const char* name = "updating inline caches";
+    EventSafepointCleanupTask event;
+    TraceTime t2(name, TraceSafepointCleanupTime);
     InlineCacheBuffer::update_inline_caches();
+    if (event.should_commit()) {
+      post_safepoint_cleanup_task_event(&event, name);
+    }
   }
   {
-    TraceTime t3("compilation policy safepoint handler", TraceSafepointCleanupTime);
+    const char* name = "compilation policy safepoint handler";
+    EventSafepointCleanupTask event;
+    TraceTime t3(name, TraceSafepointCleanupTime);
     CompilationPolicy::policy()->do_safepoint_work();
+    if (event.should_commit()) {
+      post_safepoint_cleanup_task_event(&event, name);
+    }
   }
 
   {
-    TraceTime t4("mark nmethods", TraceSafepointCleanupTime);
+    const char* name = "mark nmethods";
+    EventSafepointCleanupTask event;
+    TraceTime t4(name, TraceSafepointCleanupTime);
     NMethodSweeper::mark_active_nmethods();
+    if (event.should_commit()) {
+      post_safepoint_cleanup_task_event(&event, name);
+    }
   }
 
   if (SymbolTable::needs_rehashing()) {
-    TraceTime t5("rehashing symbol table", TraceSafepointCleanupTime);
+    const char* name = "rehashing symbol table";
+    EventSafepointCleanupTask event;
+    TraceTime t5(name, TraceSafepointCleanupTime);
     SymbolTable::rehash_table();
+    if (event.should_commit()) {
+      post_safepoint_cleanup_task_event(&event, name);
+    }
   }
 
   if (StringTable::needs_rehashing()) {
-    TraceTime t6("rehashing string table", TraceSafepointCleanupTime);
+    const char* name = "rehashing string table";
+    EventSafepointCleanupTask event;
+    TraceTime t6(name, TraceSafepointCleanupTime);
     StringTable::rehash_table();
+    if (event.should_commit()) {
+      post_safepoint_cleanup_task_event(&event, name);
+    }
   }
 
   // rotate log files?
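The helpers added to safepoint.cpp above group all JFR events of one safepoint around SafepointSynchronize::_safepoint_counter, which the hunks show being incremented once when the VM reaches the synchronized state and once more in end(): events posted just before the first increment use adjustment +1 and the end event, posted after the second increment, uses -1, so they all report the same odd in-safepoint id. Below is a minimal standalone sketch of that grouping idea; the names and the record() helper are illustrative, not HotSpot code.

    #include <cassert>
    #include <cstdio>

    static int g_counter = 0;          // stands in for _safepoint_counter

    static void record(const char* name, int adjustment = 0) {
      std::printf("%-12s id=%d\n", name, g_counter + adjustment);
    }

    int main() {
      assert((g_counter & 0x1) == 0);  // even: not at a safepoint
      record("sync", 1);               // posted before the increment -> +1
      ++g_counter;                     // "begin": counter becomes odd while synchronized
      record("cleanup");               // posted while inside the safepoint -> no adjustment
      ++g_counter;                     // "end": counter becomes even again
      record("end", -1);               // posted after the increment -> -1
      return 0;
    }

All three lines print the same id (1 here), which is the property the +1/-1 adjustments in set_current_safepoint_id() are after.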
--- a/src/share/vm/runtime/safepoint.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/runtime/safepoint.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -145,6 +145,7 @@
   // Query
   inline static bool is_at_safepoint()   { return _state == _synchronized;  }
   inline static bool is_synchronizing()  { return _state == _synchronizing;  }
+  inline static int safepoint_counter()  { return _safepoint_counter; }
 
   inline static bool do_call_back() {
     return (_state != _not_synchronized);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/runtime/semaphore.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_SEMAPHORE_HPP
+#define SHARE_VM_RUNTIME_SEMAPHORE_HPP
+
+#include "memory/allocation.hpp"
+
+#if defined(LINUX) || defined(SOLARIS) || defined(AIX)
+# include "semaphore_posix.hpp"
+#elif defined(BSD)
+# include "semaphore_bsd.hpp"
+#elif defined(_WINDOWS)
+# include "semaphore_windows.hpp"
+#else
+# error "No semaphore implementation provided for this OS"
+#endif
+
+class JavaThread;
+
+// Implements the limited, platform independent Semaphore API.
+class Semaphore : public CHeapObj<mtInternal> {
+  SemaphoreImpl _impl;
+
+  // Prevent copying and assignment of Semaphore instances.
+  Semaphore(const Semaphore&);
+  Semaphore& operator=(const Semaphore&);
+
+ public:
+  Semaphore(uint value = 0) : _impl(value) {}
+  ~Semaphore() {}
+
+  void signal(uint count = 1) { _impl.signal(count); }
+
+  void wait()                 { _impl.wait(); }
+
+  bool trywait()              { return _impl.trywait(); }
+
+  void wait_with_safepoint_check(JavaThread* thread);
+};
+
+#endif // SHARE_VM_RUNTIME_SEMAPHORE_HPP
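The new Semaphore class is a thin wrapper over a per-platform SemaphoreImpl selected at compile time, exposing only signal(count), wait() and trywait(). A standalone usage sketch of that API shape follows; the CountingSemaphore stand-in built on std::mutex and std::condition_variable is an illustrative assumption, not the posix/bsd/windows implementations referenced above.

    #include <condition_variable>
    #include <mutex>
    #include <cstdio>

    // Illustrative stand-in for the platform SemaphoreImpl classes.
    class CountingSemaphore {
      std::mutex _lock;
      std::condition_variable _cv;
      unsigned _value;
     public:
      explicit CountingSemaphore(unsigned value = 0) : _value(value) {}
      void signal(unsigned count = 1) {
        std::lock_guard<std::mutex> g(_lock);
        _value += count;
        _cv.notify_all();
      }
      void wait() {
        std::unique_lock<std::mutex> g(_lock);
        _cv.wait(g, [this] { return _value > 0; });
        --_value;
      }
      bool trywait() {
        std::lock_guard<std::mutex> g(_lock);
        if (_value == 0) return false;
        --_value;
        return true;
      }
    };

    int main() {
      CountingSemaphore sem(0);
      sem.signal(2);                                // make two permits available
      sem.wait();                                   // consumes one permit
      std::printf("trywait: %d\n", sem.trywait());  // consumes the second -> 1
      std::printf("trywait: %d\n", sem.trywait());  // no permits left     -> 0
      return 0;
    }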
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/runtime/semaphore.inline.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_SEMAPHORE_INLINE_HPP
+#define SHARE_VM_RUNTIME_SEMAPHORE_INLINE_HPP
+
+#include "runtime/interfaceSupport.inline.hpp"
+#include "runtime/semaphore.hpp"
+#include "runtime/thread.inline.hpp"
+
+inline void Semaphore::wait_with_safepoint_check(JavaThread* thread) {
+  // Prepare to block and allow safepoints while blocked
+  ThreadBlockInVM tbivm(thread);
+  OSThreadWaitState osts(thread->osthread(), false /* not in Object.wait() */);
+
+  // Wait for value
+  _impl.wait();
+}
+
+#endif // SHARE_VM_RUNTIME_SEMAPHORE_INLINE_HPP
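wait_with_safepoint_check() wraps the raw semaphore wait in a ThreadBlockInVM transition so the VM thread can reach a safepoint while this JavaThread is blocked. The sketch below shows only the RAII shape of that pattern; BlockedStateGuard is a hypothetical stand-in for ThreadBlockInVM and the semaphore call is elided.

    #include <cstdio>

    // Hypothetical stand-in for ThreadBlockInVM: marks the thread "blocked" for the
    // duration of the scope so a pending safepoint does not have to wait for it.
    class BlockedStateGuard {
     public:
      BlockedStateGuard()  { std::puts("thread state: blocked (safepoint may proceed)"); }
      ~BlockedStateGuard() { std::puts("thread state: running (safepoint check on exit)"); }
    };

    static void blocking_wait(/* Semaphore& sem */) {
      BlockedStateGuard guard;   // transition before blocking ...
      // sem.wait();             // ... then block; the VM thread is not held up by us
    }

    int main() {
      blocking_wait();
      return 0;
    }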
--- a/src/share/vm/runtime/sweeper.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/runtime/sweeper.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -28,6 +28,7 @@
 #include "code/icBuffer.hpp"
 #include "code/nmethod.hpp"
 #include "compiler/compileBroker.hpp"
+#include "jfr/jfrEvents.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/method.hpp"
 #include "runtime/atomic.hpp"
@@ -38,9 +39,8 @@
 #include "runtime/sweeper.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/vm_operations.hpp"
-#include "trace/tracing.hpp"
 #include "utilities/events.hpp"
-#include "utilities/ticks.inline.hpp"
+#include "utilities/ticks.hpp"
 #include "utilities/xmlstream.hpp"
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
@@ -318,6 +318,24 @@
   }
 }
 
+static void post_sweep_event(EventSweepCodeCache* event,
+                             const Ticks& start,
+                             const Ticks& end,
+                             s4 traversals,
+                             int swept,
+                             int flushed,
+                             int zombified) {
+  assert(event != NULL, "invariant");
+  assert(event->should_commit(), "invariant");
+  event->set_starttime(start);
+  event->set_endtime(end);
+  event->set_sweepId(traversals);
+  event->set_sweptCount(swept);
+  event->set_flushedCount(flushed);
+  event->set_zombifiedCount(zombified);
+  event->commit();
+}
+
 void NMethodSweeper::sweep_code_cache() {
   ResourceMark rm;
   Ticks sweep_start_counter = Ticks::now();
@@ -394,15 +412,7 @@
 
   EventSweepCodeCache event(UNTIMED);
   if (event.should_commit()) {
-    event.set_starttime(sweep_start_counter);
-    event.set_endtime(sweep_end_counter);
-    event.set_sweepIndex(_traversals);
-    event.set_sweepFractionIndex(NmethodSweepFraction - _sweep_fractions_left + 1);
-    event.set_sweptCount(swept_count);
-    event.set_flushedCount(_flushed_count);
-    event.set_markedCount(_marked_for_reclamation_count);
-    event.set_zombifiedCount(_zombified_count);
-    event.commit();
+    post_sweep_event(&event, sweep_start_counter, sweep_end_counter, (s4)_traversals, swept_count, _flushed_count, _zombified_count);
   }
 
 #ifdef ASSERT
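The sweeper hunk keeps EventSweepCodeCache constructed UNTIMED and fills starttime/endtime from Ticks captured around the sweep, moving the field assignments into a helper that is entered only when should_commit() is true. A standalone sketch of that measure-first, post-once-at-the-end shape is below; the SweepEvent type and its fields are illustrative, since the real event class is generated JFR code.

    #include <chrono>
    #include <cstdio>

    using Ticks = std::chrono::steady_clock::time_point;

    // Illustrative event type standing in for EventSweepCodeCache.
    struct SweepEvent {
      Ticks start, end;
      int swept = 0, flushed = 0, zombified = 0;
      bool should_commit() const { return true; }   // stands in for JFR's enablement check
      void commit() const {
        long long us = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
        std::printf("sweep: %lld us, swept=%d flushed=%d zombified=%d\n",
                    us, swept, flushed, zombified);
      }
    };

    static void post_sweep_event(SweepEvent* e, Ticks s, Ticks t,
                                 int swept, int flushed, int zombified) {
      e->start = s; e->end = t;
      e->swept = swept; e->flushed = flushed; e->zombified = zombified;
      e->commit();
    }

    int main() {
      Ticks sweep_start = std::chrono::steady_clock::now();
      // ... sweep work would happen here ...
      Ticks sweep_end = std::chrono::steady_clock::now();
      SweepEvent event;                      // "UNTIMED": timestamps are supplied explicitly
      if (event.should_commit()) {
        post_sweep_event(&event, sweep_start, sweep_end, /*swept*/ 10, /*flushed*/ 2, /*zombified*/ 1);
      }
      return 0;
    }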
--- a/src/share/vm/runtime/synchronizer.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/runtime/synchronizer.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "classfile/vmSymbols.hpp"
+#include "jfr/jfrEvents.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/markOop.hpp"
 #include "oops/oop.inline.hpp"
@@ -1178,6 +1179,17 @@
     TEVENT (omFlush) ;
 }
 
+static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
+                                       const oop obj) {
+  assert(event != NULL, "invariant");
+  assert(event->should_commit(), "invariant");
+  event->set_monitorClass(obj->klass());
+  event->set_address((uintptr_t)(void*)obj);
+  // XXX: the inflation cause is not tracked in this backport; implement?
+//  event->set_cause((u1)cause);
+  event->commit();
+}
+
 // Fast path code shared by multiple functions
 ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
   markOop mark = obj->mark();
@@ -1200,6 +1212,8 @@
   assert (Universe::verify_in_progress() ||
           !SafepointSynchronize::is_at_safepoint(), "invariant") ;
 
+  EventJavaMonitorInflate event;
+
   for (;;) {
       const markOop mark = object->mark() ;
       assert (!mark->has_bias_pattern(), "invariant") ;
@@ -1330,6 +1344,9 @@
                 object->klass()->external_name());
             }
           }
+          if (event.should_commit()) {
+            post_monitor_inflate_event(&event, object);
+          }
           return m ;
       }
 
@@ -1380,6 +1397,9 @@
             object->klass()->external_name());
         }
       }
+      if (event.should_commit()) {
+        post_monitor_inflate_event(&event, object);
+      }
       return m ;
   }
 }
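In synchronizer.cpp the EventJavaMonitorInflate is constructed once before the inflation retry loop and committed only on the two paths that actually install an ObjectMonitor, so the event's duration spans the whole inflation attempt. A small standalone sketch of that construct-early, commit-on-success shape follows; the event type and the retry condition are illustrative.

    #include <cstdio>

    // Illustrative timed event: starts at construction, reports on commit.
    struct InflateEvent {
      bool should_commit() const { return true; }
      void commit() const { std::puts("JavaMonitorInflate committed"); }
    };

    static bool try_install_monitor(int attempt) {
      return attempt == 2;                 // pretend the CAS succeeds on the third try
    }

    int main() {
      InflateEvent event;                  // started before the retry loop
      for (int attempt = 0; ; ++attempt) {
        if (try_install_monitor(attempt)) {
          if (event.should_commit()) {
            event.commit();                // committed only on the path that inflated
          }
          break;
        }
        // otherwise retry without committing anything
      }
      return 0;
    }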
--- a/src/share/vm/runtime/thread.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/runtime/thread.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -32,6 +32,8 @@
 #include "interpreter/interpreter.hpp"
 #include "interpreter/linkResolver.hpp"
 #include "interpreter/oopMapCache.hpp"
+#include "jfr/jfrEvents.hpp"
+#include "jfr/support/jfrThreadId.hpp"
 #include "jvmtifiles/jvmtiEnv.hpp"
 #include "memory/gcLocker.inline.hpp"
 #include "memory/metaspaceShared.hpp"
@@ -77,8 +79,6 @@
 #include "services/management.hpp"
 #include "services/memTracker.hpp"
 #include "services/threadService.hpp"
-#include "trace/tracing.hpp"
-#include "trace/traceMacros.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/dtrace.hpp"
 #include "utilities/events.hpp"
@@ -111,6 +111,9 @@
 #if INCLUDE_RTM_OPT
 #include "runtime/rtmLocking.hpp"
 #endif
+#if INCLUDE_JFR
+#include "jfr/jfr.hpp"
+#endif
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
@@ -342,7 +345,7 @@
   // Reclaim the objectmonitors from the omFreeList of the moribund thread.
   ObjectSynchronizer::omFlush (this) ;
 
-  EVENT_THREAD_DESTRUCT(this);
+  JFR_ONLY(Jfr::on_thread_destruct(this);)
 
   // stack_base can be NULL if the thread is never started or exited before
   // record_stack_base_and_size called. Although, we would like to ensure
@@ -1671,7 +1674,7 @@
 
   EventThreadStart event;
   if (event.should_commit()) {
-     event.set_javalangthread(java_lang_Thread::thread_id(this->threadObj()));
+    event.set_thread(JFR_THREAD_ID(this));
      event.commit();
   }
 
@@ -1805,12 +1808,12 @@
     // from java_lang_Thread object
     EventThreadEnd event;
     if (event.should_commit()) {
-        event.set_javalangthread(java_lang_Thread::thread_id(this->threadObj()));
-        event.commit();
+      event.set_thread(JFR_THREAD_ID(this));
+      event.commit();
     }
 
     // Call after last event on thread
-    EVENT_THREAD_EXIT(this);
+    JFR_ONLY(Jfr::on_thread_exit(this);)
 
     // Call Thread.exit(). We try 3 times in case we got another Thread.stop during
     // the execution of the method. If that is not enough, then we don't really care. Thread.stop
@@ -2186,6 +2189,8 @@
   if (check_asyncs) {
     check_and_handle_async_exceptions();
   }
+
+  JFR_ONLY(SUSPEND_THREAD_CONDITIONAL(this);)
 }
 
 void JavaThread::send_thread_stop(oop java_throwable)  {
@@ -2424,6 +2429,8 @@
       fatal("missed deoptimization!");
     }
   }
+
+  JFR_ONLY(SUSPEND_THREAD_CONDITIONAL(thread);)
 }
 
 // Slow path when the native==>VM/Java barriers detect a safepoint is in
@@ -3312,6 +3319,14 @@
   if (wt != NULL)
     tc->do_thread(wt);
 
+#if INCLUDE_JFR
+  Thread* sampler_thread = Jfr::sampler_thread();
+  if (sampler_thread != NULL) {
+    tc->do_thread(sampler_thread);
+  }
+
+#endif
+
   // If CompilerThreads ever become non-JavaThreads, add them here
 }
 
@@ -3438,6 +3453,8 @@
     return status;
   }
 
+  JFR_ONLY(Jfr::on_vm_init();)
+
   // Should be done after the heap is fully created
   main_thread->cache_global_variables();
 
@@ -3565,11 +3582,6 @@
 
   quicken_jni_functions();
 
-  // Must be run after init_ft which initializes ft_enabled
-  if (TRACE_INITIALIZE() != JNI_OK) {
-    vm_exit_during_initialization("Failed to initialize tracing backend");
-  }
-
   // Set flag that basic initialization has completed. Used by exceptions and various
   // debug stuff, that does not work until all basic classes have been initialized.
   set_init_completed();
@@ -3638,9 +3650,7 @@
   // Notify JVMTI agents that VM initialization is complete - nop if no agents.
   JvmtiExport::post_vm_initialized();
 
-  if (TRACE_START() != JNI_OK) {
-    vm_exit_during_initialization("Failed to start tracing backend.");
-  }
+  JFR_ONLY(Jfr::on_vm_start();)
 
   if (CleanChunkPoolAsync) {
     Chunk::start_chunk_pool_cleaner_task();
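thread.cpp replaces the old EVENT_THREAD_DESTRUCT/EVENT_THREAD_EXIT trace macros and the TRACE_INITIALIZE/TRACE_START calls with JFR_ONLY(...) wrappers, so the hooks compile away entirely when the VM is built without JFR. The sketch below shows how such a conditional-compilation macro is typically defined and used; the exact HotSpot definition lives in utilities/macros.hpp and may differ in detail.

    #include <cstdio>

    // Assumed build flag; in HotSpot this comes from the makefiles (ENABLE_JFR).
    #ifndef INCLUDE_JFR
    #define INCLUDE_JFR 1
    #endif

    // Typical *_ONLY pattern: expand to the code when the feature is built in,
    // expand to nothing otherwise, so call sites need no #if blocks.
    #if INCLUDE_JFR
    #define JFR_ONLY(code) code
    #else
    #define JFR_ONLY(code)
    #endif

    struct Jfr {
      static void on_thread_exit(void* /*thread*/) { std::puts("JFR: thread exit hook"); }
    };

    int main() {
      JFR_ONLY(Jfr::on_thread_exit(nullptr);)   // expands to nothing when INCLUDE_JFR is 0
      return 0;
    }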
--- a/src/share/vm/runtime/thread.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/runtime/thread.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -42,8 +42,6 @@
 #include "runtime/threadLocalStorage.hpp"
 #include "runtime/thread_ext.hpp"
 #include "runtime/unhandledOops.hpp"
-#include "trace/traceBackend.hpp"
-#include "trace/traceMacros.hpp"
 #include "utilities/exceptions.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/top.hpp"
@@ -54,6 +52,9 @@
 #ifdef TARGET_ARCH_zero
 # include "stack_zero.hpp"
 #endif
+#if INCLUDE_JFR
+#include "jfr/support/jfrThreadExtension.hpp"
+#endif
 
 class ThreadSafepointState;
 class ThreadProfiler;
@@ -260,7 +261,7 @@
   // Thread-local buffer used by MetadataOnStackMark.
   MetadataOnStackBuffer* _metadata_on_stack_buffer;
 
-  TRACE_DATA _trace_data;                       // Thread-local data for tracing
+  JFR_ONLY(DEFINE_THREAD_LOCAL_FIELD_JFR;)      // Thread-local data for jfr
 
   ThreadExt _ext;
 
@@ -441,7 +442,7 @@
   void incr_allocated_bytes(jlong size) { _allocated_bytes += size; }
   inline jlong cooked_allocated_bytes();
 
-  TRACE_DATA* trace_data()              { return &_trace_data; }
+  JFR_ONLY(DEFINE_THREAD_LOCAL_ACCESSOR_JFR;)
 
   const ThreadExt& ext() const          { return _ext; }
   ThreadExt& ext()                      { return _ext; }
@@ -626,6 +627,8 @@
 
   static ByteSize allocated_bytes_offset()       { return byte_offset_of(Thread, _allocated_bytes ); }
 
+  JFR_ONLY(DEFINE_THREAD_LOCAL_OFFSET_JFR;)
+
  public:
   volatile intptr_t _Stalled ;
   volatile int _TypeTag ;
--- a/src/share/vm/runtime/vmStructs.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/runtime/vmStructs.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -169,10 +169,6 @@
 #include "gc_implementation/g1/vmStructs_g1.hpp"
 #endif // INCLUDE_ALL_GCS
 
-#if INCLUDE_TRACE
- #include "runtime/vmStructs_trace.hpp"
-#endif
-
 #ifdef COMPILER2
 #include "opto/addnode.hpp"
 #include "opto/block.hpp"
@@ -2908,11 +2904,6 @@
                 GENERATE_STATIC_VM_STRUCT_ENTRY)
 #endif // INCLUDE_ALL_GCS
 
-#if INCLUDE_TRACE
-  VM_STRUCTS_TRACE(GENERATE_NONSTATIC_VM_STRUCT_ENTRY,
-                GENERATE_STATIC_VM_STRUCT_ENTRY)
-#endif
-
   VM_STRUCTS_CPU(GENERATE_NONSTATIC_VM_STRUCT_ENTRY,
                  GENERATE_STATIC_VM_STRUCT_ENTRY,
                  GENERATE_UNCHECKED_NONSTATIC_VM_STRUCT_ENTRY,
@@ -2958,11 +2949,6 @@
               GENERATE_TOPLEVEL_VM_TYPE_ENTRY)
 #endif // INCLUDE_ALL_GCS
 
-#if INCLUDE_TRACE
-  VM_TYPES_TRACE(GENERATE_VM_TYPE_ENTRY,
-              GENERATE_TOPLEVEL_VM_TYPE_ENTRY)
-#endif
-
   VM_TYPES_CPU(GENERATE_VM_TYPE_ENTRY,
                GENERATE_TOPLEVEL_VM_TYPE_ENTRY,
                GENERATE_OOP_VM_TYPE_ENTRY,
@@ -2998,10 +2984,6 @@
   VM_INT_CONSTANTS_PARNEW(GENERATE_VM_INT_CONSTANT_ENTRY)
 #endif // INCLUDE_ALL_GCS
 
-#if INCLUDE_TRACE
-  VM_INT_CONSTANTS_TRACE(GENERATE_VM_INT_CONSTANT_ENTRY)
-#endif
-
   VM_INT_CONSTANTS_CPU(GENERATE_VM_INT_CONSTANT_ENTRY,
                        GENERATE_PREPROCESSOR_VM_INT_CONSTANT_ENTRY,
                        GENERATE_C1_VM_INT_CONSTANT_ENTRY,
@@ -3067,11 +3049,6 @@
 
 #endif // INCLUDE_ALL_GCS
 
-#if INCLUDE_TRACE
-  VM_STRUCTS_TRACE(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
-                CHECK_STATIC_VM_STRUCT_ENTRY);
-#endif
-
   VM_STRUCTS_CPU(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
                  CHECK_STATIC_VM_STRUCT_ENTRY,
                  CHECK_NO_OP,
@@ -3113,11 +3090,6 @@
 
 #endif // INCLUDE_ALL_GCS
 
-#if INCLUDE_TRACE
-  VM_TYPES_TRACE(CHECK_VM_TYPE_ENTRY,
-              CHECK_SINGLE_ARG_VM_TYPE_NO_OP);
-#endif
-
   VM_TYPES_CPU(CHECK_VM_TYPE_ENTRY,
                CHECK_SINGLE_ARG_VM_TYPE_NO_OP,
                CHECK_SINGLE_ARG_VM_TYPE_NO_OP,
@@ -3181,11 +3153,6 @@
                            ENSURE_FIELD_TYPE_PRESENT));
 #endif // INCLUDE_ALL_GCS
 
-#if INCLUDE_TRACE
-  debug_only(VM_STRUCTS_TRACE(ENSURE_FIELD_TYPE_PRESENT,
-                           ENSURE_FIELD_TYPE_PRESENT));
-#endif
-
   debug_only(VM_STRUCTS_CPU(ENSURE_FIELD_TYPE_PRESENT,
                             ENSURE_FIELD_TYPE_PRESENT,
                             CHECK_NO_OP,
--- a/src/share/vm/runtime/vmStructs_trace.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_RUNTIME_VMSTRUCTS_TRACE_HPP
-#define SHARE_VM_RUNTIME_VMSTRUCTS_TRACE_HPP
-
-#define VM_INT_CONSTANTS_TRACE(a)
-
-#define VM_STRUCTS_TRACE(a, b)
-
-#define VM_TYPES_TRACE(a, b)
-
-
-#endif // SHARE_VM_RUNTIME_VMSTRUCTS_TRACE_HPP
--- a/src/share/vm/runtime/vmThread.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/runtime/vmThread.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -25,6 +25,8 @@
 #include "precompiled.hpp"
 #include "compiler/compileBroker.hpp"
 #include "gc_interface/collectedHeap.hpp"
+#include "jfr/jfrEvents.hpp"
+#include "jfr/support/jfrThreadId.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/method.hpp"
 #include "oops/oop.inline.hpp"
@@ -35,7 +37,6 @@
 #include "runtime/vmThread.hpp"
 #include "runtime/vm_operations.hpp"
 #include "services/runtimeService.hpp"
-#include "trace/tracing.hpp"
 #include "utilities/dtrace.hpp"
 #include "utilities/events.hpp"
 #include "utilities/xmlstream.hpp"
@@ -359,6 +360,23 @@
   st->cr();
 }
 
+static void post_vm_operation_event(EventExecuteVMOperation* event, VM_Operation* op) {
+  assert(event != NULL, "invariant");
+  assert(event->should_commit(), "invariant");
+  assert(op != NULL, "invariant");
+  const bool is_concurrent = op->evaluate_concurrently();
+  const bool evaluate_at_safepoint = op->evaluate_at_safepoint();
+  event->set_operation(op->type());
+  event->set_safepoint(evaluate_at_safepoint);
+  event->set_blocking(!is_concurrent);
+  // Only write caller thread information for non-concurrent vm operations.
+  // For concurrent vm operations, the thread id is set to 0, indicating that the thread is unknown.
+  // This is because the caller thread could have exited already.
+  event->set_caller(is_concurrent ? 0 : JFR_THREAD_ID(op->calling_thread()));
+  event->set_safepointId(evaluate_at_safepoint ? SafepointSynchronize::safepoint_counter() : 0);
+  event->commit();
+}
+
 void VMThread::evaluate_operation(VM_Operation* op) {
   ResourceMark rm;
 
@@ -374,19 +392,9 @@
 #endif /* USDT2 */
 
     EventExecuteVMOperation event;
-
     op->evaluate();
-
     if (event.should_commit()) {
-      bool is_concurrent = op->evaluate_concurrently();
-      event.set_operation(op->type());
-      event.set_safepoint(op->evaluate_at_safepoint());
-      event.set_blocking(!is_concurrent);
-      // Only write caller thread information for non-concurrent vm operations.
-      // For concurrent vm operations, the thread id is set to 0 indicating thread is unknown.
-      // This is because the caller thread could have exited already.
-      event.set_caller(is_concurrent ? 0 : op->calling_thread()->osthread()->thread_id());
-      event.commit();
+      post_vm_operation_event(&event, op);
     }
 
 #ifndef USDT2
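post_vm_operation_event() records the caller thread only for non-concurrent operations, since the submitter of a concurrent operation may already have exited, and it stores SafepointSynchronize::safepoint_counter() as safepointId only when the operation runs at a safepoint, which lets ExecuteVMOperation events be correlated with the safepoint events added earlier in this changeset. A standalone sketch of just that field-selection logic is below; the Op struct and the ids are illustrative, not HotSpot types.

    #include <cstdint>
    #include <cstdio>

    // Illustrative operation descriptor mirroring the decisions above.
    struct Op {
      bool concurrent;
      bool at_safepoint;
      uint64_t caller_thread_id;
    };

    static uint64_t current_safepoint_id() { return 42; }   // stands in for safepoint_counter()

    static void post_vm_operation_event(const Op& op) {
      // Caller is unknown (0) for concurrent ops: the submitting thread may be gone.
      uint64_t caller = op.concurrent ? 0 : op.caller_thread_id;
      // Only meaningful when the op actually ran inside a safepoint.
      uint64_t safepoint_id = op.at_safepoint ? current_safepoint_id() : 0;
      std::printf("ExecuteVMOperation caller=%llu safepointId=%llu blocking=%d\n",
                  (unsigned long long)caller, (unsigned long long)safepoint_id,
                  op.concurrent ? 0 : 1);
    }

    int main() {
      post_vm_operation_event(Op{ /*concurrent*/ false, /*at_safepoint*/ true,  /*caller*/ 7 });
      post_vm_operation_event(Op{ /*concurrent*/ true,  /*at_safepoint*/ false, /*caller*/ 7 });
      return 0;
    }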
--- a/src/share/vm/runtime/vm_operations.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/runtime/vm_operations.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -37,7 +37,6 @@
 #include "runtime/thread.inline.hpp"
 #include "runtime/vm_operations.hpp"
 #include "services/threadService.hpp"
-#include "trace/tracing.hpp"
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
--- a/src/share/vm/runtime/vm_version.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/runtime/vm_version.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -98,6 +98,7 @@
 
   // does HW support an 8-byte compare-exchange operation?
   static bool supports_cx8()  {
+    assert(_initialized, "not initialized");
 #ifdef SUPPORTS_NATIVE_CX8
     return true;
 #else
@@ -106,10 +107,10 @@
   }
   // does HW support atomic get-and-set or atomic get-and-add?  Used
   // to guide intrinsification decisions for Unsafe atomic ops
-  static bool supports_atomic_getset4()  {return _supports_atomic_getset4;}
-  static bool supports_atomic_getset8()  {return _supports_atomic_getset8;}
-  static bool supports_atomic_getadd4()  {return _supports_atomic_getadd4;}
-  static bool supports_atomic_getadd8()  {return _supports_atomic_getadd8;}
+  static bool supports_atomic_getset4()  { assert(_initialized, "not initialized"); return _supports_atomic_getset4;}
+  static bool supports_atomic_getset8()  { assert(_initialized, "not initialized"); return _supports_atomic_getset8;}
+  static bool supports_atomic_getadd4()  { assert(_initialized, "not initialized"); return _supports_atomic_getadd4;}
+  static bool supports_atomic_getadd8()  { assert(_initialized, "not initialized"); return _supports_atomic_getadd8;}
 
   static unsigned int logical_processors_per_package() {
     return _logical_processors_per_package;
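The vm_version.hpp hunk adds assert(_initialized, "not initialized") to the capability queries so that a query issued before VM version initialization is caught in debug builds instead of silently returning a stale default. A minimal standalone sketch of that guard pattern follows; the class and field names here are illustrative.

    #include <cassert>
    #include <cstdio>

    // Illustrative capability holder: queries are only valid after initialize().
    class CpuFeatures {
      static bool _initialized;
      static bool _supports_atomic_getadd8;
     public:
      static void initialize() {
        _supports_atomic_getadd8 = true;            // would be probed from the hardware
        _initialized = true;
      }
      static bool supports_atomic_getadd8() {
        assert(_initialized && "not initialized");  // catch early callers in debug builds
        return _supports_atomic_getadd8;
      }
    };

    bool CpuFeatures::_initialized = false;
    bool CpuFeatures::_supports_atomic_getadd8 = false;

    int main() {
      CpuFeatures::initialize();
      std::printf("getadd8: %d\n", CpuFeatures::supports_atomic_getadd8());
      return 0;
    }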
--- a/src/share/vm/services/diagnosticArgument.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/services/diagnosticArgument.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -280,7 +280,7 @@
                                                   size_t len, TRAPS) {
   if (str == NULL) {
     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
-              "Integer parsing error nanotime value: syntax error");
+               "Parsing error memory size value: syntax error, value is null\n");
   }
 
   if (*str == '-') {
--- a/src/share/vm/services/memTracker.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/services/memTracker.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -75,7 +75,7 @@
 
 #else
 
-#include "runtime/atomic.hpp"
+#include "runtime/atomic.inline.hpp"
 #include "runtime/threadCritical.hpp"
 #include "services/mallocTracker.hpp"
 #include "services/virtualMemoryTracker.hpp"
--- a/src/share/vm/trace/noTraceBackend.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-#ifndef SHARE_VM_TRACE_NOTRACEBACKEND_HPP
-#define SHARE_VM_TRACE_NOTRACEBACKEND_HPP
-
-#include "prims/jni.h"
-#include "trace/traceTime.hpp"
-
-class NoTraceBackend {
-public:
-  static TracingTime time() {
-    return 0;
-  }
-};
-
-class TraceThreadData {
-public:
-    TraceThreadData() {}
-};
-
-typedef NoTraceBackend Tracing;
-
-#endif // SHARE_VM_TRACE_NOTRACEBACKEND_HPP
--- a/src/share/vm/trace/trace.dtd	Thu Aug 01 03:44:03 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,86 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
- DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-
- This code is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License version 2 only, as
- published by the Free Software Foundation.
-
- This code is distributed in the hope that it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- version 2 for more details (a copy is included in the LICENSE file that
- accompanied this code).
-
- You should have received a copy of the GNU General Public License version
- 2 along with this work; if not, write to the Free Software Foundation,
- Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-
- Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- or visit www.oracle.com if you need additional information or have any
- questions.
-  
--->
-
-<!ELEMENT trace (xi:include, relation_decls, events*, xi:include, xi:include)>
-<!ELEMENT types (content_types, primary_types)>
-<!ELEMENT content_types (content_type|struct_type)*>
-<!ELEMENT content_type (value|structvalue|structarray|array)*>
-<!ELEMENT struct_type (value*)>
-<!ELEMENT primary_types (primary_type*)>
-<!ELEMENT primary_type EMPTY>
-<!ELEMENT relation_decls (relation_decl*)>
-<!ELEMENT relation_decl EMPTY>
-<!ELEMENT events (event|struct)*>
-<!ELEMENT event (value|structvalue)*>
-<!ELEMENT struct (value|structvalue)*>
-<!ELEMENT value EMPTY>
-<!ELEMENT structvalue EMPTY>
-<!ELEMENT structarray EMPTY>
-<!ELEMENT array EMPTY>
-<!ATTLIST content_type  id             CDATA #REQUIRED
-                        hr_name        CDATA #REQUIRED
-                        type           CDATA #REQUIRED
-                        jvm_type       CDATA #IMPLIED
-                        builtin_type   CDATA #IMPLIED>
-<!ATTLIST struct_type   id             CDATA #REQUIRED>
-<!ATTLIST structarray   type           CDATA #REQUIRED
-                        field          CDATA #REQUIRED
-                        label          CDATA #REQUIRED>
-<!ATTLIST primary_type  symbol         CDATA #REQUIRED
-                        datatype       CDATA #REQUIRED
-                        contenttype    CDATA #REQUIRED
-                        type           CDATA #REQUIRED
-                        sizeop         CDATA #REQUIRED>
-<!ATTLIST relation_decl id             CDATA #REQUIRED
-                        uri            CDATA #REQUIRED>
-<!ATTLIST event         id             CDATA #REQUIRED
-                        path           CDATA #REQUIRED
-                        label          CDATA #REQUIRED
-                        description    CDATA #IMPLIED
-                        has_thread     CDATA "false"
-                        ignore_check   CDATA "false"
-                        has_stacktrace CDATA "false"
-                        is_instant     CDATA "false"
-                        is_constant    CDATA "false"
-                        is_requestable CDATA "false">
-<!ATTLIST struct        id             CDATA #REQUIRED>
-<!ATTLIST value         type           CDATA #REQUIRED
-                        field          CDATA #REQUIRED
-                        label          CDATA #REQUIRED
-                        description    CDATA #IMPLIED
-                        relation       CDATA "NOT_AVAILABLE"
-                        transition     CDATA "NONE">
-<!ATTLIST array         type           CDATA #REQUIRED
-                        field          CDATA #REQUIRED
-                        label          CDATA #REQUIRED
-                        description    CDATA #IMPLIED>
-<!ATTLIST structarray   type           CDATA #REQUIRED
-                        field          CDATA #REQUIRED
-                        label          CDATA #REQUIRED
-                        description    CDATA #IMPLIED>
-<!ATTLIST structvalue   type           CDATA #REQUIRED
-                        field          CDATA #REQUIRED
-                        label          CDATA #REQUIRED
-                        description    CDATA #IMPLIED>
--- a/src/share/vm/trace/trace.xml	Thu Aug 01 03:44:03 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,443 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!--
- Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
- DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-
- This code is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License version 2 only, as
- published by the Free Software Foundation.
-
- This code is distributed in the hope that it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- version 2 for more details (a copy is included in the LICENSE file that
- accompanied this code).
-
- You should have received a copy of the GNU General Public License version
- 2 along with this work; if not, write to the Free Software Foundation,
- Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-
- Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- or visit www.oracle.com if you need additional information or have any
- questions.
-
--->
-
-
-<!DOCTYPE trace SYSTEM "trace.dtd" [
-<!ENTITY % xinclude SYSTEM "xinclude.mod">
-%xinclude;
-]>
-
-<trace>
-  <xi:include href="tracetypes.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
-
-  <relation_decls>
-    <relation_decl id="GC_ID" uri="vm/gc/id"/>
-    <relation_decl id="COMP_ID" uri="vm/compiler/id"/>
-    <relation_decl id="SWEEP_ID" uri="vm/code_sweeper/id"/>
-    <relation_decl id="JAVA_MONITOR_ADDRESS" uri="java/monitor/address"/>
-  </relation_decls>
-
-<!--
-
-Events in the JVM are by default timed (it's more common)
-Perhaps a little strange. Might change.
-
-EVENTS
-
-Declard with the 'event' tag.
-
-<value fields> can be one or more of
-   value            - a simple primitive or constant type value
-   structvalue      - value is a sub-struct. This type must be previously defined
-                      with 'struct'
-All these require you to declare type, field and label of the field. They also accept
-an optional description of the field. If the meaning of the field is not obvious
-from the label you should provide a description. If an event however is not actually
-meant for end-users, you should probably _not_ write descriptions at all, since you
-might just add more concepts the user has no notion of/interest in.
-
-Events should be modeled after what conceptual process you are expressing, _NOT_
-from whatever data structures you might use inside the JVM for expressing a process.
-
-
-STRUCT
-
-Declared with the 'struct' tag.
-
-Declares a structure type that can be used in other events.
-
--->
-
-  <events>
-    <event id="ThreadStart" path="java/thread_start" label="Java Thread Start"
-           has_thread="true" is_instant="true">
-      <value type="JAVALANGTHREAD" field="javalangthread" label="Java Thread"/>
-    </event>
-
-    <event id="ThreadEnd" path="java/thread_end" label="Java Thread End"
-           has_thread="true" is_instant="true">
-      <value type="JAVALANGTHREAD" field="javalangthread" label="Java Thread"/>
-    </event>
-
-    <event id="ThreadSleep" path="java/thread_sleep" label="Java Thread Sleep"
-            has_thread="true" has_stacktrace="true" is_instant="false">
-      <value type="MILLIS" field="time" label="Sleep Time"/>
-    </event>
-
-    <event id="ThreadPark" path="java/thread_park" label="Java Thread Park"
-            has_thread="true" has_stacktrace="true" is_instant="false">
-      <value type="CLASS" field="klass" label="Class Parked On"/>
-      <value type="MILLIS" field="timeout" label="Park Timeout"/>
-      <value type="ADDRESS" field="address" label="Address of Object Parked" relation="JAVA_MONITOR_ADDRESS"/>
-    </event>
-
-    <event id="JavaMonitorEnter" path="java/monitor_enter" label="Java Monitor Blocked"
-            has_thread="true" has_stacktrace="true" is_instant="false">
-      <value type="CLASS" field="klass" label="Monitor Class"/>
-      <value type="JAVALANGTHREAD" field="previousOwner" label="Previous Monitor Owner"/>
-      <value type="ADDRESS" field="address" label="Monitor Address" relation="JAVA_MONITOR_ADDRESS"/>
-    </event>
-
-    <event id="JavaMonitorWait" path="java/monitor_wait" label="Java Monitor Wait" description="Waiting on a Java monitor"
-            has_thread="true" has_stacktrace="true" is_instant="false">
-      <value type="CLASS" field="klass" label="Monitor Class" description="Class of object waited on"/>
-      <value type="OSTHREAD" field="notifier" label="Notifier Thread" description="Notifying Thread"/>
-      <value type="MILLIS" field="timeout" label="Timeout" description="Maximum wait time"/>
-      <value type="BOOLEAN" field="timedOut" label="Timed Out" description="Wait has been timed out"/>
-      <value type="ADDRESS" field="address" label="Monitor Address" description="Address of object waited on" relation="JAVA_MONITOR_ADDRESS"/>
-    </event>
-
-    <event id="ClassLoad" path="vm/class/load" label="Class Load"
-            has_thread="true" has_stacktrace="true" is_instant="false">
-      <value type="CLASS" field="loadedClass" label="Loaded Class"/>
-      <value type="CLASS" field="definingClassLoader" label="Defining Class Loader"/>
-      <value type="CLASS" field="initiatingClassLoader" label="Initiating Class Loader"/>
-    </event>
-
-    <event id="ClassUnload" path="vm/class/unload" label="Class Unload"
-        has_thread="true" is_instant="true">
-      <value type="CLASS" field="unloadedClass" label="Unloaded Class"/>
-      <value type="CLASS" field="definingClassLoader" label="Defining Class Loader"/>
-    </event>
-
-    <event id="LongFlagChanged" path="vm/flag/long_changed" label="Long Flag Changed"
-          is_instant="true">
-      <value type="UTF8" field="name" label="Name" />
-      <value type="LONG" field="old_value" label="Old Value" />
-      <value type="LONG" field="new_value" label="New Value" />
-      <value type="FLAGVALUEORIGIN" field="origin" label="Origin" />
-    </event>
-
-    <event id="UnsignedLongFlagChanged" path="vm/flag/ulong_changed" label="Unsigned Long Flag Changed"
-          is_instant="true">
-      <value type="UTF8" field="name" label="Name" />
-      <value type="ULONG" field="old_value" label="Old Value" />
-      <value type="ULONG" field="new_value" label="New Value" />
-      <value type="FLAGVALUEORIGIN" field="origin" label="Origin" />
-    </event>
-
-    <event id="DoubleFlagChanged" path="vm/flag/double_changed" label="Double Flag Changed"
-         is_instant="true">
-      <value type="UTF8" field="name" label="Name" />
-      <value type="DOUBLE" field="old_value" label="Old Value" />
-      <value type="DOUBLE" field="new_value" label="New Value" />
-      <value type="FLAGVALUEORIGIN" field="origin" label="Origin" />
-    </event>
-
-    <event id="BooleanFlagChanged" path="vm/flag/boolean_changed" label="Boolean Flag Changed"
-         is_instant="true">
-      <value type="UTF8" field="name" label="Name" />
-      <value type="BOOLEAN" field="old_value" label="Old Value" />
-      <value type="BOOLEAN" field="new_value" label="New Value" />
-      <value type="FLAGVALUEORIGIN" field="origin" label="Origin" />
-    </event>
-
-    <event id="StringFlagChanged" path="vm/flag/string_changed" label="String Flag Changed"
-         is_instant="true">
-      <value type="UTF8" field="name" label="Name" />
-      <value type="UTF8" field="old_value" label="Old Value" />
-      <value type="UTF8" field="new_value" label="New Value" />
-      <value type="FLAGVALUEORIGIN" field="origin" label="Origin" />
-    </event>
-
-    <struct id="VirtualSpace">
-      <value type="ADDRESS" field="start" label="Start Address" description="Start address of the virtual space" />
-      <value type="ADDRESS" field="committedEnd" label="Committed End Address" description="End address of the committed memory for the virtual space" />
-      <value type="BYTES64" field="committedSize" label="Committed Size" description="Size of the committed memory for the virtual space" />
-      <value type="ADDRESS" field="reservedEnd" label="Reserved End Address" description="End address of the reserved memory for the virtual space" />
-      <value type="BYTES64" field="reservedSize" label="Reserved Size" description="Size of the reserved memory for the virtual space" />
-    </struct>
-
-    <struct id="ObjectSpace">
-      <value type="ADDRESS" field="start" label="Start Address" description="Start address of the space" />
-      <value type="ADDRESS" field="end" label="End Address" description="End address of the space" />
-      <value type="BYTES64" field="used" label="Used" description="Bytes allocated by objects in the space" />
-      <value type="BYTES64" field="size" label="Size" description="Size of the space" />
-    </struct>
-
-    <event id="GCHeapSummary" path="vm/gc/heap/summary" label="Heap Summary" is_instant="true">
-      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
-      <value type="GCWHEN" field="when" label="When" />
-      <structvalue type="VirtualSpace" field="heapSpace" label="Heap Space"/>
-      <value type="BYTES64" field="heapUsed" label="Heap Used" description="Bytes allocated by objects in the heap"/>
-    </event>
-
-    <struct id="MetaspaceSizes">
-      <value type="BYTES64" field="committed" label="Committed" description="Committed memory for this space" />
-      <value type="BYTES64" field="used" label="Used" description="Bytes allocated by objects in the space" />
-      <value type="BYTES64" field="reserved" label="Reserved" description="Reserved memory for this space" />
-    </struct>
-
-    <event id="MetaspaceSummary" path="vm/gc/heap/metaspace_summary" label="Metaspace Summary" is_instant="true">
-      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
-      <value type="GCWHEN" field="when" label="When" />
-      <value type="BYTES64" field="gcThreshold" label="GC Threshold" />
-      <structvalue type="MetaspaceSizes" field="metaspace" label="Total"/>
-      <structvalue type="MetaspaceSizes" field="dataSpace" label="Data"/>
-      <structvalue type="MetaspaceSizes" field="classSpace" label="Class"/>
-    </event>
-
-    <event id="MetaspaceGCThreshold" path="vm/gc/metaspace/gc_threshold" label="Metaspace GC Threshold" is_instant="true">
-      <value type="BYTES64" field="oldValue" label="Old Value" />
-      <value type="BYTES64" field="newValue" label="New Value" />
-      <value type="GCTHRESHOLDUPDATER" field="updater" label="Updater" />
-    </event>
-
-    <event id="MetaspaceAllocationFailure" path="vm/gc/metaspace/allocation_failure" label="Metaspace Allocation Failure" is_instant="true" has_stacktrace="true">
-      <value type="CLASS" field="classLoader" label="Class Loader" />
-      <value type="BOOLEAN" field="anonymousClassLoader" label="Anonymous Class Loader" />
-      <value type="BYTES64" field="size" label="Size" />
-      <value type="METADATATYPE" field="metadataType" label="Metadata Type" />
-      <value type="METASPACEOBJTYPE" field="metaspaceObjectType" label="Metaspace Object Type" />
-    </event>
-
-    <event id="MetaspaceOOM" path="vm/gc/metaspace/out_of_memory" label="Metaspace Out of Memory" is_instant="true" has_stacktrace="true">
-      <value type="CLASS" field="classLoader" label="Class Loader" />
-      <value type="BOOLEAN" field="anonymousClassLoader" label="Anonymous Class Loader" />
-      <value type="BYTES64" field="size" label="Size" />
-      <value type="METADATATYPE" field="metadataType" label="Metadata Type" />
-      <value type="METASPACEOBJTYPE" field="metaspaceObjectType" label="Metaspace Object Type" />
-    </event>
-
-    <event id="MetaspaceChunkFreeListSummary" path="vm/gc/metaspace/chunk_free_list_summary" label="Metaspace Chunk Free List Summary" is_instant="true">
-      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
-      <value type="GCWHEN" field="when" label="When" />
-      <value type="METADATATYPE" field="metadataType" label="Metadata Type" />
-      <value type="ULONG" field="specializedChunks" label="Specialized Chunks" />
-      <value type="BYTES64" field="specializedChunksTotalSize" label="Specialized Chunks Total Size" />
-      <value type="ULONG" field="smallChunks" label="Small Chunks" />
-      <value type="BYTES64" field="smallChunksTotalSize" label="Small Chunks Total Size" />
-      <value type="ULONG" field="mediumChunks" label="Medium Chunks" />
-      <value type="BYTES64" field="mediumChunksTotalSize" label="Medium Chunks Total Size" />
-      <value type="ULONG" field="humongousChunks" label="Humongous Chunks" />
-      <value type="BYTES64" field="humongousChunksTotalSize" label="Humongous Chunks Total Size" />
-    </event>
-
-    <event id="PSHeapSummary" path="vm/gc/heap/ps_summary" label="Parallel Scavenge Heap Summary" is_instant="true">
-      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
-      <value type="GCWHEN" field="when" label="When" />
-
-      <structvalue type="VirtualSpace" field="oldSpace" label="Old Space"/>
-      <structvalue type="ObjectSpace" field="oldObjectSpace" label="Old Object Space"/>
-
-      <structvalue type="VirtualSpace" field="youngSpace" label="Young Space"/>
-      <structvalue type="ObjectSpace" field="edenSpace" label="Eden Space"/>
-      <structvalue type="ObjectSpace" field="fromSpace" label="From Space"/>
-      <structvalue type="ObjectSpace" field="toSpace" label="To Space"/>
-    </event>
-
-    <event id="GCGarbageCollection" path="vm/gc/collector/garbage_collection" label="Garbage Collection"
-           description="Garbage collection performed by the JVM">
-      <value type="UINT" field="gcId"  label="GC ID" relation="GC_ID" />
-      <value type="GCNAME" field="name" label="Name" description="The name of the Garbage Collector" />
-      <value type="GCCAUSE" field="cause" label="Cause" description="The reason for triggering this Garbage Collection" />
-      <value type="TICKSPAN" field="sumOfPauses" label="Sum of Pauses" description="Sum of all the times in which Java execution was paused during the garbage collection" />
-      <value type="TICKSPAN" field="longestPause" label="Longest Pause" description="Longest individual pause during the garbage collection" />
-    </event>
-
-    <event id="GCParallelOld" path="vm/gc/collector/parold_garbage_collection" label="Parallel Old Garbage Collection"
-           description="Extra information specific to Parallel Old Garbage Collections">
-      <value type="UINT" field="gcId"  label="GC ID" relation="GC_ID" />
-      <value type="ADDRESS" field="densePrefix" label="Dense Prefix" description="The address of the dense prefix, used when compacting" />
-    </event>
-
-    <event id="GCYoungGarbageCollection" path="vm/gc/collector/young_garbage_collection" label="Young Garbage Collection"
-           description="Extra information specific to Young Garbage Collections">
-      <value type="UINT" field="gcId"  label="GC ID" relation="GC_ID" />
-      <value type="UINT" field="tenuringThreshold" label="Tenuring Threshold" />
-    </event>
-
-    <event id="GCOldGarbageCollection" path="vm/gc/collector/old_garbage_collection" label="Old Garbage Collection"
-           description="Extra information specific to Old Garbage Collections">
-      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
-    </event>
-
-    <event id="GCG1GarbageCollection" path="vm/gc/collector/g1_garbage_collection" label="G1 Garbage Collection"
-           description="Extra information specific to G1 Garbage Collections">
-      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
-      <value type="G1YCTYPE" field="type" label="Type" />
-    </event>
-
-    <event id="EvacuationInfo" path="vm/gc/detailed/evacuation_info" label="Evacuation Information" is_instant="true">
-      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
-      <value type="UINT" field="cSetRegions" label="Collection Set Regions"/>
-      <value type="BYTES64" field="cSetUsedBefore" label="Collection Set Before" description="Memory usage before GC in the collection set regions"/>
-      <value type="BYTES64" field="cSetUsedAfter" label="Collection Set After" description="Memory usage after GC in the collection set regions"/>
-      <value type="UINT" field="allocationRegions" label="Allocation Regions" description="Regions chosen as allocation regions during evacuation (includes survivors and old space regions)"/>
-      <value type="BYTES64" field="allocRegionsUsedBefore" label="Alloc Regions Before" description="Memory usage before GC in allocation regions"/>
-      <value type="BYTES64" field="allocRegionsUsedAfter" label="Alloc Regions After" description="Memory usage after GC in allocation regions"/>
-      <value type="BYTES64" field="bytesCopied" label="Bytes Copied"/>
-      <value type="UINT" field="regionsFreed" label="Regions Freed"/>
-    </event>
-
-    <event id="GCReferenceStatistics" path="vm/gc/reference/statistics"
-           label="GC Reference Statistics" is_instant="true"
-           description="Total count of processed references during GC">
-      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
-      <value type="REFERENCETYPE" field="type" label="Type" />
-      <value type="ULONG" field="count" label="Total Count" />
-    </event>
-
-    <struct id="CopyFailed">
-      <value type="ULONG" field="objectCount" label="Object Count"/>
-      <value type="BYTES64" field="firstSize" label="First Failed Object Size"/>
-      <value type="BYTES64" field="smallestSize" label="Smallest Failed Object Size"/>
-      <value type="BYTES64" field="totalSize" label="Total Object Size"/>
-    </struct>
-
-    <event id="ObjectCountAfterGC" path="vm/gc/detailed/object_count_after_gc" is_instant="true" label="Object Count after GC">
-      <value type="UINT" field="gcId"  label="GC ID" relation="GC_ID" />
-      <value type="CLASS" field="class" label="Class" />
-      <value type="LONG" field="count" label="Count" />
-      <value type="BYTES64" field="totalSize" label="Total Size" />
-    </event>
-
-    <event id="PromotionFailed" path="vm/gc/detailed/promotion_failed" label="Promotion Failed" is_instant="true"
-           description="Promotion of an object failed">
-      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
-      <structvalue type="CopyFailed" field="data" label="Data"/>
-      <value type="OSTHREAD" field="thread" label="Running thread"/>
-    </event>
-
-    <event id="EvacuationFailed" path="vm/gc/detailed/evacuation_failed" label="Evacuation Failed" is_instant="true"
-           description="Evacuation of an object failed">
-      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
-      <structvalue type="CopyFailed" field="data" label="Data"/>
-    </event>
-
-    <event id="ConcurrentModeFailure" path="vm/gc/detailed/concurrent_mode_failure" label="Concurrent Mode Failure"
-           is_instant="true" description="Concurrent Mode failed">
-      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
-    </event>
-
-    <event id="GCPhasePause" path="vm/gc/phases/pause" label="GC Phase Pause">
-      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
-      <value type="UTF8" field="name" label="Name" />
-    </event>
-
-    <event id="GCPhasePauseLevel1" path="vm/gc/phases/pause_level_1" label="GC Phase Pause Level 1">
-      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
-      <value type="UTF8" field="name" label="Name" />
-    </event>
-
-    <event id="GCPhasePauseLevel2" path="vm/gc/phases/pause_level_2" label="GC Phase Pause Level 2">
-      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
-      <value type="UTF8" field="name" label="Name" />
-    </event>
-
-    <event id="GCPhasePauseLevel3" path="vm/gc/phases/pause_level_3" label="GC Phase Pause Level 3">
-      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
-      <value type="UTF8" field="name" label="Name" />
-    </event>
-
-    <event id="AllocationRequiringGC" path="vm/gc/detailed/allocation_requiring_gc" label="Allocation Requiring GC"
-           has_thread="true" has_stacktrace="true"  is_instant="true">
-      <value type="UINT" field="gcId"  label="Pending GC ID" relation="GC_ID" />
-      <value type="BYTES64" field="size" label="Allocation Size" />
-    </event>
-
-    <!-- Compiler events -->
-
-    <event id="Compilation" path="vm/compiler/compilation" label="Compilation"
-         has_thread="true" is_requestable="false" is_constant="false">
-      <value type="METHOD" field="method" label="Java Method"/>
-      <value type="UINT" field="compileID" label="Compilation ID" relation="COMP_ID"/>
-      <value type="USHORT" field="compileLevel" label="Compilation Level"/>
-      <value type="BOOLEAN" field="succeded" label="Succeeded"/>
-      <value type="BOOLEAN" field="isOsr" label="On Stack Replacement"/>
-      <value type="BYTES" field="codeSize" label="Compiled Code Size"/>
-      <value type="BYTES" field="inlinedBytes" label="Inlined Code Size"/>
-    </event>
-
-    <event id="CompilerPhase" path="vm/compiler/phase" label="Compiler Phase"
-            has_thread="true" is_requestable="false" is_constant="false">
-      <value type="COMPILERPHASETYPE" field="phase" label="Compile Phase"/>
-      <value type="UINT" field="compileID" label="Compilation ID" relation="COMP_ID"/>
-      <value type="USHORT" field="phaseLevel" label="Phase Level"/>
-    </event>
-
-    <event id="CompilerFailure" path="vm/compiler/failure" label="Compilation Failure"
-            has_thread="true" is_requestable="false" is_constant="false" is_instant="true">
-      <value type="UTF8" field="failure" label="Message"/>
-      <value type="UINT" field="compileID" label="Compilation ID" relation="COMP_ID"/>
-    </event>
-
-    <!-- Code sweeper events -->
-
-    <event id="SweepCodeCache" path="vm/code_sweeper/sweep" label="Sweep Code Cache"
-       has_thread="true" is_requestable="false" is_constant="false">
-      <value type="INTEGER" field="sweepIndex" label="Sweep Index" relation="SWEEP_ID"/>
-      <value type="USHORT" field="sweepFractionIndex" label="Fraction Index"/>
-      <value type="UINT" field="sweptCount" label="Methods Swept"/>
-      <value type="UINT" field="flushedCount" label="Methods Flushed"/>
-      <value type="UINT" field="markedCount" label="Methods Reclaimed"/>
-      <value type="UINT" field="zombifiedCount" label="Methods Zombified"/>
-    </event>
-
-    <!-- Code cache events -->
-
-    <event id="CodeCacheFull" path="vm/code_cache/full" label="Code Cache Full"
-         has_thread="true" is_requestable="false" is_constant="false" is_instant="true">
-      <value type="ADDRESS" field="startAddress" label="Start Address"/>
-      <value type="ADDRESS" field="commitedTopAddress" label="Commited Top"/>
-      <value type="ADDRESS" field="reservedTopAddress" label="Reserved Top"/>
-      <value type="INTEGER" field="entryCount" label="Entries"/>
-      <value type="INTEGER" field="methodCount" label="Methods"/>
-      <value type="INTEGER" field="adaptorCount" label="Adaptors"/>
-      <value type="BYTES64" field="unallocatedCapacity" label="Unallocated"/>
-      <value type="INTEGER" field="fullCount" label="Full Count"/>
-    </event>
-
-    <event id="ExecuteVMOperation" path="vm/runtime/execute_vm_operation" label="VM Operation"
-        description="Execution of a VM Operation" has_thread="true">
-      <value type="VMOPERATIONTYPE" field="operation" label="Operation" />
-      <value type="BOOLEAN" field="safepoint" label="At Safepoint" description="If the operation occured at a safepoint."/>
-      <value type="BOOLEAN" field="blocking" label="Caller Blocked" description="If the calling thread was blocked until the operation was complete."/>
-      <value type="OSTHREAD" field="caller" label="Caller" transition="FROM" description="Thread requesting operation. If non-blocking, will be set to 0 indicating thread is unknown."/>
-    </event>
-
-    <!-- Allocation events -->
-    <event id="AllocObjectInNewTLAB" path="java/object_alloc_in_new_TLAB" label="Allocation in new TLAB"
-        description="Allocation in new Thread Local Allocation Buffer" has_thread="true" has_stacktrace="true" is_instant="true">
-      <value type="CLASS" field="class" label="Class" description="Class of allocated object"/>
-      <value type="BYTES64" field="allocationSize" label="Allocation Size"/>
-      <value type="BYTES64" field="tlabSize" label="TLAB Size"/>
-    </event>
-
-    <event id="AllocObjectOutsideTLAB" path="java/object_alloc_outside_TLAB" label="Allocation outside TLAB"
-        description="Allocation outside Thread Local Allocation Buffers" has_thread="true" has_stacktrace="true" is_instant="true">
-      <value type="CLASS" field="class" label="Class" description="Class of allocated object"/>
-      <value type="BYTES64" field="allocationSize" label="Allocation Size"/>
-    </event>
-  </events>
-
-  <xi:include href="../../../closed/share/vm/trace/traceeventtypes.xml" xmlns:xi="http://www.w3.org/2001/XInclude">
-    <xi:fallback/>
-  </xi:include>
-
-  <xi:include href="../../../closed/share/vm/trace/traceevents.xml" xmlns:xi="http://www.w3.org/2001/XInclude">
-    <xi:fallback/>
-  </xi:include>
-</trace>
--- a/src/share/vm/trace/traceBackend.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-#ifndef SHARE_VM_TRACE_TRACEBACKEND_HPP
-#define SHARE_VM_TRACE_TRACEBACKEND_HPP
-
-#include "utilities/macros.hpp"
-#if INCLUDE_TRACE
-#include "runtime/globals.hpp"
-#include "runtime/os.hpp"
-#include "trace/traceTime.hpp"
-#include "tracefiles/traceEventIds.hpp"
-
-class TraceBackend {
-public:
-  static bool enabled(void) {
-    return EnableTracing;
-  }
-
-  static bool is_event_enabled(TraceEventId id) {
-    return enabled();
-  }
-
-  static TracingTime time() {
-    return os::elapsed_counter();
-  }
-
-  static void on_unloading_classes(void) {
-  }
-};
-
-class TraceThreadData {
-public:
-    TraceThreadData() {}
-};
-
-typedef TraceBackend Tracing;
-
-#else // !INCLUDE_TRACE
-#include "trace/noTraceBackend.hpp"
-#endif // INCLUDE_TRACE
-#endif // SHARE_VM_TRACE_TRACEBACKEND_HPP
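
For context, the generated event classes consult this backend roughly as follows; a minimal sketch only, where TraceExampleEvent is a hypothetical event id:

    // Sketch: guard traced work behind the backend's enablement check, then timestamp it.
    if (Tracing::is_event_enabled(TraceExampleEvent)) {   // hypothetical event id
      TracingTime start = Tracing::time();                // wraps os::elapsed_counter()
      // ... perform the traced work and store 'start' in the event ...
    }
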
--- a/src/share/vm/trace/traceDataTypes.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_TRACE_TRACEDATATYPES_HPP
-#define SHARE_VM_TRACE_TRACEDATATYPES_HPP
-
-#include <stddef.h>
-
-#include "utilities/globalDefinitions.hpp"
-
-enum {
-  CONTENT_TYPE_NONE             = 0,
-  CONTENT_TYPE_BYTES            = 1,
-  CONTENT_TYPE_EPOCHMILLIS      = 2,
-  CONTENT_TYPE_MILLIS           = 3,
-  CONTENT_TYPE_NANOS            = 4,
-  CONTENT_TYPE_TICKS            = 5,
-  CONTENT_TYPE_ADDRESS          = 6,
-
-  CONTENT_TYPE_OSTHREAD,
-  CONTENT_TYPE_JAVALANGTHREAD,
-  CONTENT_TYPE_STACKTRACE,
-  CONTENT_TYPE_CLASS,
-  CONTENT_TYPE_PERCENTAGE,
-
-  JVM_CONTENT_TYPES_START       = 30,
-  JVM_CONTENT_TYPES_END         = 100
-};
-
-enum ReservedEvent {
-  EVENT_PRODUCERS,
-  EVENT_CHECKPOINT,
-  EVENT_BUFFERLOST,
-
-  NUM_RESERVED_EVENTS
-};
-
-typedef enum ReservedEvent ReservedEvent;
-
-typedef u8 classid;
-typedef u8 stacktraceid;
-typedef u8 methodid;
-typedef u8 fieldid;
-
-class TraceUnicodeString;
-
-#endif // SHARE_VM_TRACE_TRACEDATATYPES_HPP
-
--- a/src/share/vm/trace/traceEvent.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,157 +0,0 @@
-/*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_TRACE_TRACEEVENT_HPP
-#define SHARE_VM_TRACE_TRACEEVENT_HPP
-
-#include "utilities/macros.hpp"
-
-enum EventStartTime {
-  UNTIMED,
-  TIMED
-};
-
-#if INCLUDE_TRACE
-#include "trace/traceBackend.hpp"
-#include "trace/tracing.hpp"
-#include "tracefiles/traceEventIds.hpp"
-#include "tracefiles/traceTypes.hpp"
-#include "utilities/ticks.hpp"
-
-template<typename T>
-class TraceEvent : public StackObj {
- private:
-  bool _started;
-#ifdef ASSERT
-  bool _committed;
-  bool _cancelled;
- protected:
-  bool _ignore_check;
-#endif
-
- protected:
-  jlong _startTime;
-  jlong _endTime;
-
-  void set_starttime(const TracingTime& time) {
-    _startTime = time;
-  }
-
-  void set_endtime(const TracingTime& time) {
-    _endTime = time;
-  }
-
- public:
-  TraceEvent(EventStartTime timing=TIMED) :
-    _startTime(0),
-    _endTime(0),
-    _started(false)
-#ifdef ASSERT
-    ,
-    _committed(false),
-    _cancelled(false),
-    _ignore_check(false)
-#endif
-  {
-    if (T::is_enabled()) {
-      _started = true;
-      if (timing == TIMED && !T::isInstant) {
-        static_cast<T *>(this)->set_starttime(Tracing::time());
-      }
-    }
-  }
-
-  static bool is_enabled() {
-    return Tracing::is_event_enabled(T::eventId);
-  }
-
-  bool should_commit() {
-    return _started;
-  }
-
-  void ignoreCheck() {
-    DEBUG_ONLY(_ignore_check = true);
-  }
-
-  void commit() {
-    if (!should_commit()) {
-        cancel();
-        return;
-    }
-    if (_endTime == 0) {
-      static_cast<T*>(this)->set_endtime(Tracing::time());
-    }
-    if (static_cast<T*>(this)->should_write()) {
-      static_cast<T*>(this)->writeEvent();
-    }
-    set_commited();
-  }
-
-  void set_starttime(const Ticks& time) {
-    _startTime = time.value();
-  }
-
-  void set_endtime(const Ticks& time) {
-    _endTime = time.value();
-  }
-
-  TraceEventId id() const {
-    return T::eventId;
-  }
-
-  bool is_instant() const {
-    return T::isInstant;
-  }
-
-  bool is_requestable() const {
-    return T::isRequestable;
-  }
-
-  bool has_thread() const {
-    return T::hasThread;
-  }
-
-  bool has_stacktrace() const {
-    return T::hasStackTrace;
-  }
-
-  void cancel() {
-    assert(!_committed && !_cancelled, "event was already committed/cancelled");
-    DEBUG_ONLY(_cancelled = true);
-  }
-
-  void set_commited() {
-    assert(!_committed, "event has already been committed");
-    DEBUG_ONLY(_committed = true);
-  }
-
-  ~TraceEvent() {
-    if (_started) {
-      assert(_ignore_check || _committed || _cancelled, "event was not committed/cancelled");
-    }
-  }
-};
-
-#endif // INCLUDE_TRACE
-#endif // SHARE_VM_TRACE_TRACEEVENT_HPP
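
The CRTP template above is the base class for every generated event; the intended lifecycle is construct, test should_commit(), set fields, commit. A minimal sketch of that pattern, where EventExample and set_value are hypothetical generated names:

    EventExample event;            // TIMED by default: records the start time if the event is enabled
    if (event.should_commit()) {
      event.set_value(42);         // hypothetical generated setter
      event.commit();              // stamps the end time, writes the event, marks it committed
    }
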
--- a/src/share/vm/trace/traceEventClasses.xsl	Thu Aug 01 03:44:03 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,251 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!--
- Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
- DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-
- This code is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License version 2 only, as
- published by the Free Software Foundation.
-
- This code is distributed in the hope that it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- version 2 for more details (a copy is included in the LICENSE file that
- accompanied this code).
-
- You should have received a copy of the GNU General Public License version
- 2 along with this work; if not, write to the Free Software Foundation,
- Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-
- Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- or visit www.oracle.com if you need additional information or have any
- questions.
--->
-
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
-<xsl:import href="xsl_util.xsl"/>
-<xsl:output method="text" indent="no" omit-xml-declaration="yes"/>
-
-<xsl:template match="/">
-  <xsl:call-template name="file-header"/>
-
-#ifndef TRACEFILES_TRACEEVENTCLASSES_HPP
-#define TRACEFILES_TRACEEVENTCLASSES_HPP
-
-// On purpose outside the INCLUDE_TRACE
-// Some parts of traceEvent.hpp are used outside of
-// INCLUDE_TRACE
-
-#include "tracefiles/traceTypes.hpp"
-#include "trace/traceEvent.hpp"
-#include "utilities/macros.hpp"
-#include "utilities/ticks.hpp"
-#if INCLUDE_TRACE
-#include "trace/traceStream.hpp"
-#include "utilities/ostream.hpp"
-
-  <xsl:apply-templates select="trace/events/struct" mode="trace"/>
-  <xsl:apply-templates select="trace/events/event" mode="trace"/>
-
-#else // !INCLUDE_TRACE
-
-class TraceEvent {
-public:
-  TraceEvent() {}
-  void set_starttime(const Ticks&amp; time) {}
-  void set_endtime(const Ticks&amp; time) {}
-  bool should_commit() const { return false; }
-  void commit() const {}
-};
-
-  <xsl:apply-templates select="trace/events/struct" mode="empty"/>
-  <xsl:apply-templates select="trace/events/event" mode="empty"/>
-
-#endif // INCLUDE_TRACE
-#endif // TRACEFILES_TRACEEVENTCLASSES_HPP
-</xsl:template>
-
-<xsl:template match="struct" mode="trace">
-struct TraceStruct<xsl:value-of select="@id"/>
-{
-private:
-<xsl:apply-templates select="value" mode="write-fields"/>
-public:
-<xsl:apply-templates select="value" mode="write-setters"/>
-
-  void writeStruct(TraceStream&amp; ts) {
-<xsl:apply-templates select="value" mode="write-data"/>
-  }
-};
-
-</xsl:template>
-
-<xsl:template match="struct" mode="empty">
-struct TraceStruct<xsl:value-of select="@id"/> 
-{
-public:
-<xsl:apply-templates select="value" mode="write-empty-setters"/>
-};
-</xsl:template>
-
-
-<xsl:template match="event" mode="empty">
-  <xsl:value-of select="concat('class Event', @id, ' : public TraceEvent')"/>
-{
- public:
-<xsl:value-of select="concat('  Event', @id, '(bool ignore=true) {}')"/>
-<xsl:text>
-</xsl:text>
-
-<xsl:apply-templates select="value|structvalue|transition_value|relation" mode="write-empty-setters"/>
-};
-
-</xsl:template>
-
-
-<xsl:template match="event" mode="trace">
-  <xsl:value-of select="concat('class Event', @id, ' : public TraceEvent&lt;Event', @id, '&gt;')"/>
-{
- public:
-  static const bool hasThread = <xsl:value-of select="@has_thread"/>;
-  static const bool hasStackTrace = <xsl:value-of select="@has_stacktrace"/>;
-  static const bool isInstant = <xsl:value-of select="@is_instant"/>;
-  static const bool isRequestable = <xsl:value-of select="@is_requestable"/>;
-  static const TraceEventId eventId = <xsl:value-of select="concat('Trace', @id, 'Event')"/>;
-
- private:
-<xsl:apply-templates select="value|structvalue|transition_value|relation" mode="write-fields"/>
-
-  void writeEventContent(void) {
-    TraceStream ts(*tty);
-    ts.print("<xsl:value-of select="@label"/>: [");
-<xsl:apply-templates select="value|structvalue" mode="write-data"/>
-    ts.print("]\n");
-  }
-
- public:
-<xsl:apply-templates select="value|structvalue|transition_value|relation" mode="write-setters"/>
-
-  bool should_write(void) {
-    return true;
-  }
-<xsl:text>
-
-</xsl:text>
-  <xsl:value-of select="concat('  Event', @id, '(EventStartTime timing=TIMED) : TraceEvent&lt;Event', @id, '&gt;(timing) {}', $newline)"/>
-  void writeEvent(void) {
-    if (UseLockedTracing) {
-      ttyLocker lock;
-      writeEventContent();
-    } else {
-      writeEventContent();
-    }
-  }
-};
-
-</xsl:template>
-
-<xsl:template match="value|transition_value|relation" mode="write-empty-setters">
-  <xsl:param name="cls"/>
-  <xsl:variable name="type" select="@type"/>
-  <xsl:variable name="wt" select="//primary_type[@symbol=$type]/@type"/>
-  <xsl:value-of select="concat('  void set_', @field, '(', $wt, ' value) { }')"/>
-  <xsl:if test="position() != last()">
-    <xsl:text>
-</xsl:text>
-  </xsl:if>
-</xsl:template>
-
-<xsl:template match="structvalue" mode="write-empty-setters">
-  <xsl:param name="cls"/>
-  <xsl:value-of select="concat('  void set_', @field, '(const TraceStruct', @type, '&amp; value) { }')"/>
-  <xsl:if test="position() != last()">
-    <xsl:text>
-</xsl:text>
-  </xsl:if>
-</xsl:template>
-
-<xsl:template match="value[@type='TICKS']" mode="write-setters">
-#if INCLUDE_TRACE
-<xsl:value-of select="concat('  void set_', @field, '(const Ticks&amp; time) { _', @field, ' = time; }')"/>
-#else
-<xsl:value-of select="concat('  void set_', @field, '(const Ticks&amp; ignore) {}')"/>
-#endif
-</xsl:template>
-
-<xsl:template match="value[@type='TICKSPAN']" mode="write-setters">
-#if INCLUDE_TRACE
-  <xsl:value-of select="concat('  void set_', @field, '(const Tickspan&amp; time) { _', @field, ' = time; }')"/>
-#else
-  <xsl:value-of select="concat('  void set_', @field, '(const Tickspan&amp; ignore) {}')"/>
-#endif
-</xsl:template>
-
-
-<xsl:template match="value" mode="write-fields">
-  <xsl:variable name="type" select="@type"/>
-  <xsl:variable name="wt" select="//primary_type[@symbol=$type]/@type"/>
-  <xsl:value-of select="concat('  ', $wt, ' _', @field, ';')"/>
-  <xsl:if test="position() != last()">
-    <xsl:text> 
-</xsl:text>
-  </xsl:if>
-</xsl:template>
-
-<xsl:template match="structvalue" mode="write-fields">
-  <xsl:value-of select="concat('  TraceStruct', @type, ' _', @field, ';')"/>
-  <xsl:text>
-</xsl:text>
-</xsl:template>
-
-<xsl:template match="value|transition_value|relation" mode="write-setters">
-  <xsl:param name="cls"/>
-  <xsl:variable name="type" select="@type"/>
-  <xsl:variable name="wt" select="//primary_type[@symbol=$type]/@type"/>
-  <xsl:value-of select="concat('  void set_', @field, '(', $wt, ' value) { this->_', @field, ' = value; }')"/>
-  <xsl:if test="position() != last()">
-    <xsl:text>
-</xsl:text>
-  </xsl:if>
-</xsl:template>
-
-<xsl:template match="structvalue" mode="write-setters">
-  <xsl:param name="cls"/>
-  <xsl:value-of select="concat('  void set_', @field, '(const TraceStruct', @type, '&amp; value) { this->_', @field, ' = value; }')"/>
-  <xsl:if test="position() != last()">
-    <xsl:text>
-</xsl:text>
-  </xsl:if>
-</xsl:template>
-
-<xsl:template match="value" mode="write-data">
-  <xsl:variable name="type" select="@type"/>
-  <xsl:variable name="wt" select="//primary_type[@symbol=$type]/@writetype"/>
-  <xsl:choose>
-    <xsl:when test="@type='TICKSPAN'">
-      <xsl:value-of select="concat('    ts.print_val(&quot;', @label, '&quot;, _', @field, '.value());')"/>
-    </xsl:when>
-    <xsl:when test="@type='TICKS'">
-      <xsl:value-of select="concat('    ts.print_val(&quot;', @label, '&quot;, _', @field, '.value());')"/>
-    </xsl:when>
-    <xsl:otherwise>
-      <xsl:value-of select="concat('    ts.print_val(&quot;', @label, '&quot;, _', @field, ');')"/>
-    </xsl:otherwise>
-  </xsl:choose>
-  <xsl:if test="position() != last()">
-    <xsl:text>
-    ts.print(", ");
-</xsl:text>
-  </xsl:if>
-</xsl:template>
-
-<xsl:template match="structvalue" mode="write-data">
-  <xsl:value-of select="concat('    _', @field, '.writeStruct(ts);')"/>
-  <xsl:if test="position() != last()">
-    <xsl:text>
-    ts.print(", ");
-</xsl:text>
-  </xsl:if>
-</xsl:template>
-
-</xsl:stylesheet>
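
The stylesheet above expands each <event> element of the trace XML into a concrete class. For a hypothetical event with has_thread="true" and a single UINT field named "id", the generated code has roughly this shape (a sketch of the output, not actual generated source):

    class EventExample : public TraceEvent<EventExample> {
     public:
      static const bool hasThread = true;
      static const bool hasStackTrace = false;
      static const bool isInstant = false;
      static const bool isRequestable = false;
      static const TraceEventId eventId = TraceExampleEvent;   // hypothetical id
     private:
      unsigned _id;                                            // UINT maps to 'unsigned'
      void writeEventContent(void) {
        TraceStream ts(*tty);
        ts.print("Example: [");
        ts.print_val("Id", _id);
        ts.print("]\n");
      }
     public:
      void set_id(unsigned value) { this->_id = value; }
      bool should_write(void) { return true; }
      EventExample(EventStartTime timing=TIMED) : TraceEvent<EventExample>(timing) {}
      void writeEvent(void) {
        if (UseLockedTracing) {
          ttyLocker lock;
          writeEventContent();
        } else {
          writeEventContent();
        }
      }
    };
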
--- a/src/share/vm/trace/traceEventIds.xsl	Thu Aug 01 03:44:03 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,72 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!--
- Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
- DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-
- This code is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License version 2 only, as
- published by the Free Software Foundation.
-
- This code is distributed in the hope that it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- version 2 for more details (a copy is included in the LICENSE file that
- accompanied this code).
-
- You should have received a copy of the GNU General Public License version
- 2 along with this work; if not, write to the Free Software Foundation,
- Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-
- Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- or visit www.oracle.com if you need additional information or have any
- questions.
--->
-
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
-<xsl:import href="xsl_util.xsl"/>
-<xsl:output method="text" indent="no" omit-xml-declaration="yes"/>
-
-<xsl:template match="/">
-  <xsl:call-template name="file-header"/>
-
-#ifndef TRACEFILES_TRACEEVENTIDS_HPP
-#define TRACEFILES_TRACEEVENTIDS_HPP
-
-#include "utilities/macros.hpp"
-#if INCLUDE_TRACE
-#include "trace/traceDataTypes.hpp"
-
-/**
- * Enum of the event types in the JVM
- */
-enum TraceEventId {
-  _traceeventbase = (NUM_RESERVED_EVENTS-1), // Make sure we start at right index.
-  
-  // Events -> enum entry
-<xsl:for-each select="trace/events/event">
-  <xsl:value-of select="concat('  Trace', @id, 'Event,', $newline)"/>
-</xsl:for-each>
-  MaxTraceEventId
-};
-
-/**
- * Struct types in the JVM
- */
-enum TraceStructId {
-<xsl:for-each select="trace/types/content_types/*">
-  <xsl:value-of select="concat('  Trace', @id, 'Struct,', $newline)"/>
-</xsl:for-each>
-<xsl:for-each select="trace/events/*">
-  <xsl:value-of select="concat('  Trace', @id, 'Struct,', $newline)"/>
-</xsl:for-each>
-  MaxTraceStructId
-};
-
-typedef enum TraceEventId  TraceEventId;
-typedef enum TraceStructId TraceStructId;
-
-#endif // INCLUDE_TRACE
-#endif // TRACEFILES_TRACEEVENTIDS_HPP
-</xsl:template>
-
-</xsl:stylesheet>
--- a/src/share/vm/trace/traceMacros.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_TRACE_TRACEMACROS_HPP
-#define SHARE_VM_TRACE_TRACEMACROS_HPP
-
-#define EVENT_THREAD_EXIT(thread)
-#define EVENT_THREAD_DESTRUCT(thread)
-
-#define TRACE_INIT_ID(k)
-#define TRACE_DATA TraceThreadData
-
-#define TRACE_START() JNI_OK
-#define TRACE_INITIALIZE() JNI_OK
-
-#define TRACE_DEFINE_KLASS_METHODS typedef int ___IGNORED_hs_trace_type1
-#define TRACE_DEFINE_KLASS_TRACE_ID typedef int ___IGNORED_hs_trace_type2
-#define TRACE_DEFINE_OFFSET typedef int ___IGNORED_hs_trace_type3
-#define TRACE_ID_OFFSET in_ByteSize(0); ShouldNotReachHere()
-#define TRACE_TEMPLATES(template)
-#define TRACE_INTRINSICS(do_intrinsic, do_class, do_name, do_signature, do_alias)
-
-#endif // SHARE_VM_TRACE_TRACEMACROS_HPP
--- a/src/share/vm/trace/traceStream.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,114 +0,0 @@
-/*
- * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_TRACE_TRACESTREAM_HPP
-#define SHARE_VM_TRACE_TRACESTREAM_HPP
-
-#include "utilities/macros.hpp"
-#if INCLUDE_TRACE
-#include "memory/resourceArea.hpp"
-#include "oops/klass.hpp"
-#include "oops/method.hpp"
-#include "oops/symbol.hpp"
-#include "utilities/ostream.hpp"
-
-class TraceStream : public StackObj {
- private:
-  outputStream& _st;
-
- public:
-  TraceStream(outputStream& stream): _st(stream) {}
-
-  void print_val(const char* label, u1 val) {
-    _st.print("%s = " UINT32_FORMAT, label, val);
-  }
-
-  void print_val(const char* label, u2 val) {
-    _st.print("%s = " UINT32_FORMAT, label, val);
-  }
-
-  void print_val(const char* label, s2 val) {
-    _st.print("%s = " INT32_FORMAT, label, val);
-  }
-
-  void print_val(const char* label, u4 val) {
-    _st.print("%s = " UINT32_FORMAT, label, val);
-  }
-
-  void print_val(const char* label, s4 val) {
-    _st.print("%s = " INT32_FORMAT, label, val);
-  }
-
-  void print_val(const char* label, u8 val) {
-    _st.print("%s = " UINT64_FORMAT, label, val);
-  }
-
-  void print_val(const char* label, s8 val) {
-    _st.print("%s = " INT64_FORMAT, label, (int64_t) val);
-  }
-
-  void print_val(const char* label, bool val) {
-    _st.print("%s = %s", label, val ? "true" : "false");
-  }
-
-  void print_val(const char* label, float val) {
-    _st.print("%s = %f", label, val);
-  }
-
-  void print_val(const char* label, double val) {
-    _st.print("%s = %f", label, val);
-  }
-
-  void print_val(const char* label, const Klass* const val) {
-    ResourceMark rm;
-    const char* description = "NULL";
-    if (val != NULL) {
-      Symbol* name = val->name();
-      if (name != NULL) {
-        description = name->as_C_string();
-      }
-    }
-    _st.print("%s = %s", label, description);
-  }
-
-  void print_val(const char* label, const Method* const val) {
-    ResourceMark rm;
-    const char* description = "NULL";
-    if (val != NULL) {
-      description = val->name_and_sig_as_C_string();
-    }
-    _st.print("%s = %s", label, description);
-  }
-
-  void print_val(const char* label, const char* val) {
-    _st.print("%s = '%s'", label, val);
-  }
-
-  void print(const char* val) {
-    _st.print("%s", val);
-  }
-};
-
-#endif // INCLUDE_TRACE
-#endif // SHARE_VM_TRACE_TRACESTREAM_HPP
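
TraceStream simply renders "label = value" pairs onto an outputStream; a generated writeEventContent() uses it roughly like this (labels and values are illustrative):

    TraceStream ts(*tty);
    ts.print("Example: [");
    ts.print_val("Count", (u4) 10);    // prints: Count = 10
    ts.print(", ");
    ts.print_val("Enabled", true);     // prints: Enabled = true
    ts.print("]\n");
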
--- a/src/share/vm/trace/traceTime.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_TRACE_TRACETIME_HPP
-#define SHARE_VM_TRACE_TRACETIME_HPP
-
-#include "prims/jni.h"
-
-typedef jlong TracingTime;
-
-#endif // SHARE_VM_TRACE_TRACETIME_HPP
--- a/src/share/vm/trace/traceTypes.xsl	Thu Aug 01 03:44:03 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,73 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!--
- Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
- DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-
- This code is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License version 2 only, as
- published by the Free Software Foundation.
-
- This code is distributed in the hope that it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- version 2 for more details (a copy is included in the LICENSE file that
- accompanied this code).
-
- You should have received a copy of the GNU General Public License version
- 2 along with this work; if not, write to the Free Software Foundation,
- Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-
- Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- or visit www.oracle.com if you need additional information or have any
- questions.
--->
-
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
-<xsl:import href="xsl_util.xsl"/>
-<xsl:output method="text" indent="no" omit-xml-declaration="yes"/>
-
-<xsl:template match="/">
-  <xsl:call-template name="file-header"/>
-
-#ifndef TRACEFILES_TRACETYPES_HPP
-#define TRACEFILES_TRACETYPES_HPP
-
-#include "oops/symbol.hpp"
-#include "trace/traceDataTypes.hpp"
-#include "utilities/globalDefinitions.hpp"
-#include "utilities/ticks.hpp"
-
-enum JVMContentType {
-  _not_a_content_type = (JVM_CONTENT_TYPES_START - 1),
-  
-<xsl:for-each select="trace/types/content_types/content_type[@jvm_type]">
-  <xsl:value-of select="concat('  CONTENT_TYPE_', @jvm_type, ',',  $newline)"/>
-</xsl:for-each>
-  NUM_JVM_CONTENT_TYPES
-};
-
-
-enum JVMEventRelations {
-  JVM_REL_NOT_AVAILABLE = 0,
-  
-<xsl:for-each select="trace/relation_decls/relation_decl">
-  <xsl:value-of select="concat('  JVM_REL_', @id, ',', $newline)"/>
-</xsl:for-each>
-  NUM_EVENT_RELATIONS
-};
-
-/**
- * Create typedefs for the TRACE types:
- *   typedef s8 TYPE_LONG;
- *   typedef s4 TYPE_INTEGER;
- *   typedef const char * TYPE_STRING;
- *   ...
- */
-<xsl:for-each select="trace/types/primary_types/primary_type">
-typedef <xsl:value-of select="@type"/>  TYPE_<xsl:value-of select="@symbol"/>;
-</xsl:for-each>
-
-#endif // TRACEFILES_TRACETYPES_HPP
-</xsl:template>
-
-</xsl:stylesheet>
--- a/src/share/vm/trace/tracetypes.xml	Thu Aug 01 03:44:03 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,376 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!--
- Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
- DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-
- This code is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License version 2 only, as
- published by the Free Software Foundation.
-
- This code is distributed in the hope that it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- version 2 for more details (a copy is included in the LICENSE file that
- accompanied this code).
-
- You should have received a copy of the GNU General Public License version
- 2 along with this work; if not, write to the Free Software Foundation,
- Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-
- Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- or visit www.oracle.com if you need additional information or have any
- questions.
--->
-
-<!DOCTYPE types SYSTEM "trace.dtd">
-
-<!--
-
-Content types (complex) should create constant pool data
-in the recording.
-Currently at least, there is _NO_ verification that whatever
-writer you have is actually writing correctly. So BE CAREFUL!
-
-Declared with the 'content_type' tag.
-
-<type> is the ID type, i.e. the integer type that resolves this. Most often
-U4 or U8, but for example really small number constants, like GCTYPE uses U1.
-
-<content-type> is where it gets interesting. 'builtin_type' means we're
-defining how we resolve one of the trace built-in types (Class, Thread etc),
-jvm_type means defining a new one for our own use.
-
-Example: (GcMode)
-
-<content_type id="GCMode" hr_name="GC mode" type="U1" jvm_type="GCMODE">
-  <value type="UTF8" field="desc" description="Description"/>
-</content_type>
-
-This creates a content type CONTENT_TYPE_GCMODE
-The field type referencing it is u1 (U1), and the constant pool struct has one field, the name.
-
-Before we can use it we need also define a primary field data type:
-
-<primary_type symbol="GCMODE" datatype="U1" contenttype="NONE"
-              type="u8" sizeop="sizeof(u1)"/>
-
-Now we can use the content + data type in declaring event fields.
- -->
-
- <types>
-  <content_types>
-    <content_type id="Thread" hr_name="Thread"
-                  type="U4" builtin_type="OSTHREAD">
-      <value type="UTF8" field="name" label="Thread name"/>
-    </content_type>
-
-    <content_type id="VMThread" hr_name="VM Thread"
-                  type="U8" jvm_type="VMTHREAD">
-      <value type="OSTHREAD" field="thread" label="VM Thread"/>
-    </content_type>
-
-    <content_type id="JavaThread" hr_name="Java thread"
-                  type="U8" builtin_type="JAVALANGTHREAD">
-      <value type="OSTHREAD" field="thread" label="OS Thread ID"/>
-      <value type="BYTES64" field="allocInsideTla"
-             label="Allocated bytes inside TLAs"/>
-      <value type="BYTES64" field="allocOutsideTla"
-             label="Allocated bytes outside TLAs"/>
-      <value type="THREADGROUP" field="group" label="Java Thread Group"/>
-    </content_type>
-
-    <content_type id="ThreadGroup" hr_name="Thread group"
-                  type="U4" jvm_type="THREADGROUP">
-      <value type="THREADGROUP" field="parent" label="Parent"/>
-      <value type="UTF8" field="name" label="Name"/>
-    </content_type>
-
-    <content_type id="Class" hr_name="Java class"
-                  type="U8" builtin_type="CLASS">
-      <value type="CLASS" field="loaderClass" label="ClassLoader"/>
-      <value type="SYMBOL" field="name" label="Name"/>
-      <value type="SHORT" field="modifiers" label="Access modifiers"/>
-    </content_type>
-
-    <content_type id="Method" hr_name="Java method"
-                  type="U8" jvm_type="METHOD">
-      <value type="CLASS" field="class" label="Class"/>
-      <value type="SYMBOL" field="name" label="Name"/>
-      <value type="SYMBOL" field="signature" label="Signature"/>
-      <value type="SHORT" field="modifiers" label="Access modifiers"/>
-      <value type="BOOLEAN" field="hidden" label="Hidden"/>
-    </content_type>
-
-    <content_type id="UTFConstant" hr_name="UTF constant"
-                  type="U8" jvm_type="SYMBOL">
-      <value type="UTF8" field="utf8" label="UTF8 data"/>
-    </content_type>
-
-    <content_type id="ThreadState" hr_name="Java Thread State"
-                  type="U2" jvm_type="THREADSTATE">
-      <value type="UTF8" field="name" label="Name"/>
-    </content_type>
-
-    <content_type id="GCName" hr_name="GC Name"
-                  type="U1" jvm_type="GCNAME">
-      <value type="UTF8" field="name" label="name" />
-    </content_type>
-
-    <content_type id="GCCause" hr_name="GC Cause"
-                  type="U2" jvm_type="GCCAUSE">
-      <value type="UTF8" field="cause" label="cause" />
-    </content_type>
-
-    <content_type id="GCWhen" hr_name="GC When"
-                  type="U1" jvm_type="GCWHEN">
-      <value type="UTF8" field="when" label="when" />
-    </content_type>
-
-    <content_type id="G1YCType" hr_name="G1 YC Type"
-                  type="U1" jvm_type="G1YCTYPE">
-      <value type="UTF8" field="type" label="type" />
-    </content_type>
-
-    <content_type id="GCThresholdUpdater" hr_name="GC Treshold Updater"
-                  type="U1" jvm_type="GCTHRESHOLDUPDATER">
-      <value type="UTF8" field="updater" label="updater" />
-    </content_type>
-
-    <content_type id="ReferenceType" hr_name="Reference Type"
-                  type="U1" jvm_type="REFERENCETYPE">
-      <value type="UTF8" field="type" label="type" />
-    </content_type>
-
-    <content_type id="MetadataType" hr_name="Metadata Type"
-                  type="U1" jvm_type="METADATATYPE">
-      <value type="UTF8" field="type" label="type" />
-    </content_type>
-
-    <content_type id="MetaspaceObjectType" hr_name="Metaspace Object Type"
-                  type="U1" jvm_type="METASPACEOBJTYPE">
-      <value type="UTF8" field="type" label="type" />
-    </content_type>
-
-    <content_type id="NARROW_OOP_MODE" hr_name="Narrow Oop Mode"
-                  type="U1" jvm_type="NARROWOOPMODE">
-      <value type="UTF8" field="mode" label="mode" />
-    </content_type>
-
-    <content_type id="VMOperationType" hr_name="VM Operation Type"
-                  type="U2" jvm_type="VMOPERATIONTYPE">
-      <value type="UTF8" field="type" label="type" />
-    </content_type>
-
-    <content_type id="CompilerPhaseType" hr_name="Compiler Phase Type"
-                  type="U1" jvm_type="COMPILERPHASETYPE">
-      <value type="UTF8" field="phase" label="phase" />
-    </content_type>
-
-    <content_type id="FlagValueOrigin" hr_name="Flag Value Origin"
-                  type="U1" jvm_type="FLAGVALUEORIGIN">
-      <value type="UTF8" field="origin" label="origin" />
-    </content_type>
-
-  </content_types>
-
-
-  <primary_types>
-    <!--
-      - primary_type takes these attributes:
-      -   symbol      INTEGER, LONG etc
-      -   datatype    The trace datatype, see enum DataType
-      -   contenttype Either resolved content type or the semantic meaning
-      -   type        The actual type as used in structures etc
-      -   sizeop      A function/macro that can be applied on a single
-      -               struct value of type "type" and yield the factual byte
-      -               size we need to write.  The % is replaced by the value
-      -->
-
-    <!-- SIGNED 64bit -->
-    <primary_type symbol="LONG" datatype="LONG" contenttype="NONE"
-                  type="s8" sizeop="sizeof(s8)"/>
-
-    <!-- UNSIGNED 64bit -->
-    <primary_type symbol="ULONG" datatype="U8" contenttype="NONE"
-                  type="u8" sizeop="sizeof(u8)"/>
-
-    <!-- SIGNED 32bit -->
-    <primary_type symbol="INTEGER" datatype="INT" contenttype="NONE"
-                  type="s4" sizeop="sizeof(s4)"/>
-
-    <!-- UNSIGNED 32bit -->
-    <primary_type symbol="UINT" datatype="U4" contenttype="NONE"
-                  type="unsigned" sizeop="sizeof(unsigned)"/>
-
-    <!-- UNSIGNED 16bit -->
-    <primary_type symbol="USHORT" datatype="U2" contenttype="NONE"
-                  type="u2" sizeop="sizeof(u2)"/>
-
-    <!--  SIGNED 16bit -->
-    <primary_type symbol="SHORT" datatype="SHORT" contenttype="NONE"
-                  type="s2" sizeop="sizeof(s2)"/>
-
-    <!--  SIGNED 8bit -->
-    <primary_type symbol="BYTE" datatype="BYTE" contenttype="NONE"
-                  type="s1" sizeop="sizeof(s1)"/>
-
-    <!--  UNSIGNED 8bit -->
-    <primary_type symbol="UBYTE" datatype="U1" contenttype="NONE"
-                  type="u1" sizeop="sizeof(u1)"/>
-
-    <!--  float 32bit -->
-    <primary_type symbol="FLOAT" datatype="FLOAT" contenttype="NONE"
-                  type="float" sizeop="sizeof(float)"/>
-
-    <!--  float 64bit -->
-    <primary_type symbol="DOUBLE" datatype="DOUBLE" contenttype="NONE"
-                  type="double" sizeop="sizeof(double)"/>
-
-    <!-- boolean type (1-byte) -->
-    <primary_type symbol="BOOLEAN" datatype="BOOLEAN" contenttype="NONE"
-                  type="bool" sizeop="1"/>
-
-    <!-- 32-bit unsigned integer, SEMANTIC value BYTES -->
-    <primary_type symbol="BYTES" datatype="U4" contenttype="BYTES"
-                  type="u4" sizeop="sizeof(u4)"/>
-
-    <primary_type symbol="IOBYTES" datatype="U4" contenttype="BYTES"
-                  type="u4" sizeop="sizeof(u4)"/>
-
-    <!-- 64-bit unsigned integer, SEMANTIC value BYTES -->
-    <primary_type symbol="BYTES64" datatype="U8" contenttype="BYTES"
-                  type="u8" sizeop="sizeof(u8)"/>
-
-    <!-- 64-bit unsigned integer, SEMANTIC value ABSOLUTE MILLISECONDS -->
-    <primary_type symbol="EPOCHMILLIS" datatype="LONG" contenttype="EPOCHMILLIS"
-                  type="s8" sizeop="sizeof(s8)"/>
-
-    <!-- 64-bit unsigned integer, SEMANTIC value RELATIVE MILLISECONDS -->
-    <primary_type symbol="MILLIS" datatype="LONG" contenttype="MILLIS"
-                  type="s8" sizeop="sizeof(s8)"/>
-
-    <!-- 64-bit unsigned integer, SEMANTIC value RELATIVE NANOSECONDS -->
-    <primary_type symbol="NANOS" datatype="LONG" contenttype="NANOS"
-                  type="s8" sizeop="sizeof(s8)"/>
-
-    <!-- 64-bit signed integer, SEMANTIC value TICKS -->
-    <primary_type symbol="TICKS" datatype="LONG" contenttype="TICKS"
-                  type="Ticks" sizeop="sizeof(s8)"/>
-
-    <!-- 64-bit signed integer, SEMANTIC value TICKS duration -->
-    <primary_type symbol="TICKSPAN" datatype="LONG" contenttype="TICKS"
-                  type="Tickspan" sizeop="sizeof(s8)"/>
-
-    <!-- 64-bit unsigned integer, SEMANTIC value ADDRESS (mem loc) -->
-    <primary_type symbol="ADDRESS" datatype="U8" contenttype="ADDRESS"
-                  type="u8" sizeop="sizeof(u8)"/>
-
-    <!-- 32-bit float, SEMANTIC value PERCENTAGE (0.0-1.0) -->
-    <primary_type symbol="PERCENT" datatype="FLOAT" contenttype="PERCENTAGE"
-                  type="float" sizeop="sizeof(float)"/>
-
-    <!-- UTF-encoded string, max length 64k -->
-    <primary_type symbol="UTF8" datatype="UTF8" contenttype="NONE"
-                  type="const char *" sizeop="sizeof_utf(%)"/>
-
-    <!-- UTF-16 encoded (Unicode) string, max length maxjuint -->
-    <primary_type symbol="STRING" datatype="STRING" contenttype="NONE"
-                  type="TraceUnicodeString*" sizeop="sizeof_unicode(%)"/>
-
-    <!-- Symbol* constant. Note that this may currently ONLY be used by
-          classes, methods and fields.  This restriction might be lifted. -->
-    <primary_type symbol="SYMBOL" datatype="U8" contenttype="SYMBOL"
-                  type="Symbol *" sizeop="sizeof(u8)"/>
-
-    <!-- A Klass *. The actual class is marked as "used" and will
-         eventually be written into the recording constant pool -->
-    <primary_type symbol="CLASS" datatype="U8" contenttype="CLASS"
-                  type="Klass *" sizeop="sizeof(u8)"/>
-
-    <!-- A Method *. The method is marked as "used" and will eventually be
-         written into the recording constant pool. -->
-    <primary_type symbol="METHOD" datatype="U8" contenttype="METHOD"
-                  type="Method *" sizeop="sizeof(u8)"/>
-
-    <!--  The type for stacktraces in the recording. Should not be used by
-          events explicitly -->
-    <primary_type symbol="STACKTRACE" datatype="U8" contenttype="STACKTRACE"
-                  type="u8" sizeop="sizeof(u8)"/>
-
-    <!-- OS Thread ID -->
-    <primary_type symbol="OSTHREAD" datatype="U4" contenttype="OSTHREAD"
-                  type="u4" sizeop="sizeof(u4)"/>
-
-    <!-- VM Thread ID Note: changed from U2 to U8 for hotspot -->
-    <primary_type symbol="VMTHREAD" datatype="U8" contenttype="VMTHREAD"
-                  type="u8"  sizeop="sizeof(u8)"/>
-
-    <!-- Java Thread ID -->
-    <primary_type symbol="JAVALANGTHREAD" datatype="LONG"
-                  contenttype="JAVALANGTHREAD" type="s8"
-                  sizeop="sizeof(s8)"/>
-
-    <!-- Threadgroup THIS TYPE MAY NOT BE USED IN NORMAL EVENTS (ATM). Only
-          for thread constant pool // KK TODO: u8 should be ObjectP -->
-    <primary_type symbol="THREADGROUP" datatype="U4" contenttype="THREADGROUP"
-                  type="u8"
-                  sizeop="sizeof(u4)"/>
-
-    <!-- FRAMETYPE enum -->
-    <primary_type symbol="FRAMETYPE" datatype="U1" contenttype="FRAMETYPE"
-                  type="u1" sizeop="sizeof(u1)"/>
-
-    <!-- THREADSTATE enum -->
-    <primary_type symbol="THREADSTATE" datatype="U2" contenttype="THREADSTATE"
-                  type="u2" sizeop="sizeof(u2)"/>
-
-    <!-- GCName -->
-    <primary_type symbol="GCNAME" datatype="U1" contenttype="GCNAME"
-                  type="u1" sizeop="sizeof(u1)" />
-
-    <!-- GCCAUSE -->
-    <primary_type symbol="GCCAUSE" datatype="U2" contenttype="GCCAUSE"
-                  type="u2" sizeop="sizeof(u2)" />
-
-    <!-- GCWHEN -->
-    <primary_type symbol="GCWHEN" datatype="U1" contenttype="GCWHEN"
-                  type="u1" sizeop="sizeof(u1)" />
-
-    <!-- G1YCType -->
-    <primary_type symbol="G1YCTYPE" datatype="U1" contenttype="G1YCTYPE"
-                  type="u1" sizeop="sizeof(u1)" />
-
-    <!-- GCTHRESHOLDUPDATER -->
-    <primary_type symbol="GCTHRESHOLDUPDATER" datatype="U1" contenttype="GCTHRESHOLDUPDATER"
-                  type="u1" sizeop="sizeof(u1)" />
-
-    <!-- REFERENCETYPE -->
-    <primary_type symbol="REFERENCETYPE" datatype="U1"
-                  contenttype="REFERENCETYPE" type="u1" sizeop="sizeof(u1)" />
-
-    <!-- METADATATYPE -->
-    <primary_type symbol="METADATATYPE" datatype="U1"
-                  contenttype="METADATATYPE" type="u1" sizeop="sizeof(u1)" />
-
-    <!-- METADATAOBJTYPE -->
-    <primary_type symbol="METASPACEOBJTYPE" datatype="U1"
-                  contenttype="METASPACEOBJTYPE" type="u1" sizeop="sizeof(u1)" />
-
-    <!-- NARROWOOPMODE -->
-    <primary_type symbol="NARROWOOPMODE" datatype="U1"
-                  contenttype="NARROWOOPMODE" type="u1" sizeop="sizeof(u1)" />
-
-    <!-- COMPILERPHASETYPE -->
-    <primary_type symbol="COMPILERPHASETYPE" datatype="U1"
-                  contenttype="COMPILERPHASETYPE" type="u1" sizeop="sizeof(u1)" />
-
-    <!-- VMOPERATIONTYPE -->
-    <primary_type symbol="VMOPERATIONTYPE" datatype="U2" contenttype="VMOPERATIONTYPE"
-                  type="u2" sizeop="sizeof(u2)" />
-                  
-    <!-- FLAGVALUEORIGIN -->
-    <primary_type symbol="FLAGVALUEORIGIN" datatype="U1"
-                  contenttype="FLAGVALUEORIGIN" type="u1" sizeop="sizeof(u1)" />
-
-  </primary_types>
-</types>
--- a/src/share/vm/trace/tracing.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,31 +0,0 @@
-/*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_TRACE_TRACING_HPP
-#define SHARE_VM_TRACE_TRACING_HPP
-
-#include "tracefiles/traceEventClasses.hpp"
-#include "tracefiles/traceEventIds.hpp"
-
-#endif // SHARE_VM_TRACE_TRACING_HPP
--- a/src/share/vm/trace/xinclude.mod	Thu Aug 01 03:44:03 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,37 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
- DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-
- This code is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License version 2 only, as
- published by the Free Software Foundation.
-
- This code is distributed in the hope that it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- version 2 for more details (a copy is included in the LICENSE file that
- accompanied this code).
-
- You should have received a copy of the GNU General Public License version
- 2 along with this work; if not, write to the Free Software Foundation,
- Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-
- Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- or visit www.oracle.com if you need additional information or have any
- questions.
-  
--->
-<!ELEMENT xi:include (xi:fallback?) >
-<!ATTLIST xi:include
-    xmlns:xi   CDATA       #FIXED    "http://www.w3.org/2001/XInclude"
-    href       CDATA       #IMPLIED
-    parse      (xml|text)  "xml"
-    xpointer   CDATA       #IMPLIED
-    encoding   CDATA       #IMPLIED 
-    accept     CDATA       #IMPLIED
-    accept-language CDATA  #IMPLIED >
-
-<!ELEMENT xi:fallback ANY>
-<!ATTLIST xi:fallback
-    xmlns:xi   CDATA   #FIXED   "http://www.w3.org/2001/XInclude" >
--- a/src/share/vm/trace/xsl_util.xsl	Thu Aug 01 03:44:03 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,78 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!--
- Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
- DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-
- This code is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License version 2 only, as
- published by the Free Software Foundation.
-
- This code is distributed in the hope that it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- version 2 for more details (a copy is included in the LICENSE file that
- accompanied this code).
-
- You should have received a copy of the GNU General Public License version
- 2 along with this work; if not, write to the Free Software Foundation,
- Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-
- Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- or visit www.oracle.com if you need additional information or have any
- questions.
--->
-
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
-
-<!-- utilities used when generating code -->
-
-<xsl:variable name="newline">
-  <xsl:text>&#xA;</xsl:text>
-</xsl:variable>
-
-<xsl:variable name="indent1">
-  <xsl:text>&#xA;  </xsl:text>
-</xsl:variable>
-
-<xsl:variable name="indent2">
-  <xsl:text>&#xA;    </xsl:text>
-</xsl:variable>
-
-<xsl:variable name="indent3">
-  <xsl:text>&#xA;      </xsl:text>
-</xsl:variable>
-
-<xsl:variable name="indent4">
-  <xsl:text>&#xA;        </xsl:text>
-</xsl:variable>
-
-<xsl:variable name="quote">
-  <xsl:text>"</xsl:text>
-</xsl:variable>
-
-<xsl:template name="file-header">
-  <xsl:text>/* AUTOMATICALLY GENERATED FILE - DO NOT EDIT */</xsl:text>
-</xsl:template>
-
-<xsl:template name="string-replace-all">
-  <xsl:param name="text" />
-  <xsl:param name="replace" />
-  <xsl:param name="by" />
-  <xsl:choose>
-    <xsl:when test="contains($text, $replace)">
-      <xsl:value-of select="substring-before($text,$replace)" />
-      <xsl:value-of select="$by" />
-      <xsl:call-template name="string-replace-all">
-        <xsl:with-param name="text" select="substring-after($text,$replace)" />
-        <xsl:with-param name="replace" select="$replace" />
-        <xsl:with-param name="by" select="$by" />
-      </xsl:call-template>
-    </xsl:when>
-    <xsl:otherwise>
-      <xsl:value-of select="$text" />
-    </xsl:otherwise>
-  </xsl:choose>
-</xsl:template>
-
-
-</xsl:stylesheet>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/utilities/align.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_UTILITIES_ALIGN_HPP
+#define SHARE_VM_UTILITIES_ALIGN_HPP
+
+#include "utilities/globalDefinitions.hpp"
+
+// Signed variants of alignment helpers.  There are two versions of each, a macro
+// for use in places like enum definitions that require compile-time constant
+// expressions and a function for all other places so as to get type checking.
+
+// Using '(what) & ~align_mask(alignment)' to align 'what' down is broken when
+// 'alignment' is an unsigned int and 'what' is a wider type. The & operation
+// will widen the inverted mask, and not sign extend it, leading to a mask with
+// zeros in the most significant bits. The use of align_mask_widened() solves
+// this problem.
+#define align_mask(alignment) ((alignment) - 1)
+#define widen_to_type_of(what, type_carrier) (true ? (what) : (type_carrier))
+#define align_mask_widened(alignment, type_carrier) widen_to_type_of(align_mask(alignment), (type_carrier))
+
+#define align_down_(size, alignment) ((size) & ~align_mask_widened((alignment), (size)))
+
+#define align_up_(size, alignment) (align_down_((size) + align_mask(alignment), (alignment)))
+
+#define is_aligned_(size, alignment) (((size) & align_mask(alignment)) == 0)
+
+// Temporary declaration until this file has been restructured.
+template <typename T>
+bool is_power_of_2_t(T x) {
+  return (x != T(0)) && ((x & (x - 1)) == T(0));
+}
+
+// Helpers to align sizes and check for alignment
+
+template <typename T, typename A>
+inline T align_up(T size, A alignment) {
+  assert(is_power_of_2_t(alignment), "must be a power of 2");
+
+  T ret = align_up_(size, alignment);
+  assert(is_aligned_(ret, alignment), "must be aligned");
+
+  return ret;
+}
+
+template <typename T, typename A>
+inline T align_down(T size, A alignment) {
+  assert(is_power_of_2_t(alignment), "must be a power of 2");
+
+  T ret = align_down_(size, alignment);
+  assert(is_aligned_(ret, alignment), "must be aligned");
+
+  return ret;
+}
+
+template <typename T, typename A>
+inline bool is_aligned(T size, A alignment) {
+  assert(is_power_of_2_t(alignment), "must be a power of 2");
+
+  return is_aligned_(size, alignment);
+}
+
+// Align down with a lower bound. If aligning down results in 0, return 'alignment'.
+template <typename T, typename A>
+inline T align_down_bounded(T size, A alignment) {
+  A aligned_size = align_down(size, alignment);
+  return aligned_size > 0 ? aligned_size : alignment;
+}
+
+// Helpers to align pointers and check for alignment.
+
+template <typename T, typename A>
+inline T* align_up(T* ptr, A alignment) {
+  return (T*)align_up((uintptr_t)ptr, alignment);
+}
+
+template <typename T, typename A>
+inline T* align_down(T* ptr, A alignment) {
+  return (T*)align_down((uintptr_t)ptr, alignment);
+}
+
+template <typename T, typename A>
+inline bool is_aligned(T* ptr, A alignment) {
+  return is_aligned((uintptr_t)ptr, alignment);
+}
+
+// Align metaspace objects by rounding up to natural word boundary
+template <typename T>
+inline T align_metadata_size(T size) {
+  return align_up(size, 1);
+}
+
+// Align objects in the Java Heap by rounding up their size, in HeapWord units.
+template <typename T>
+inline T align_object_size(T word_size) {
+  return align_up(word_size, MinObjAlignment);
+}
+
+inline bool is_object_aligned(size_t word_size) {
+  return is_aligned(word_size, MinObjAlignment);
+}
+
+inline bool is_object_aligned(const void* addr) {
+  return is_aligned(addr, MinObjAlignmentInBytes);
+}
+
+// Pad out certain offsets to jlong alignment, in HeapWord units.
+template <typename T>
+inline T align_object_offset(T offset) {
+  return align_up(offset, HeapWordsPerLong);
+}
+
+// Clamp an address to be within a specific page:
+// 1. If addr is on the page, it is returned as is.
+// 2. If addr is above page_address, the start of the *next* page is returned.
+// 3. Otherwise, if addr is below page_address, the start of the page is returned.
+template <typename T>
+inline T* clamp_address_in_page(T* addr, T* page_address, size_t page_size) {
+  if (align_down(addr, page_size) == align_down(page_address, page_size)) {
+    // address is in the specified page, just return it as is
+    return addr;
+  } else if (addr > page_address) {
+    // address is above specified page, return start of next page
+    return align_down(page_address, page_size) + page_size;
+  } else {
+    // address is below specified page, return start of page
+    return align_down(page_address, page_size);
+  }
+}
+
+#endif // SHARE_VM_UTILITIES_ALIGN_HPP
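
For orientation, a minimal standalone sketch of the arithmetic the align_up_/align_down_ macros above perform; it is not part of the patch, omits the align_mask_widened() widening step (both operands below already have the same width), and the sizes are purely illustrative.

// Sketch of the alignment arithmetic, assuming a power-of-2 alignment.
#include <cstdint>
#include <cstdio>

#define align_mask(alignment) ((alignment) - 1)
#define align_down_(size, alignment) ((size) & ~align_mask(alignment))
#define align_up_(size, alignment) (align_down_((size) + align_mask(alignment), (alignment)))

int main() {
  uint64_t size = 1000;
  uint64_t alignment = 256;  // must be a power of 2
  std::printf("align_up:   %llu\n", (unsigned long long)align_up_(size, alignment));   // 1024
  std::printf("align_down: %llu\n", (unsigned long long)align_down_(size, alignment)); // 768
  return 0;
}
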
--- a/src/share/vm/utilities/bitMap.inline.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/utilities/bitMap.inline.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -25,7 +25,7 @@
 #ifndef SHARE_VM_UTILITIES_BITMAP_INLINE_HPP
 #define SHARE_VM_UTILITIES_BITMAP_INLINE_HPP
 
-#include "runtime/atomic.hpp"
+#include "runtime/atomic.inline.hpp"
 #include "utilities/bitMap.hpp"
 
 #ifdef ASSERT
--- a/src/share/vm/utilities/globalDefinitions.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/utilities/globalDefinitions.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -42,6 +42,14 @@
 # include "utilities/globalDefinitions_xlc.hpp"
 #endif
 
+// Defaults for macros that might be defined per compiler.
+#ifndef NOINLINE
+#define NOINLINE
+#endif
+#ifndef ALWAYSINLINE
+#define ALWAYSINLINE inline
+#endif
+
 #ifndef PRAGMA_DIAG_PUSH
 #define PRAGMA_DIAG_PUSH
 #endif
--- a/src/share/vm/utilities/globalDefinitions_gcc.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/utilities/globalDefinitions_gcc.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -263,17 +263,18 @@
 #define PRAGMA_IMPLEMENTATION        #pragma implementation
 #define VALUE_OBJ_CLASS_SPEC
 
-#ifndef ATTRIBUTE_PRINTF
 // Diagnostic pragmas like the ones defined below in PRAGMA_FORMAT_NONLITERAL_IGNORED
 // were only introduced in GCC 4.2. Because we have no other possibility to ignore
 // these warnings for older versions of GCC, we simply don't decorate our printf-style
 // functions with __attribute__(format) in that case.
 #if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 2)) || (__GNUC__ > 4)
+#ifndef ATTRIBUTE_PRINTF
 #define ATTRIBUTE_PRINTF(fmt,vargs)  __attribute__((format(printf, fmt, vargs)))
-#else
-#define ATTRIBUTE_PRINTF(fmt,vargs)
 #endif
+#ifndef ATTRIBUTE_SCANF
+#define ATTRIBUTE_SCANF(fmt,vargs)  __attribute__((format(scanf, fmt, vargs)))
 #endif
+#endif // gcc version check
 
 #define PRAGMA_FORMAT_NONLITERAL_IGNORED _Pragma("GCC diagnostic ignored \"-Wformat-nonliteral\"") \
                                          _Pragma("GCC diagnostic ignored \"-Wformat-security\"")
@@ -333,4 +334,8 @@
 #define JLONG_FORMAT           "%ld"
 #endif // _LP64 && __APPLE__
 
+// Inlining support
+#define NOINLINE     __attribute__ ((noinline))
+#define ALWAYSINLINE inline __attribute__ ((always_inline))
+
 #endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_GCC_HPP
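
A small sketch, not from the patch, of how a printf-style function is typically decorated with the ATTRIBUTE_PRINTF macro defined above so that GCC can type-check its format arguments; log_message is a hypothetical helper, not a HotSpot function.

// Hypothetical variadic logger; the attribute lets -Wformat verify the call sites.
#include <cstdarg>
#include <cstdio>

#define ATTRIBUTE_PRINTF(fmt, vargs) __attribute__((format(printf, fmt, vargs)))

void log_message(const char* fmt, ...) ATTRIBUTE_PRINTF(1, 2);

void log_message(const char* fmt, ...) {
  va_list ap;
  va_start(ap, fmt);
  std::vfprintf(stderr, fmt, ap);
  va_end(ap);
}

int main() {
  log_message("pid=%d name=%s\n", 42, "jfr");  // format checked against arguments
  return 0;
}
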
--- a/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -281,4 +281,8 @@
 
 #define offset_of(klass,field) offsetof(klass,field)
 
+// Inlining support
+#define NOINLINE
+#define ALWAYSINLINE inline __attribute__((always_inline))
+
 #endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_SPARCWORKS_HPP
--- a/src/share/vm/utilities/globalDefinitions_visCPP.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/utilities/globalDefinitions_visCPP.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -171,6 +171,11 @@
 #define strdup _strdup
 #endif
 
+#if _MSC_VER < 1800
+// Suppress spurious warning C4355: 'this' used in base member initializer list
+#pragma warning( disable : 4355 )
+#endif
+
 #pragma warning( disable : 4100 ) // unreferenced formal parameter
 #pragma warning( disable : 4127 ) // conditional expression is constant
 #pragma warning( disable : 4514 ) // unreferenced inline function has been removed
@@ -218,4 +223,11 @@
 
 #define offset_of(klass,field) offsetof(klass,field)
 
+// Inlining support
+// MSVC has '__declspec(noinline)' but according to the official documentation
+// it only applies to member functions. There are, however, reports that it
+// also works for freestanding functions.
+#define NOINLINE     __declspec(noinline)
+#define ALWAYSINLINE __forceinline
+
 #endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_VISCPP_HPP
--- a/src/share/vm/utilities/globalDefinitions_xlc.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/utilities/globalDefinitions_xlc.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -180,5 +180,7 @@
 #define SIZE_64G  ((uint64_t)  0x1000000000ULL)
 #define SIZE_1T   ((uint64_t) 0x10000000000ULL)
 
+#define NOINLINE     __attribute__((__noinline__))
+#define ALWAYSINLINE inline __attribute__((__always_inline__))
 
 #endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_XLC_HPP
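
For context, a compiler-agnostic sketch (not from the patch) of how the NOINLINE/ALWAYSINLINE macros defined per toolchain above are meant to be used; the function names are made up.

// Shared code can annotate cold and hot paths portably once each
// globalDefinitions_*.hpp supplies the macros. GCC/Clang variant shown.
#if defined(__GNUC__)
#define NOINLINE     __attribute__ ((noinline))
#define ALWAYSINLINE inline __attribute__ ((always_inline))
#else
#define NOINLINE
#define ALWAYSINLINE inline
#endif

// Keep a rarely taken error path out of the caller's instruction stream.
NOINLINE void report_slow_path(int code) {
  (void)code;  // ... error reporting ...
}

// Force a tiny hot helper to be inlined even in debug builds.
ALWAYSINLINE int fast_add(int a, int b) {
  return a + b;
}
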
--- a/src/share/vm/utilities/growableArray.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/utilities/growableArray.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -168,6 +168,10 @@
   GrowableArray(int initial_size, bool C_heap = false, MEMFLAGS F = mtInternal)
     : GenericGrowableArray(initial_size, 0, C_heap, F) {
     _data = (E*)raw_allocate(sizeof(E));
+// Needed for Visual Studio 2012 and older
+#ifdef _MSC_VER
+#pragma warning(suppress: 4345)
+#endif
     for (int i = 0; i < _max; i++) ::new ((void*)&_data[i]) E();
   }
 
@@ -372,6 +376,40 @@
   void sort(int f(E*,E*), int stride) {
     qsort(_data, length() / stride, sizeof(E) * stride, (_sort_Fn)f);
   }
+
+  // Binary search and insertion utility.  Search the array for an element
+  // matching the key according to the static compare function.  Insert the
+  // element if it is not already in the list.  Assumes the list is already
+  // sorted according to the compare function.
+  template <int compare(const E&, const E&)> E insert_sorted(const E& key) {
+    bool found;
+    int location = find_sorted<E, compare>(key, found);
+    if (!found) {
+      insert_before(location, key);
+    }
+    return at(location);
+  }
+
+  template <typename K, int compare(const K&, const E&)> int find_sorted(const K& key, bool& found) {
+    found = false;
+    int min = 0;
+    int max = length() - 1;
+
+    while (max >= min) {
+      int mid = (int)(((uint)max + min) / 2);
+      E value = at(mid);
+      int diff = compare(key, value);
+      if (diff > 0) {
+        min = mid + 1;
+      } else if (diff < 0) {
+        max = mid - 1;
+      } else {
+        found = true;
+        return mid;
+      }
+    }
+    return min;
+  }
 };
 
 // Global GrowableArray methods (one instance in the library per each 'E' type).
@@ -385,6 +423,10 @@
     E* newData = (E*)raw_allocate(sizeof(E));
     int i = 0;
     for (     ; i < _len; i++) ::new ((void*)&newData[i]) E(_data[i]);
+// Needed for Visual Studio 2012 and older
+#ifdef _MSC_VER
+#pragma warning(suppress: 4345)
+#endif
     for (     ; i < _max; i++) ::new ((void*)&newData[i]) E();
     for (i = 0; i < old_max; i++) _data[i].~E();
     if (on_C_heap() && _data != NULL) {
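
The find_sorted()/insert_sorted() helpers added above return either the index of a match or the insertion point that keeps the array sorted. A standalone sketch of the same search logic over a std::vector, assuming the same three-way compare contract; it is illustrative only and not HotSpot code.

#include <cstdio>
#include <vector>

// Three-way compare, as expected by GrowableArray::find_sorted.
static int compare_int(const int& a, const int& b) {
  return a < b ? -1 : (a > b ? 1 : 0);
}

// Sets 'found' and returns the matching index, or the insertion point.
static int find_sorted(const std::vector<int>& v, int key, bool& found) {
  found = false;
  int min = 0;
  int max = (int)v.size() - 1;
  while (max >= min) {
    int mid = min + (max - min) / 2;
    int diff = compare_int(key, v[mid]);
    if (diff > 0) {
      min = mid + 1;
    } else if (diff < 0) {
      max = mid - 1;
    } else {
      found = true;
      return mid;
    }
  }
  return min;  // insertion point keeps the vector sorted
}

int main() {
  std::vector<int> v = {2, 4, 8};
  bool found;
  int pos = find_sorted(v, 6, found);
  if (!found) v.insert(v.begin() + pos, 6);  // mirrors insert_sorted()
  std::printf("inserted at %d, size=%zu\n", pos, v.size());
  return 0;
}
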
--- a/src/share/vm/utilities/macros.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/utilities/macros.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -160,9 +160,15 @@
 #define NOT_NMT_RETURN_(code) { return code; }
 #endif // INCLUDE_NMT
 
-#ifndef INCLUDE_TRACE
-#define INCLUDE_TRACE 1
-#endif // INCLUDE_TRACE
+#ifndef INCLUDE_JFR
+#define INCLUDE_JFR 1
+#endif
+
+#if INCLUDE_JFR
+#define JFR_ONLY(code) code
+#else
+#define JFR_ONLY(code)
+#endif
 
 // COMPILER1 variant
 #ifdef COMPILER1
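
A minimal sketch (not part of the patch) of how the JFR_ONLY macro introduced above keeps JFR hooks out of builds configured with INCLUDE_JFR=0; the hook below is hypothetical, while the real backport uses calls such as Jfr::on_vm_shutdown().

// JFR_ONLY expands to its argument only when JFR support is compiled in,
// so call sites stay free of #if INCLUDE_JFR blocks.
#ifndef INCLUDE_JFR
#define INCLUDE_JFR 1
#endif

#if INCLUDE_JFR
#define JFR_ONLY(code) code
#else
#define JFR_ONLY(code)
#endif

static void notify_jfr_shutdown() { /* would emit a JFR shutdown event */ }

void vm_shutdown_example() {
  // ... regular shutdown work ...
  JFR_ONLY(notify_jfr_shutdown();)  // compiled away when INCLUDE_JFR == 0
}
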
--- a/src/share/vm/utilities/ticks.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/utilities/ticks.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,45 +24,113 @@
 
 #include "precompiled.hpp"
 #include "runtime/os.hpp"
-#include "utilities/ticks.inline.hpp"
+#include "utilities/ticks.hpp"
 
-#ifdef ASSERT
- const jlong Ticks::invalid_time_stamp = -2; // 0xFFFF FFFF`FFFF FFFE
+#ifdef X86
+#include "rdtsc_x86.hpp"
 #endif
 
-void Ticks::stamp() {
-  _stamp_ticks = os::elapsed_counter();
+template <typename TimeSource, const int unit>
+inline double conversion(typename TimeSource::Type& value) {
+  return (double)value * ((double)unit / (double)TimeSource::frequency());
+}
+
+uint64_t ElapsedCounterSource::frequency() {
+  static const uint64_t freq = (uint64_t)os::elapsed_frequency();
+  return freq;
+}
+
+ElapsedCounterSource::Type ElapsedCounterSource::now() {
+  return os::elapsed_counter();
+}
+
+double ElapsedCounterSource::seconds(Type value) {
+  return conversion<ElapsedCounterSource, 1>(value);
 }
 
-const Ticks Ticks::now() {
-  Ticks t;
-  t.stamp();
-  return t;
+uint64_t ElapsedCounterSource::milliseconds(Type value) {
+  return (uint64_t)conversion<ElapsedCounterSource, MILLIUNITS>(value);
+}
+
+uint64_t ElapsedCounterSource::microseconds(Type value) {
+  return (uint64_t)conversion<ElapsedCounterSource, MICROUNITS>(value);
+}
+
+uint64_t ElapsedCounterSource::nanoseconds(Type value) {
+  return (uint64_t)conversion<ElapsedCounterSource, NANOUNITS>(value);
+}
+
+uint64_t FastUnorderedElapsedCounterSource::frequency() {
+#ifdef X86
+  static bool valid_rdtsc = Rdtsc::initialize();
+  if (valid_rdtsc) {
+    static const uint64_t freq = (uint64_t)Rdtsc::frequency();
+    return freq;
+  }
+#endif
+  static const uint64_t freq = (uint64_t)os::elapsed_frequency();
+  return freq;
 }
 
-Tickspan::Tickspan(const Ticks& end, const Ticks& start) {
-  assert(end.value() != Ticks::invalid_time_stamp, "end is unstamped!");
-  assert(start.value() != Ticks::invalid_time_stamp, "start is unstamped!");
+FastUnorderedElapsedCounterSource::Type FastUnorderedElapsedCounterSource::now() {
+#ifdef X86
+  static bool valid_rdtsc = Rdtsc::initialize();
+  if (valid_rdtsc) {
+    return Rdtsc::elapsed_counter();
+  }
+#endif
+  return os::elapsed_counter();
+}
+
+double FastUnorderedElapsedCounterSource::seconds(Type value) {
+  return conversion<FastUnorderedElapsedCounterSource, 1>(value);
+}
 
-  assert(end >= start, "negative time!");
+uint64_t FastUnorderedElapsedCounterSource::milliseconds(Type value) {
+  return (uint64_t)conversion<FastUnorderedElapsedCounterSource, MILLIUNITS>(value);
+}
 
-  _span_ticks = end.value() - start.value();
+uint64_t FastUnorderedElapsedCounterSource::microseconds(Type value) {
+  return (uint64_t)conversion<FastUnorderedElapsedCounterSource, MICROUNITS>(value);
+}
+
+uint64_t FastUnorderedElapsedCounterSource::nanoseconds(Type value) {
+  return (uint64_t)conversion<FastUnorderedElapsedCounterSource, NANOUNITS>(value);
+}
+
+uint64_t CompositeElapsedCounterSource::frequency() {
+  return ElapsedCounterSource::frequency();
 }
 
-template <typename ReturnType>
-static ReturnType time_conversion(const Tickspan& span, TicksToTimeHelper::Unit unit) {
-  assert(TicksToTimeHelper::SECONDS == unit ||
-         TicksToTimeHelper::MILLISECONDS == unit, "invalid unit!");
-
-  ReturnType frequency_per_unit = (ReturnType)os::elapsed_frequency() / (ReturnType)unit;
-
-  return (ReturnType) ((ReturnType)span.value() / frequency_per_unit);
+CompositeElapsedCounterSource::Type CompositeElapsedCounterSource::now() {
+  CompositeTime ct;
+  ct.val1 = ElapsedCounterSource::now();
+#ifdef X86
+  static bool initialized = false;
+  static bool valid_rdtsc = false;
+  if (!initialized) {
+    valid_rdtsc = Rdtsc::initialize();
+    initialized = true;
+  }
+  if (valid_rdtsc) {
+    ct.val2 = Rdtsc::elapsed_counter();
+  }
+#endif
+  return ct;
 }
 
-double TicksToTimeHelper::seconds(const Tickspan& span) {
-  return time_conversion<double>(span, SECONDS);
+double CompositeElapsedCounterSource::seconds(Type value) {
+  return conversion<ElapsedCounterSource, 1>(value.val1);
+}
+
+uint64_t CompositeElapsedCounterSource::milliseconds(Type value) {
+  return (uint64_t)conversion<ElapsedCounterSource, MILLIUNITS>(value.val1);
 }
 
-jlong TicksToTimeHelper::milliseconds(const Tickspan& span) {
-  return time_conversion<jlong>(span, MILLISECONDS);
+uint64_t CompositeElapsedCounterSource::microseconds(Type value) {
+  return (uint64_t)conversion<ElapsedCounterSource, MICROUNITS>(value.val1);
 }
+
+uint64_t CompositeElapsedCounterSource::nanoseconds(Type value) {
+  return (uint64_t)conversion<ElapsedCounterSource, NANOUNITS>(value.val1);
+}
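
The conversion<TimeSource, unit>() helper above scales a raw counter value by unit/frequency. A standalone sketch of that arithmetic, assuming a 1 GHz counter and MILLIUNITS == 1000 (both values are assumptions for illustration only):

#include <cstdint>
#include <cstdio>

static const uint64_t MILLIUNITS = 1000;               // assumed unit constant
static uint64_t frequency() { return 1000000000ULL; }  // assumed 1 GHz counter

// Same shape as conversion<TimeSource, unit>(): ticks * (unit / frequency).
static uint64_t milliseconds(int64_t ticks) {
  return (uint64_t)((double)ticks * ((double)MILLIUNITS / (double)frequency()));
}

int main() {
  std::printf("%llu ms\n", (unsigned long long)milliseconds(2500000000LL));  // 2500 ms
  return 0;
}
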
--- a/src/share/vm/utilities/ticks.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/utilities/ticks.hpp	Mon Aug 12 18:30:40 2019 +0300
@@ -1,111 +1,249 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
+* Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*
+*/
 
 #ifndef SHARE_VM_UTILITIES_TICKS_HPP
 #define SHARE_VM_UTILITIES_TICKS_HPP
 
+#include "jni.h"
 #include "memory/allocation.hpp"
-#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
 
-class Ticks;
+// Time sources
+class ElapsedCounterSource {
+ public:
+  typedef jlong Type;
+  static uint64_t frequency();
+  static Type now();
+  static double seconds(Type value);
+  static uint64_t milliseconds(Type value);
+  static uint64_t microseconds(Type value);
+  static uint64_t nanoseconds(Type value);
+};
 
-class Tickspan VALUE_OBJ_CLASS_SPEC {
-  friend class Ticks;
-  friend Tickspan operator-(const Ticks& end, const Ticks& start);
+// Not guaranteed to be synchronized across hardware threads, and
+// therefore not across software threads either; it can also be updated
+// asynchronously by software. now() can jump backward as well as forward
+// when threads query different cores/sockets.
+// Very much not recommended for general use. Caveat emptor.
+class FastUnorderedElapsedCounterSource {
+ public:
+  typedef jlong Type;
+  static uint64_t frequency();
+  static Type now();
+  static double seconds(Type value);
+  static uint64_t milliseconds(Type value);
+  static uint64_t microseconds(Type value);
+  static uint64_t nanoseconds(Type value);
+};
 
- private:
-  jlong _span_ticks;
+template <typename T1, typename T2>
+class PairRep {
+ public:
+  T1 val1;
+  T2 val2;
 
-  Tickspan(const Ticks& end, const Ticks& start);
-
- public:
-  Tickspan() : _span_ticks(0) {}
+  PairRep() : val1((T1)0), val2((T2)0) {}
+  void operator+=(const PairRep& rhs) {
+    val1 += rhs.val1;
+    val2 += rhs.val2;
+  }
+  void operator-=(const PairRep& rhs) {
+    val1 -= rhs.val1;
+    val2 -= rhs.val2;
+  }
+  bool operator==(const PairRep& rhs) const {
+    return val1 == rhs.val1;
+  }
+  bool operator!=(const PairRep& rhs) const {
+    return !operator==(rhs);
+  }
+  bool operator<(const PairRep& rhs) const {
+    return val1 < rhs.val1;
+  }
+  bool operator>(const PairRep& rhs) const {
+    return val1 > rhs.val1;
+  }
+};
 
-  Tickspan& operator+=(const Tickspan& rhs) {
-    _span_ticks += rhs._span_ticks;
-    return *this;
-  }
+template <typename T1, typename T2>
+PairRep<T1, T2> operator-(const PairRep<T1, T2>& lhs, const PairRep<T1, T2>& rhs) {
+  PairRep<T1, T2> temp(lhs);
+  temp -= rhs;
+  return temp;
+}
+
+typedef PairRep<ElapsedCounterSource::Type, FastUnorderedElapsedCounterSource::Type> CompositeTime;
 
-  jlong value() const {
-    return _span_ticks;
-  }
-
+class CompositeElapsedCounterSource {
+ public:
+  typedef CompositeTime Type;
+  static uint64_t frequency();
+  static Type now();
+  static double seconds(Type value);
+  static uint64_t milliseconds(Type value);
+  static uint64_t microseconds(Type value);
+  static uint64_t nanoseconds(Type value);
 };
 
-class Ticks VALUE_OBJ_CLASS_SPEC {
- private:
-  jlong _stamp_ticks;
-
+template <typename TimeSource>
+class Representation {
+ public:
+  typedef typename TimeSource::Type Type;
+ protected:
+  Type _rep;
+  Representation(const Representation<TimeSource>& end, const Representation<TimeSource>& start) : _rep(end._rep - start._rep) {}
+  Representation() : _rep() {}
  public:
-  Ticks() : _stamp_ticks(0) {
-    assert((_stamp_ticks = invalid_time_stamp) == invalid_time_stamp,
-      "initial unstamped time value assignment");
+  void operator+=(const Representation<TimeSource>& rhs) {
+    _rep += rhs._rep;
+  }
+  void operator-=(const Representation<TimeSource>& rhs) {
+    _rep -= rhs._rep;
+  }
+  bool operator==(const Representation<TimeSource>& rhs) const {
+    return _rep == rhs._rep;
+  }
+  bool operator!=(const Representation<TimeSource>& rhs) const {
+    return !operator==(rhs);
+  }
+  bool operator<(const Representation<TimeSource>& rhs) const {
+    return _rep < rhs._rep;
+  }
+  bool operator>(const Representation<TimeSource>& rhs) const {
+    return _rep > rhs._rep;
+  }
+  bool operator<=(const Representation<TimeSource>& rhs) const {
+    return !operator>(rhs);
+  }
+  bool operator>=(const Representation<TimeSource>& rhs) const {
+    return !operator<(rhs);
+  }
+  double seconds() const {
+    return TimeSource::seconds(_rep);
+  }
+  uint64_t milliseconds() const {
+    return TimeSource::milliseconds(_rep);
+  }
+  uint64_t microseconds() const {
+    return TimeSource::microseconds(_rep);
+  }
+  uint64_t nanoseconds() const {
+    return TimeSource::nanoseconds(_rep);
+  }
+};
+
+template <typename TimeSource>
+class CounterRepresentation : public Representation<TimeSource> {
+ protected:
+  CounterRepresentation(const CounterRepresentation& end, const CounterRepresentation& start) : Representation<TimeSource>(end, start) {}
+  explicit CounterRepresentation(jlong value) : Representation<TimeSource>() {
+    this->_rep = value;
   }
+ public:
+  CounterRepresentation() : Representation<TimeSource>() {}
+  typename TimeSource::Type value() const { return this->_rep; }
+  operator typename TimeSource::Type() { return value(); }
+};
 
-  Ticks& operator+=(const Tickspan& span) {
-    _stamp_ticks += span.value();
+template <typename TimeSource>
+class CompositeCounterRepresentation : public Representation<TimeSource> {
+ protected:
+  CompositeCounterRepresentation(const CompositeCounterRepresentation& end, const CompositeCounterRepresentation& start) :
+    Representation<TimeSource>(end, start) {}
+  explicit CompositeCounterRepresentation(jlong value) : Representation<TimeSource>() {
+    this->_rep.val1 = value;
+    this->_rep.val2 = value;
+  }
+ public:
+  CompositeCounterRepresentation() : Representation<TimeSource>() {}
+  ElapsedCounterSource::Type value() const { return this->_rep.val1; }
+  FastUnorderedElapsedCounterSource::Type ft_value() const { return this->_rep.val2; }
+};
+
+template <template <typename> class, typename>
+class TimeInstant;
+
+template <template <typename> class Rep, typename TimeSource>
+class TimeInterval : public Rep<TimeSource> {
+  template <template <typename> class, typename>
+  friend class TimeInstant;
+  TimeInterval(const TimeInstant<Rep, TimeSource>& end, const TimeInstant<Rep, TimeSource>& start) : Rep<TimeSource>(end, start) {}
+ public:
+  TimeInterval() : Rep<TimeSource>() {}
+  TimeInterval<Rep, TimeSource> operator+(const TimeInterval<Rep, TimeSource>& rhs) const {
+    TimeInterval<Rep, TimeSource> temp(*this);
+    temp += rhs;
+    return temp;
+  }
+  TimeInterval<Rep, TimeSource> operator-(const TimeInterval<Rep, TimeSource>& rhs) const {
+    TimeInterval<Rep, TimeSource> temp(*this);
+    temp -= rhs;
+    return temp;
+  }
+};
+
+template <template <typename> class Rep, typename TimeSource>
+class TimeInstant : public Rep<TimeSource> {
+ public:
+  TimeInstant() : Rep<TimeSource>() {}
+  TimeInstant<Rep, TimeSource>& operator+=(const TimeInterval<Rep, TimeSource>& rhs) {
+    Rep<TimeSource>::operator+=(rhs);
     return *this;
   }
-
-  Ticks& operator-=(const Tickspan& span) {
-    _stamp_ticks -= span.value();
+  TimeInstant<Rep, TimeSource>& operator-=(const TimeInterval<Rep, TimeSource>& rhs) {
+    Rep<TimeSource>::operator-=(rhs);
     return *this;
   }
-
-  void stamp();
-
-  jlong value() const {
-    return _stamp_ticks;
+  TimeInterval<Rep, TimeSource> operator+(const TimeInstant<Rep, TimeSource>& end) const {
+    return TimeInterval<Rep, TimeSource>(end, *this);
+  }
+  TimeInterval<Rep, TimeSource> operator-(const TimeInstant<Rep, TimeSource>& start) const {
+    return TimeInterval<Rep, TimeSource>(*this, start);
+  }
+  void stamp() {
+    this->_rep = TimeSource::now();
+  }
+  static TimeInstant<Rep, TimeSource> now() {
+    TimeInstant<Rep, TimeSource> temp;
+    temp.stamp();
+    return temp;
   }
-
-  static const Ticks now();
+ private:
+  TimeInstant(jlong ticks) : Rep<TimeSource>(ticks) {}
+  friend class GranularTimer;
+  friend class ObjectSample;
+  //  GC VM tests
+  friend class TimePartitionPhasesIteratorTest;
+  friend class GCTimerTest;
+};
 
-#ifdef ASSERT
-  static const jlong invalid_time_stamp;
+#if INCLUDE_JFR
+typedef TimeInstant<CompositeCounterRepresentation, CompositeElapsedCounterSource> Ticks;
+typedef TimeInterval<CompositeCounterRepresentation, CompositeElapsedCounterSource> Tickspan;
+#else
+typedef TimeInstant<CounterRepresentation, ElapsedCounterSource> Ticks;
+typedef TimeInterval<CounterRepresentation, ElapsedCounterSource> Tickspan;
 #endif
 
-#ifndef PRODUCT
-  // only for internal use by GC VM tests
-  friend class TimePartitionPhasesIteratorTest;
-  friend class GCTimerTest;
-
- private:
-  // implicit type conversion
-  Ticks(int ticks) : _stamp_ticks(ticks) {}
-
-#endif // !PRODUCT
-
-};
-
-class TicksToTimeHelper : public AllStatic {
- public:
-  enum Unit {
-    SECONDS = 1,
-    MILLISECONDS = 1000
-  };
-  static double seconds(const Tickspan& span);
-  static jlong milliseconds(const Tickspan& span);
-};
-
 #endif // SHARE_VM_UTILITIES_TICKS_HPP
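
A brief standalone sketch of the TimeInstant/TimeInterval design introduced above, using std::chrono in place of os::elapsed_counter()/rdtsc as the time source; the class and function names below are illustrative, not the HotSpot types, but the stamp/subtract/convert pattern is the same one Ticks and Tickspan expose.

#include <chrono>
#include <cstdint>
#include <cstdio>

// A pluggable time source with the same static interface shape as above.
class ChronoSource {
 public:
  typedef int64_t Type;
  static Type now() {
    using namespace std::chrono;
    return duration_cast<nanoseconds>(steady_clock::now().time_since_epoch()).count();
  }
  static uint64_t milliseconds(Type value) { return (uint64_t)value / 1000000; }
};

template <typename TimeSource>
class Interval {
 public:
  typename TimeSource::Type _rep;
  uint64_t milliseconds() const { return TimeSource::milliseconds(_rep); }
};

template <typename TimeSource>
class Instant {
  typename TimeSource::Type _rep;
 public:
  static Instant now() { Instant i; i._rep = TimeSource::now(); return i; }
  Interval<TimeSource> operator-(const Instant& start) const {
    Interval<TimeSource> d;
    d._rep = _rep - start._rep;
    return d;
  }
};

int main() {
  Instant<ChronoSource> start = Instant<ChronoSource>::now();
  // ... work being timed ...
  Interval<ChronoSource> elapsed = Instant<ChronoSource>::now() - start;
  std::printf("elapsed: %llu ms\n", (unsigned long long)elapsed.milliseconds());
  return 0;
}
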
--- a/src/share/vm/utilities/ticks.inline.hpp	Thu Aug 01 03:44:03 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,97 +0,0 @@
-/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_UTILITIES_TICKS_INLINE_HPP
-#define SHARE_VM_UTILITIES_TICKS_INLINE_HPP
-
-#include "utilities/ticks.hpp"
-
-inline Tickspan operator+(Tickspan lhs, const Tickspan& rhs) {
-  lhs += rhs;
-  return lhs;
-}
-
-inline bool operator==(const Tickspan& lhs, const Tickspan& rhs) {
-  return lhs.value() == rhs.value();
-}
-
-inline bool operator!=(const Tickspan& lhs, const Tickspan& rhs) {
-  return !operator==(lhs,rhs);
-}
-
-inline bool operator<(const Tickspan& lhs, const Tickspan& rhs) {
-  return lhs.value() < rhs.value();
-}
-
-inline bool operator>(const Tickspan& lhs, const Tickspan& rhs) {
-  return operator<(rhs,lhs);
-}
-
-inline bool operator<=(const Tickspan& lhs, const Tickspan& rhs) {
-  return !operator>(lhs,rhs);
-}
-
-inline bool operator>=(const Tickspan& lhs, const Tickspan& rhs) {
-  return !operator<(lhs,rhs);
-}
-
-inline Ticks operator+(Ticks lhs, const Tickspan& span) {
-  lhs += span;
-  return lhs;
-}
-
-inline Ticks operator-(Ticks lhs, const Tickspan& span) {
-  lhs -= span;
-  return lhs;
-}
-
-inline Tickspan operator-(const Ticks& end, const Ticks& start) {
-  return Tickspan(end, start);
-}
-
-inline bool operator==(const Ticks& lhs, const Ticks& rhs) {
-  return lhs.value() == rhs.value();
-}
-
-inline bool operator!=(const Ticks& lhs, const Ticks& rhs) {
-  return !operator==(lhs,rhs);
-}
-
-inline bool operator<(const Ticks& lhs, const Ticks& rhs) {
-  return lhs.value() < rhs.value();
-}
-
-inline bool operator>(const Ticks& lhs, const Ticks& rhs) {
-  return operator<(rhs,lhs);
-}
-
-inline bool operator<=(const Ticks& lhs, const Ticks& rhs) {
-  return !operator>(lhs,rhs);
-}
-
-inline bool operator>=(const Ticks& lhs, const Ticks& rhs) {
-  return !operator<(lhs,rhs);
-}
-
-#endif // SHARE_VM_UTILITIES_TICKS_INLINE_HPP
--- a/src/share/vm/utilities/vmError.cpp	Thu Aug 01 03:44:03 2019 +0100
+++ b/src/share/vm/utilities/vmError.cpp	Mon Aug 12 18:30:40 2019 +0300
@@ -26,6 +26,7 @@
 #include "precompiled.hpp"
 #include "compiler/compileBroker.hpp"
 #include "gc_interface/collectedHeap.hpp"
+#include "jfr/jfrEvents.hpp"
 #include "prims/whitebox.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/frame.inline.hpp"
@@ -42,6 +43,10 @@
 #include "utilities/events.hpp"
 #include "utilities/top.hpp"
 #include "utilities/vmError.hpp"
+#include "utilities/macros.hpp"
+#if INCLUDE_JFR
+#include "jfr/jfr.hpp"
+#endif
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
@@ -939,6 +944,13 @@
     // are handled properly.
     reset_signal_handlers();
 
+    EventShutdown e;
+    if (e.should_commit()) {
+      e.set_reason("VM Error");
+      e.commit();
+    }
+
+    JFR_ONLY(Jfr::on_vm_shutdown(true);)
   } else {
     // If UseOsErrorReporting we call this for each level of the call stack
     // while searching for the exception handler.  Only the first level needs