changeset 9170:8e875c964f41

8214542: JFR: Old Object Sample event slow on a deep heap in debug builds
Reviewed-by: egahlin, rwestberg
author ddong
date Wed, 09 Oct 2019 16:11:58 +0800
parents 1258121876f8
children 986b79fabfa0
files   src/share/vm/jfr/leakprofiler/chains/bfsClosure.cpp
        src/share/vm/jfr/leakprofiler/chains/bfsClosure.hpp
        src/share/vm/jfr/leakprofiler/chains/bitset.hpp
        src/share/vm/jfr/leakprofiler/chains/dfsClosure.cpp
        src/share/vm/jfr/leakprofiler/chains/dfsClosure.hpp
        src/share/vm/jfr/leakprofiler/chains/edge.hpp
        src/share/vm/jfr/leakprofiler/chains/edgeStore.cpp
        src/share/vm/jfr/leakprofiler/chains/edgeStore.hpp
        src/share/vm/jfr/leakprofiler/chains/edgeUtils.cpp
        src/share/vm/jfr/leakprofiler/chains/edgeUtils.hpp
        src/share/vm/jfr/leakprofiler/chains/pathToGcRootsOperation.cpp
        src/share/vm/jfr/leakprofiler/chains/pathToGcRootsOperation.hpp
        src/share/vm/jfr/leakprofiler/chains/rootSetClosure.cpp
        src/share/vm/jfr/leakprofiler/chains/rootSetClosure.hpp
        src/share/vm/jfr/leakprofiler/checkpoint/eventEmitter.cpp
        src/share/vm/jfr/leakprofiler/checkpoint/eventEmitter.hpp
        src/share/vm/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp
        src/share/vm/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp
        src/share/vm/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp
        src/share/vm/jfr/leakprofiler/checkpoint/objectSampleWriter.hpp
        src/share/vm/jfr/leakprofiler/checkpoint/rootResolver.hpp
        src/share/vm/jfr/leakprofiler/emitEventOperation.cpp
        src/share/vm/jfr/leakprofiler/emitEventOperation.hpp
        src/share/vm/jfr/leakprofiler/leakProfiler.cpp
        src/share/vm/jfr/leakprofiler/leakProfiler.hpp
        src/share/vm/jfr/leakprofiler/sampling/objectSampler.cpp
        src/share/vm/jfr/leakprofiler/sampling/objectSampler.hpp
        src/share/vm/jfr/leakprofiler/startOperation.hpp
        src/share/vm/jfr/leakprofiler/stopOperation.hpp
        src/share/vm/jfr/leakprofiler/utilities/vmOperation.hpp
        src/share/vm/jfr/recorder/checkpoint/types/jfrType.cpp
        src/share/vm/jfr/recorder/service/jfrRecorderService.cpp
        src/share/vm/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp
        src/share/vm/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp
        src/share/vm/jfr/support/jfrFlush.hpp
        src/share/vm/jfr/support/jfrThreadLocal.cpp
        src/share/vm/jfr/support/jfrThreadLocal.hpp
        src/share/vm/runtime/vm_operations.hpp
diffstat 38 files changed, 1217 insertions(+), 1130 deletions(-)
--- a/src/share/vm/jfr/leakprofiler/chains/bfsClosure.cpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/chains/bfsClosure.cpp	Wed Oct 09 16:11:58 2019 +0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -97,7 +97,6 @@
 }
 
 void BFSClosure::process() {
-
   process_root_set();
   process_queue();
 }
@@ -136,7 +135,6 @@
 
     // if we are processinig initial root set, don't add to queue
     if (_current_parent != NULL) {
-      assert(_current_parent->distance_to_root() == _current_frontier_level, "invariant");
       _edge_queue->add(_current_parent, reference);
     }
 
@@ -149,20 +147,8 @@
 void BFSClosure::add_chain(const oop* reference, const oop pointee) {
   assert(pointee != NULL, "invariant");
   assert(NULL == pointee->mark(), "invariant");
-
-  const size_t length = _current_parent == NULL ? 1 : _current_parent->distance_to_root() + 2;
-  ResourceMark rm;
-  Edge* const chain = NEW_RESOURCE_ARRAY(Edge, length);
-  size_t idx = 0;
-  chain[idx++] = Edge(NULL, reference);
-  // aggregate from breadth-first search
-  const Edge* current = _current_parent;
-  while (current != NULL) {
-    chain[idx++] = Edge(NULL, current->reference());
-    current = current->parent();
-  }
-  assert(length == idx, "invariant");
-  _edge_store->add_chain(chain, length);
+  Edge leak_edge(_current_parent, reference);
+  _edge_store->put_chain(&leak_edge, _current_parent == NULL ? 1 : _current_frontier_level + 2);
 }
 
 void BFSClosure::dfs_fallback() {
@@ -239,3 +225,10 @@
     closure_impl(UnifiedOop::encode(ref), pointee);
   }
 }
+
+void BFSClosure::do_root(const oop* ref) {
+  assert(ref != NULL, "invariant");
+  if (!_edge_queue->is_full()) {
+    _edge_queue->add(NULL, ref);
+  }
+}
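
The rewritten BFSClosure::add_chain above no longer copies the discovered reference chain into a NEW_RESOURCE_ARRAY; it hands EdgeStore::put_chain a single stack-allocated Edge whose parent pointer reaches back through edges already resident in the EdgeQueue, plus a length derived from the current frontier level. A minimal sketch of the idea, using illustrative types rather than the actual JFR classes:

    // Sketch only: a chain is described by its head edge plus a length;
    // consumers follow parent pointers instead of reading an array copy.
    struct SketchEdge {
      const SketchEdge* parent;     // previously discovered edge, owned by the queue
      const void*       reference;  // location holding the traced pointer
    };

    static size_t sketch_chain_length(const SketchEdge* head) {
      size_t n = 0;
      for (const SketchEdge* e = head; e != NULL; e = e->parent) {
        ++n;  // O(depth) walk, but no per-chain allocation or copying
      }
      return n;
    }
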
--- a/src/share/vm/jfr/leakprofiler/chains/bfsClosure.hpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/chains/bfsClosure.hpp	Wed Oct 09 16:11:58 2019 +0800
@@ -26,7 +26,6 @@
 #define SHARE_VM_JFR_LEAKPROFILER_CHAINS_BFSCLOSURE_HPP
 
 #include "memory/iterator.hpp"
-#include "oops/oop.hpp"
 
 class BitSet;
 class Edge;
@@ -65,6 +64,7 @@
  public:
   BFSClosure(EdgeQueue* edge_queue, EdgeStore* edge_store, BitSet* mark_bits);
   void process();
+  void do_root(const oop* ref);
 
   virtual void do_oop(oop* ref);
   virtual void do_oop(narrowOop* ref);
--- a/src/share/vm/jfr/leakprofiler/chains/bitset.hpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/chains/bitset.hpp	Wed Oct 09 16:11:58 2019 +0800
@@ -47,7 +47,7 @@
 
   BitMap::idx_t mark_obj(const HeapWord* addr) {
     const BitMap::idx_t bit = addr_to_bit(addr);
-    _bits.par_set_bit(bit);
+    _bits.set_bit(bit);
     return bit;
   }
 
--- a/src/share/vm/jfr/leakprofiler/chains/dfsClosure.cpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/chains/dfsClosure.cpp	Wed Oct 09 16:11:58 2019 +0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,14 +23,14 @@
  */
 
 #include "precompiled.hpp"
+#include "jfr/leakprofiler/chains/bitset.hpp"
 #include "jfr/leakprofiler/chains/dfsClosure.hpp"
 #include "jfr/leakprofiler/chains/edge.hpp"
 #include "jfr/leakprofiler/chains/edgeStore.hpp"
+#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
 #include "jfr/leakprofiler/utilities/granularTimer.hpp"
-#include "jfr/leakprofiler/chains/bitset.hpp"
+#include "jfr/leakprofiler/utilities/rootType.hpp"
 #include "jfr/leakprofiler/utilities/unifiedOop.hpp"
-#include "jfr/leakprofiler/utilities/rootType.hpp"
-#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
 #include "memory/iterator.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
@@ -87,15 +87,15 @@
   // Mark root set, to avoid going sideways
   _max_depth = 1;
   _ignore_root_set = false;
-  DFSClosure dfs1;
-  RootSetClosure::process_roots(&dfs1);
+  DFSClosure dfs;
+  RootSetClosure<DFSClosure> rs(&dfs);
+  rs.process();
 
   // Depth-first search
   _max_depth = max_dfs_depth;
   _ignore_root_set = true;
   assert(_start_edge == NULL, "invariant");
-  DFSClosure dfs2;
-  RootSetClosure::process_roots(&dfs2);
+  rs.process();
 }
 
 void DFSClosure::closure_impl(const oop* reference, const oop pointee) {
@@ -132,30 +132,29 @@
 }
 
 void DFSClosure::add_chain() {
-  const size_t length = _start_edge == NULL ? _depth + 1 :
-                        _start_edge->distance_to_root() + 1 + _depth + 1;
+  const size_t array_length = _depth + 2;
 
   ResourceMark rm;
-  Edge* const chain = NEW_RESOURCE_ARRAY(Edge, length);
+  Edge* const chain = NEW_RESOURCE_ARRAY(Edge, array_length);
   size_t idx = 0;
 
   // aggregate from depth-first search
   const DFSClosure* c = this;
   while (c != NULL) {
-    chain[idx++] = Edge(NULL, c->reference());
+    const size_t next = idx + 1;
+    chain[idx++] = Edge(&chain[next], c->reference());
     c = c->parent();
   }
-
-  assert(idx == _depth + 1, "invariant");
+  assert(_depth + 1 == idx, "invariant");
+  assert(array_length == idx + 1, "invariant");
 
   // aggregate from breadth-first search
-  const Edge* current = _start_edge;
-  while (current != NULL) {
-    chain[idx++] = Edge(NULL, current->reference());
-    current = current->parent();
+  if (_start_edge != NULL) {
+    chain[idx++] = *_start_edge;
+  } else {
+    chain[idx - 1] = Edge(NULL, chain[idx - 1].reference());
   }
-  assert(idx == length, "invariant");
-  _edge_store->add_chain(chain, length);
+  _edge_store->put_chain(chain, idx + (_start_edge != NULL ? _start_edge->distance_to_root() : 0));
 }
 
 void DFSClosure::do_oop(oop* ref) {
@@ -175,3 +174,10 @@
     closure_impl(UnifiedOop::encode(ref), pointee);
   }
 }
+
+void DFSClosure::do_root(const oop* ref) {
+  assert(ref != NULL, "invariant");
+  const oop pointee = UnifiedOop::dereference(ref);
+  assert(pointee != NULL, "invariant");
+  closure_impl(ref, pointee);
+}
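
In the new DFSClosure::add_chain above, each array slot is linked to the following slot as its parent, and the sequence is either terminated in place or spliced onto the breadth-first start edge; put_chain then receives the full logical length, i.e. the depth-first portion plus the start edge's distance to root. A hedged sketch of the linking step (illustrative edge type, assumes depth >= 1 and room for depth + 1 slots):

    // Sketch: link a flat array of references into a parent-connected chain.
    struct SketchEdge { const SketchEdge* parent; const void* reference; };

    void link_chain(SketchEdge* chain, const void* const* refs, size_t depth,
                    const SketchEdge* start_edge) {
      for (size_t idx = 0; idx < depth; ++idx) {
        chain[idx].parent    = &chain[idx + 1];  // parent is simply the next slot
        chain[idx].reference = refs[idx];
      }
      if (start_edge != NULL) {
        chain[depth] = *start_edge;              // splice onto the BFS edge
      } else {
        chain[depth - 1].parent = NULL;          // terminate the chain at the root
      }
    }
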
--- a/src/share/vm/jfr/leakprofiler/chains/dfsClosure.hpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/chains/dfsClosure.hpp	Wed Oct 09 16:11:58 2019 +0800
@@ -26,7 +26,6 @@
 #define SHARE_VM_JFR_LEAKPROFILER_CHAINS_DFSCLOSURE_HPP
 
 #include "memory/iterator.hpp"
-#include "oops/oop.hpp"
 
 class BitSet;
 class Edge;
@@ -34,7 +33,7 @@
 class EdgeQueue;
 
 // Class responsible for iterating the heap depth-first
-class DFSClosure: public ExtendedOopClosure { // XXX BasicOopIterateClosure
+class DFSClosure : public ExtendedOopClosure { // XXX BasicOopIterateClosure
  private:
   static EdgeStore* _edge_store;
   static BitSet*    _mark_bits;
@@ -57,6 +56,7 @@
  public:
   static void find_leaks_from_edge(EdgeStore* edge_store, BitSet* mark_bits, const Edge* start_edge);
   static void find_leaks_from_root_set(EdgeStore* edge_store, BitSet* mark_bits);
+  void do_root(const oop* ref);
 
   virtual void do_oop(oop* ref);
   virtual void do_oop(narrowOop* ref);
--- a/src/share/vm/jfr/leakprofiler/chains/edge.hpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/chains/edge.hpp	Wed Oct 09 16:11:58 2019 +0800
@@ -29,7 +29,7 @@
 #include "oops/oopsHierarchy.hpp"
 
 class Edge {
- private:
+ protected:
   const Edge* _parent;
   const oop* _reference;
  public:
--- a/src/share/vm/jfr/leakprofiler/chains/edgeStore.cpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/chains/edgeStore.cpp	Wed Oct 09 16:11:58 2019 +0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,37 +27,17 @@
 #include "jfr/leakprofiler/chains/edgeUtils.hpp"
 #include "oops/oop.inline.hpp"
 
-RoutableEdge::RoutableEdge() : Edge() {}
-RoutableEdge::RoutableEdge(const Edge* parent, const oop* reference) : Edge(parent, reference),
-                                                                       _skip_edge(NULL),
-                                                                       _skip_length(0),
-                                                                       _processed(false) {}
+StoredEdge::StoredEdge() : Edge() {}
+StoredEdge::StoredEdge(const Edge* parent, const oop* reference) : Edge(parent, reference), _gc_root_id(0), _skip_length(0) {}
 
-RoutableEdge::RoutableEdge(const Edge& edge) : Edge(edge),
-                                               _skip_edge(NULL),
-                                               _skip_length(0),
-                                               _processed(false) {}
-
-RoutableEdge::RoutableEdge(const RoutableEdge& edge) : Edge(edge),
-                                                      _skip_edge(edge._skip_edge),
-                                                      _skip_length(edge._skip_length),
-                                                      _processed(edge._processed) {}
+StoredEdge::StoredEdge(const Edge& edge) : Edge(edge), _gc_root_id(0), _skip_length(0) {}
 
-void RoutableEdge::operator=(const RoutableEdge& edge) {
-  Edge::operator=(edge);
-  _skip_edge = edge._skip_edge;
-  _skip_length = edge._skip_length;
-  _processed = edge._processed;
-}
+StoredEdge::StoredEdge(const StoredEdge& edge) : Edge(edge), _gc_root_id(edge._gc_root_id), _skip_length(edge._skip_length) {}
 
-size_t RoutableEdge::logical_distance_to_root() const {
-  size_t depth = 0;
-  const RoutableEdge* current = logical_parent();
-  while (current != NULL) {
-    depth++;
-    current = current->logical_parent();
-  }
-  return depth;
+void StoredEdge::operator=(const StoredEdge& edge) {
+  Edge::operator=(edge);
+  _gc_root_id = edge._gc_root_id;
+  _skip_length = edge._skip_length;
 }
 
 traceid EdgeStore::_edge_id_counter = 0;
@@ -69,79 +49,12 @@
 EdgeStore::~EdgeStore() {
   assert(_edges != NULL, "invariant");
   delete _edges;
-  _edges = NULL;
-}
-
-const Edge* EdgeStore::get_edge(const Edge* edge) const {
-  assert(edge != NULL, "invariant");
-  EdgeEntry* const entry = _edges->lookup_only(*edge, (uintptr_t)edge->reference());
-  return entry != NULL ? entry->literal_addr() : NULL;
-}
-
-const Edge* EdgeStore::put(const Edge* edge) {
-  assert(edge != NULL, "invariant");
-  const RoutableEdge e = *edge;
-  assert(NULL == _edges->lookup_only(e, (uintptr_t)e.reference()), "invariant");
-  EdgeEntry& entry = _edges->put(e, (uintptr_t)e.reference());
-  return entry.literal_addr();
-}
-
-traceid EdgeStore::get_id(const Edge* edge) const {
-  assert(edge != NULL, "invariant");
-  EdgeEntry* const entry = _edges->lookup_only(*edge, (uintptr_t)edge->reference());
-  assert(entry != NULL, "invariant");
-  return entry->id();
-}
-
-traceid EdgeStore::get_root_id(const Edge* edge) const {
-  assert(edge != NULL, "invariant");
-  const Edge* root = EdgeUtils::root(*edge);
-  assert(root != NULL, "invariant");
-  return get_id(root);
-}
-
-void EdgeStore::add_chain(const Edge* chain, size_t length) {
-  assert(chain != NULL, "invariant");
-  assert(length > 0, "invariant");
-
-  size_t bottom_index = length - 1;
-  const size_t top_index = 0;
-
-  const Edge* stored_parent_edge = NULL;
-
-  // determine level of shared ancestry
-  for (; bottom_index > top_index; --bottom_index) {
-    const Edge* stored_edge = get_edge(&chain[bottom_index]);
-    if (stored_edge != NULL) {
-      stored_parent_edge = stored_edge;
-      continue;
-    }
-    break;
-  }
-
-  // insertion of new Edges
-  for (int i = (int)bottom_index; i >= (int)top_index; --i) {
-    Edge edge(stored_parent_edge, chain[i].reference());
-    stored_parent_edge = put(&edge);
-  }
-
-  const oop sample_object = stored_parent_edge->pointee();
-  assert(sample_object != NULL, "invariant");
-  assert(NULL == sample_object->mark(), "invariant");
-
-  // Install the "top" edge of the chain into the sample object mark oop.
-  // This associates the sample object with its navigable reference chain.
-  sample_object->set_mark(markOop(stored_parent_edge));
 }
 
 bool EdgeStore::is_empty() const {
   return !_edges->has_entries();
 }
 
-size_t EdgeStore::number_of_entries() const {
-  return _edges->cardinality();
-}
-
 void EdgeStore::assign_id(EdgeEntry* entry) {
   assert(entry != NULL, "invariant");
   assert(entry->id() == 0, "invariant");
@@ -153,3 +66,254 @@
   assert(entry->hash() == hash, "invariant");
   return true;
 }
+
+#ifdef ASSERT
+bool EdgeStore::contains(const oop* reference) const {
+  return get(reference) != NULL;
+}
+#endif
+
+StoredEdge* EdgeStore::get(const oop* reference) const {
+  assert(reference != NULL, "invariant");
+  const StoredEdge e(NULL, reference);
+  EdgeEntry* const entry = _edges->lookup_only(e, (uintptr_t)reference);
+  return entry != NULL ? entry->literal_addr() : NULL;
+}
+
+StoredEdge* EdgeStore::put(const oop* reference) {
+  assert(reference != NULL, "invariant");
+  const StoredEdge e(NULL, reference);
+  assert(NULL == _edges->lookup_only(e, (uintptr_t)reference), "invariant");
+  EdgeEntry& entry = _edges->put(e, (uintptr_t)reference);
+  return entry.literal_addr();
+}
+
+traceid EdgeStore::get_id(const Edge* edge) const {
+  assert(edge != NULL, "invariant");
+  EdgeEntry* const entry = _edges->lookup_only(*edge, (uintptr_t)edge->reference());
+  assert(entry != NULL, "invariant");
+  return entry->id();
+}
+
+traceid EdgeStore::gc_root_id(const Edge* edge) const {
+  assert(edge != NULL, "invariant");
+  const traceid gc_root_id = static_cast<const StoredEdge*>(edge)->gc_root_id();
+  if (gc_root_id != 0) {
+    return gc_root_id;
+  }
+  // not cached
+  assert(edge != NULL, "invariant");
+  const Edge* const root = EdgeUtils::root(*edge);
+  assert(root != NULL, "invariant");
+  assert(root->parent() == NULL, "invariant");
+  return get_id(root);
+}
+
+static const Edge* get_skip_ancestor(const Edge** current, size_t distance_to_root, size_t* skip_length) {
+  assert(distance_to_root >= EdgeUtils::root_context, "invariant");
+  assert(*skip_length == 0, "invariant");
+  *skip_length = distance_to_root - (EdgeUtils::root_context - 1);
+  const Edge* const target = EdgeUtils::ancestor(**current, *skip_length);
+  assert(target != NULL, "invariant");
+  assert(target->distance_to_root() + 1 == EdgeUtils::root_context, "invariant");
+  return target;
+}
+
+bool EdgeStore::put_skip_edge(StoredEdge** previous, const Edge** current, size_t distance_to_root) {
+  assert(*previous != NULL, "invariant");
+  assert((*previous)->parent() == NULL, "invariant");
+  assert(*current != NULL, "invariant");
+  assert((*current)->distance_to_root() == distance_to_root, "invariant");
+
+  if (distance_to_root < EdgeUtils::root_context) {
+    // nothing to skip
+    return false;
+  }
+
+  size_t skip_length = 0;
+  const Edge* const skip_ancestor = get_skip_ancestor(current, distance_to_root, &skip_length);
+  assert(skip_ancestor != NULL, "invariant");
+  (*previous)->set_skip_length(skip_length);
+
+  // lookup target
+  StoredEdge* stored_target = get(skip_ancestor->reference());
+  if (stored_target != NULL) {
+    (*previous)->set_parent(stored_target);
+    // linked to existing, complete
+    return true;
+  }
+
+  assert(stored_target == NULL, "invariant");
+  stored_target = put(skip_ancestor->reference());
+  assert(stored_target != NULL, "invariant");
+  (*previous)->set_parent(stored_target);
+  *previous = stored_target;
+  *current = skip_ancestor->parent();
+  return false;
+}
+
+static void link_edge(const StoredEdge* current_stored, StoredEdge** previous) {
+  assert(current_stored != NULL, "invariant");
+  assert(*previous != NULL, "invariant");
+  assert((*previous)->parent() == NULL, "invariant");
+  (*previous)->set_parent(current_stored);
+}
+
+static const StoredEdge* find_closest_skip_edge(const StoredEdge* edge, size_t* distance) {
+  assert(edge != NULL, "invariant");
+  assert(distance != NULL, "invariant");
+  const StoredEdge* current = edge;
+  *distance = 1;
+  while (current != NULL && !current->is_skip_edge()) {
+    ++(*distance);
+    current = current->parent();
+  }
+  return current;
+}
+
+void EdgeStore::link_with_existing_chain(const StoredEdge* current_stored, StoredEdge** previous, size_t previous_length) {
+  assert(current_stored != NULL, "invariant");
+  assert((*previous)->parent() == NULL, "invariant");
+  size_t distance_to_skip_edge; // including the skip edge itself
+  const StoredEdge* const closest_skip_edge = find_closest_skip_edge(current_stored, &distance_to_skip_edge);
+  if (closest_skip_edge == NULL) {
+    // no found skip edge implies root
+    if (distance_to_skip_edge + previous_length <= EdgeUtils::max_ref_chain_depth) {
+      link_edge(current_stored, previous);
+      return;
+    }
+    assert(current_stored->distance_to_root() == distance_to_skip_edge - 2, "invariant");
+    put_skip_edge(previous, reinterpret_cast<const Edge**>(&current_stored), distance_to_skip_edge - 2);
+    return;
+  }
+  assert(closest_skip_edge->is_skip_edge(), "invariant");
+  if (distance_to_skip_edge + previous_length <= EdgeUtils::leak_context) {
+    link_edge(current_stored, previous);
+    return;
+  }
+  // create a new skip edge with derived information from closest skip edge
+  (*previous)->set_skip_length(distance_to_skip_edge + closest_skip_edge->skip_length());
+  (*previous)->set_parent(closest_skip_edge->parent());
+}
+
+StoredEdge* EdgeStore::link_new_edge(StoredEdge** previous, const Edge** current) {
+  assert(*previous != NULL, "invariant");
+  assert((*previous)->parent() == NULL, "invariant");
+  assert(*current != NULL, "invariant");
+  assert(!contains((*current)->reference()), "invariant");
+  StoredEdge* const stored_edge = put((*current)->reference());
+  assert(stored_edge != NULL, "invariant");
+  link_edge(stored_edge, previous);
+  return stored_edge;
+}
+
+bool EdgeStore::put_edges(StoredEdge** previous, const Edge** current, size_t limit) {
+  assert(*previous != NULL, "invariant");
+  assert(*current != NULL, "invariant");
+  size_t depth = 1;
+  while (*current != NULL && depth < limit) {
+    StoredEdge* stored_edge = get((*current)->reference());
+    if (stored_edge != NULL) {
+      link_with_existing_chain(stored_edge, previous, depth);
+      return true;
+    }
+    stored_edge = link_new_edge(previous, current);
+    assert((*previous)->parent() != NULL, "invariant");
+    *previous = stored_edge;
+    *current = (*current)->parent();
+    ++depth;
+  }
+  return NULL == *current;
+}
+
+// Install the immediate edge into the mark word of the leak candidate object
+StoredEdge* EdgeStore::associate_leak_context_with_candidate(const Edge* edge) {
+  assert(edge != NULL, "invariant");
+  assert(!contains(edge->reference()), "invariant");
+  StoredEdge* const leak_context_edge = put(edge->reference());
+  oop sample_object = edge->pointee();
+  assert(sample_object != NULL, "invariant");
+  assert(NULL == sample_object->mark(), "invariant");
+  sample_object->set_mark(markOop(leak_context_edge));
+  return leak_context_edge;
+}
+
+/*
+ * The purpose of put_chain() is to reify the edge sequence
+ * discovered during heap traversal with a normalized logical copy.
+ * This copy consist of two sub-sequences and a connecting link (skip edge).
+ *
+ * "current" can be thought of as the cursor (search) edge, it is not in the edge store.
+ * "previous" is always an edge in the edge store.
+ * The leak context edge is the edge adjacent to the leak candidate object, always an edge in the edge store.
+ */
+void EdgeStore::put_chain(const Edge* chain, size_t length) {
+  assert(chain != NULL, "invariant");
+  assert(chain->distance_to_root() + 1 == length, "invariant");
+  StoredEdge* const leak_context_edge = associate_leak_context_with_candidate(chain);
+  assert(leak_context_edge != NULL, "invariant");
+  assert(leak_context_edge->parent() == NULL, "invariant");
+
+  if (1 == length) {
+    return;
+  }
+
+  const Edge* current = chain->parent();
+  assert(current != NULL, "invariant");
+  StoredEdge* previous = leak_context_edge;
+
+  // a leak context is the sequence of (limited) edges reachable from the leak candidate
+  if (put_edges(&previous, &current, EdgeUtils::leak_context)) {
+    // complete
+    assert(previous != NULL, "invariant");
+    put_chain_epilogue(leak_context_edge, EdgeUtils::root(*previous));
+    return;
+  }
+
+  const size_t distance_to_root = length > EdgeUtils::leak_context ? length - 1 - EdgeUtils::leak_context : length - 1;
+  assert(current->distance_to_root() == distance_to_root, "invariant");
+
+  // a skip edge is the logical link
+  // connecting the leak context sequence with the root context sequence
+  if (put_skip_edge(&previous, &current, distance_to_root)) {
+    // complete
+    assert(previous != NULL, "invariant");
+    assert(previous->is_skip_edge(), "invariant");
+    assert(previous->parent() != NULL, "invariant");
+    put_chain_epilogue(leak_context_edge, EdgeUtils::root(*previous->parent()));
+    return;
+  }
+
+  assert(current->distance_to_root() < EdgeUtils::root_context, "invariant");
+
+  // a root context is the sequence of (limited) edges reachable from the root
+  put_edges(&previous, &current, EdgeUtils::root_context);
+  assert(previous != NULL, "invariant");
+  put_chain_epilogue(leak_context_edge, EdgeUtils::root(*previous));
+}
+
+void EdgeStore::put_chain_epilogue(StoredEdge* leak_context_edge, const Edge* root) const {
+  assert(leak_context_edge != NULL, "invariant");
+  assert(root != NULL, "invariant");
+  store_gc_root_id_in_leak_context_edge(leak_context_edge, root);
+  assert(leak_context_edge->distance_to_root() + 1 <= EdgeUtils::max_ref_chain_depth, "invariant");
+}
+
+// To avoid another traversal to resolve the root edge id later,
+// cache it in the immediate leak context edge for fast retrieval.
+void EdgeStore::store_gc_root_id_in_leak_context_edge(StoredEdge* leak_context_edge, const Edge* root) const {
+  assert(leak_context_edge != NULL, "invariant");
+  assert(leak_context_edge->gc_root_id() == 0, "invariant");
+  assert(root != NULL, "invariant");
+  assert(root->parent() == NULL, "invariant");
+  assert(root->distance_to_root() == 0, "invariant");
+  const StoredEdge* const stored_root = static_cast<const StoredEdge*>(root);
+  traceid root_id = stored_root->gc_root_id();
+  if (root_id == 0) {
+    root_id = get_id(root);
+    stored_root->set_gc_root_id(root_id);
+  }
+  assert(root_id != 0, "invariant");
+  leak_context_edge->set_gc_root_id(root_id);
+  assert(leak_context_edge->gc_root_id() == stored_root->gc_root_id(), "invariant");
+}
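
put_chain above is the heart of the fix: rather than storing every edge of an arbitrarily deep chain and collapsing it afterwards, the store keeps at most EdgeUtils::leak_context edges nearest the sample, at most EdgeUtils::root_context edges nearest the GC root, and bridges the elided middle with a skip edge whose skip_length records how many edges were dropped; the root's id is cached on the leak context edge so it never has to be re-resolved. A rough sketch of the resulting storage cost, assuming the constants declared in edgeUtils.hpp:

    // Rough sketch: entries stored for a chain of raw depth D (edges from the
    // leak candidate to the GC root). Constants mirror EdgeUtils.
    size_t stored_entries(size_t raw_depth) {
      const size_t leak_context = 100;   // edges kept nearest the sample
      const size_t root_context = 100;   // edges kept nearest the root
      if (raw_depth <= leak_context + root_context) {
        return raw_depth;                // short chains are stored in full
      }
      // deep chains: both context windows, bridged by a skip edge
      // (the last leak-context edge carries skip_length), so roughly:
      return leak_context + root_context;
    }
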
--- a/src/share/vm/jfr/leakprofiler/chains/edgeStore.hpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/chains/edgeStore.hpp	Wed Oct 09 16:11:58 2019 +0800
@@ -25,64 +25,40 @@
 #ifndef SHARE_VM_LEAKPROFILER_CHAINS_EDGESTORE_HPP
 #define SHARE_VM_LEAKPROFILER_CHAINS_EDGESTORE_HPP
 
+#include "jfr/leakprofiler/chains/edge.hpp"
 #include "jfr/utilities/jfrHashtable.hpp"
-#include "jfr/leakprofiler/chains/edge.hpp"
 #include "memory/allocation.hpp"
 
 typedef u8 traceid;
 
-class RoutableEdge : public Edge {
+class StoredEdge : public Edge {
  private:
-  mutable const RoutableEdge* _skip_edge;
-  mutable size_t _skip_length;
-  mutable bool _processed;
+  mutable traceid _gc_root_id;
+  size_t _skip_length;
 
  public:
-  RoutableEdge();
-  RoutableEdge(const Edge* parent, const oop* reference);
-  RoutableEdge(const Edge& edge);
-  RoutableEdge(const RoutableEdge& edge);
-  void operator=(const RoutableEdge& edge);
-
-  const RoutableEdge* skip_edge() const { return _skip_edge; }
-  size_t skip_length() const { return _skip_length; }
+  StoredEdge();
+  StoredEdge(const Edge* parent, const oop* reference);
+  StoredEdge(const Edge& edge);
+  StoredEdge(const StoredEdge& edge);
+  void operator=(const StoredEdge& edge);
 
-  bool is_skip_edge() const { return _skip_edge != NULL; }
-  bool processed() const { return _processed; }
-  bool is_sentinel() const {
-    return _skip_edge == NULL && _skip_length == 1;
-  }
-
-  void set_skip_edge(const RoutableEdge* edge) const {
-    assert(!is_skip_edge(), "invariant");
-    assert(edge != this, "invariant");
-    _skip_edge = edge;
-  }
+  traceid gc_root_id() const { return _gc_root_id; }
+  void set_gc_root_id(traceid root_id) const { _gc_root_id = root_id; }
 
-  void set_skip_length(size_t length) const {
-    _skip_length = length;
-  }
-
-  void set_processed() const {
-    assert(!_processed, "invariant");
-    _processed = true;
-  }
+  bool is_skip_edge() const { return _skip_length != 0; }
+  size_t skip_length() const { return _skip_length; }
+  void set_skip_length(size_t length) { _skip_length = length; }
 
-  // true navigation according to physical tree representation
-  const RoutableEdge* physical_parent() const {
-    return static_cast<const RoutableEdge*>(parent());
-  }
+  void set_parent(const Edge* edge) { this->_parent = edge; }
 
-  // logical navigation taking skip levels into account
-  const RoutableEdge* logical_parent() const {
-    return is_skip_edge() ? skip_edge() : physical_parent();
+  StoredEdge* parent() const {
+    return const_cast<StoredEdge*>(static_cast<const StoredEdge*>(Edge::parent()));
   }
-
-  size_t logical_distance_to_root() const;
 };
 
 class EdgeStore : public CHeapObj<mtTracing> {
-  typedef HashTableHost<RoutableEdge, traceid, Entry, EdgeStore> EdgeHashTable;
+  typedef HashTableHost<StoredEdge, traceid, Entry, EdgeStore> EdgeHashTable;
   typedef EdgeHashTable::HashEntry EdgeEntry;
   template <typename,
             typename,
@@ -90,6 +66,9 @@
             typename,
             size_t>
   friend class HashTableHost;
+  friend class EventEmitter;
+  friend class ObjectSampleWriter;
+  friend class ObjectSampleCheckpoint;
  private:
   static traceid _edge_id_counter;
   EdgeHashTable* _edges;
@@ -98,22 +77,31 @@
   void assign_id(EdgeEntry* entry);
   bool equals(const Edge& query, uintptr_t hash, const EdgeEntry* entry);
 
-  const Edge* get_edge(const Edge* edge) const;
-  const Edge* put(const Edge* edge);
+  StoredEdge* get(const oop* reference) const;
+  StoredEdge* put(const oop* reference);
+  traceid gc_root_id(const Edge* edge) const;
+
+  bool put_edges(StoredEdge** previous, const Edge** current, size_t length);
+  bool put_skip_edge(StoredEdge** previous, const Edge** current, size_t distance_to_root);
+  void put_chain_epilogue(StoredEdge* leak_context_edge, const Edge* root) const;
+
+  StoredEdge* associate_leak_context_with_candidate(const Edge* edge);
+  void store_gc_root_id_in_leak_context_edge(StoredEdge* leak_context_edge, const Edge* root) const;
+  StoredEdge* link_new_edge(StoredEdge** previous, const Edge** current);
+  void link_with_existing_chain(const StoredEdge* current_stored, StoredEdge** previous, size_t previous_length);
+
+  template <typename T>
+  void iterate(T& functor) const { _edges->iterate_value<T>(functor); }
+
+  DEBUG_ONLY(bool contains(const oop* reference) const;)
 
  public:
   EdgeStore();
   ~EdgeStore();
 
-  void add_chain(const Edge* chain, size_t length);
   bool is_empty() const;
-  size_t number_of_entries() const;
-
   traceid get_id(const Edge* edge) const;
-  traceid get_root_id(const Edge* edge) const;
-
-  template <typename T>
-  void iterate_edges(T& functor) const { _edges->iterate_value<T>(functor); }
+  void put_chain(const Edge* chain, size_t length);
 };
 
 #endif // SHARE_VM_LEAKPROFILER_CHAINS_EDGESTORE_HPP
--- a/src/share/vm/jfr/leakprofiler/chains/edgeUtils.cpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/chains/edgeUtils.cpp	Wed Oct 09 16:11:58 2019 +0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,11 +38,7 @@
   return (const Edge*)edge.pointee()->mark() == &edge;
 }
 
-bool EdgeUtils::is_root(const Edge& edge) {
-  return edge.is_root();
-}
-
-static int field_offset(const Edge& edge) {
+static int field_offset(const StoredEdge& edge) {
   assert(!edge.is_root(), "invariant");
   const oop ref_owner = edge.reference_owner();
   assert(ref_owner != NULL, "invariant");
@@ -56,7 +52,7 @@
   return offset;
 }
 
-static const InstanceKlass* field_type(const Edge& edge) {
+static const InstanceKlass* field_type(const StoredEdge& edge) {
   assert(!edge.is_root() || !EdgeUtils::is_array_element(edge), "invariant");
   return (const InstanceKlass*)edge.reference_owner_klass();
 }
@@ -138,175 +134,18 @@
     current = parent;
     parent = current->parent();
   }
-  return current;
-}
-
-// The number of references associated with the leak node;
-// can be viewed as the leak node "context".
-// Used to provide leak context for a "capped/skipped" reference chain.
-static const size_t leak_context = 100;
-
-// The number of references associated with the root node;
-// can be viewed as the root node "context".
-// Used to provide root context for a "capped/skipped" reference chain.
-static const size_t root_context = 100;
-
-// A limit on the reference chain depth to be serialized,
-static const size_t max_ref_chain_depth = leak_context + root_context;
-
-const RoutableEdge* skip_to(const RoutableEdge& edge, size_t skip_length) {
-  const RoutableEdge* current = &edge;
-  const RoutableEdge* parent = current->physical_parent();
-  size_t seek = 0;
-  while (parent != NULL && seek != skip_length) {
-    seek++;
-    current = parent;
-    parent = parent->physical_parent();
-  }
-  return current;
-}
-
-#ifdef ASSERT
-static void validate_skip_target(const RoutableEdge* skip_target) {
-  assert(skip_target != NULL, "invariant");
-  assert(skip_target->distance_to_root() + 1 == root_context, "invariant");
-  assert(skip_target->is_sentinel(), "invariant");
-}
-
-static void validate_new_skip_edge(const RoutableEdge* new_skip_edge, const RoutableEdge* last_skip_edge, size_t adjustment) {
-  assert(new_skip_edge != NULL, "invariant");
-  assert(new_skip_edge->is_skip_edge(), "invariant");
-  if (last_skip_edge != NULL) {
-    const RoutableEdge* const target = skip_to(*new_skip_edge->logical_parent(), adjustment);
-    validate_skip_target(target->logical_parent());
-    return;
-  }
-  assert(last_skip_edge == NULL, "invariant");
-  // only one level of logical indirection
-  validate_skip_target(new_skip_edge->logical_parent());
-}
-#endif // ASSERT
-
-static void install_logical_route(const RoutableEdge* new_skip_edge, size_t skip_target_distance) {
-  assert(new_skip_edge != NULL, "invariant");
-  assert(!new_skip_edge->is_skip_edge(), "invariant");
-  assert(!new_skip_edge->processed(), "invariant");
-  const RoutableEdge* const skip_target = skip_to(*new_skip_edge, skip_target_distance);
-  assert(skip_target != NULL, "invariant");
-  new_skip_edge->set_skip_edge(skip_target);
-  new_skip_edge->set_skip_length(skip_target_distance);
-  assert(new_skip_edge->is_skip_edge(), "invariant");
-  assert(new_skip_edge->logical_parent() == skip_target, "invariant");
-}
-
-static const RoutableEdge* find_last_skip_edge(const RoutableEdge& edge, size_t& distance) {
-  assert(distance == 0, "invariant");
-  const RoutableEdge* current = &edge;
-  while (current != NULL) {
-    if (current->is_skip_edge() && current->skip_edge()->is_sentinel()) {
-      return current;
-    }
-    current = current->physical_parent();
-    ++distance;
-  }
+  assert(current != NULL, "invariant");
   return current;
 }
 
-static void collapse_overlapping_chain(const RoutableEdge& edge,
-                                       const RoutableEdge* first_processed_edge,
-                                       size_t first_processed_distance) {
-  assert(first_processed_edge != NULL, "invariant");
-  // first_processed_edge is already processed / written
-  assert(first_processed_edge->processed(), "invariant");
-  assert(first_processed_distance + 1 <= leak_context, "invariant");
-
-  // from this first processed edge, attempt to fetch the last skip edge
-  size_t last_skip_edge_distance = 0;
-  const RoutableEdge* const last_skip_edge = find_last_skip_edge(*first_processed_edge, last_skip_edge_distance);
-  const size_t distance_discovered = first_processed_distance + last_skip_edge_distance + 1;
-
-  if (distance_discovered <= leak_context || (last_skip_edge == NULL && distance_discovered <= max_ref_chain_depth)) {
-    // complete chain can be accommodated without modification
-    return;
-  }
-
-  // backtrack one edge from existing processed edge
-  const RoutableEdge* const new_skip_edge = skip_to(edge, first_processed_distance - 1);
-  assert(new_skip_edge != NULL, "invariant");
-  assert(!new_skip_edge->processed(), "invariant");
-  assert(new_skip_edge->parent() == first_processed_edge, "invariant");
-
-  size_t adjustment = 0;
-  if (last_skip_edge != NULL) {
-    assert(leak_context - 1 > first_processed_distance - 1, "invariant");
-    adjustment = leak_context - first_processed_distance - 1;
-    assert(last_skip_edge_distance + 1 > adjustment, "invariant");
-    install_logical_route(new_skip_edge, last_skip_edge_distance + 1 - adjustment);
-  } else {
-    install_logical_route(new_skip_edge, last_skip_edge_distance + 1 - root_context);
-    new_skip_edge->logical_parent()->set_skip_length(1); // sentinel
+const Edge* EdgeUtils::ancestor(const Edge& edge, size_t distance) {
+  const Edge* current = &edge;
+  const Edge* parent = current->parent();
+  size_t seek = 0;
+  while (parent != NULL && seek != distance) {
+    seek++;
+    current = parent;
+    parent = parent->parent();
   }
-
-  DEBUG_ONLY(validate_new_skip_edge(new_skip_edge, last_skip_edge, adjustment);)
-}
-
-static void collapse_non_overlapping_chain(const RoutableEdge& edge,
-                                           const RoutableEdge* first_processed_edge,
-                                           size_t first_processed_distance) {
-  assert(first_processed_edge != NULL, "invariant");
-  assert(!first_processed_edge->processed(), "invariant");
-  // this implies that the first "processed" edge is the leak context relative "leaf"
-  assert(first_processed_distance + 1 == leak_context, "invariant");
-
-  const size_t distance_to_root = edge.distance_to_root();
-  if (distance_to_root + 1 <= max_ref_chain_depth) {
-    // complete chain can be accommodated without constructing a skip edge
-    return;
-  }
-
-  install_logical_route(first_processed_edge, distance_to_root + 1 - first_processed_distance - root_context);
-  first_processed_edge->logical_parent()->set_skip_length(1); // sentinel
-
-  DEBUG_ONLY(validate_new_skip_edge(first_processed_edge, NULL, 0);)
-}
-
-static const RoutableEdge* processed_edge(const RoutableEdge& edge, size_t& distance) {
-  assert(distance == 0, "invariant");
-  const RoutableEdge* current = &edge;
-  while (current != NULL && distance < leak_context - 1) {
-    if (current->processed()) {
-      return current;
-    }
-    current = current->physical_parent();
-    ++distance;
-  }
-  assert(distance <= leak_context - 1, "invariant");
   return current;
 }
-
-/*
- * Some vocabulary:
- * -----------
- * "Context" is an interval in the chain, it is associcated with an edge and it signifies a number of connected edges.
- * "Processed / written" means an edge that has already been serialized.
- * "Skip edge" is an edge that contains additional information for logical routing purposes.
- * "Skip target" is an edge used as a destination for a skip edge
- */
-void EdgeUtils::collapse_chain(const RoutableEdge& edge) {
-  assert(is_leak_edge(edge), "invariant");
-
-  // attempt to locate an already processed edge inside current leak context (if any)
-  size_t first_processed_distance = 0;
-  const RoutableEdge* const first_processed_edge = processed_edge(edge, first_processed_distance);
-  if (first_processed_edge == NULL) {
-    return;
-  }
-
-  if (first_processed_edge->processed()) {
-    collapse_overlapping_chain(edge, first_processed_edge, first_processed_distance);
-  } else {
-    collapse_non_overlapping_chain(edge, first_processed_edge, first_processed_distance);
-  }
-
-  assert(edge.logical_distance_to_root() + 1 <= max_ref_chain_depth, "invariant");
-}
--- a/src/share/vm/jfr/leakprofiler/chains/edgeUtils.hpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/chains/edgeUtils.hpp	Wed Oct 09 16:11:58 2019 +0800
@@ -28,15 +28,17 @@
 #include "memory/allocation.hpp"
 
 class Edge;
-class RoutableEdge;
 class Symbol;
 
 class EdgeUtils : public AllStatic {
  public:
-  static bool is_leak_edge(const Edge& edge);
+  static const size_t leak_context = 100;
+  static const size_t root_context = 100;
+  static const size_t max_ref_chain_depth = leak_context + root_context;
 
+  static bool is_leak_edge(const Edge& edge);
   static const Edge* root(const Edge& edge);
-  static bool is_root(const Edge& edge);
+  static const Edge* ancestor(const Edge& edge, size_t distance);
 
   static bool is_array_element(const Edge& edge);
   static int array_index(const Edge& edge);
@@ -44,8 +46,6 @@
 
   static const Symbol* field_name_symbol(const Edge& edge);
   static jshort field_modifiers(const Edge& edge);
-
-  static void collapse_chain(const RoutableEdge& edge);
 };
 
 #endif // SHARE_VM_LEAKPROFILER_CHAINS_EDGEUTILS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/chains/pathToGcRootsOperation.cpp	Wed Oct 09 16:11:58 2019 +0800
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_interface/collectedHeap.hpp"
+#include "jfr/leakprofiler/leakProfiler.hpp"
+#include "jfr/leakprofiler/chains/bfsClosure.hpp"
+#include "jfr/leakprofiler/chains/bitset.hpp"
+#include "jfr/leakprofiler/chains/dfsClosure.hpp"
+#include "jfr/leakprofiler/chains/edge.hpp"
+#include "jfr/leakprofiler/chains/edgeQueue.hpp"
+#include "jfr/leakprofiler/chains/edgeStore.hpp"
+#include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
+#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
+#include "jfr/leakprofiler/chains/edgeStore.hpp"
+#include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
+#include "jfr/leakprofiler/chains/pathToGcRootsOperation.hpp"
+#include "jfr/leakprofiler/checkpoint/eventEmitter.hpp"
+#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
+#include "jfr/leakprofiler/sampling/objectSample.hpp"
+#include "jfr/leakprofiler/sampling/objectSampler.hpp"
+#include "jfr/leakprofiler/utilities/granularTimer.hpp"
+#include "memory/universe.hpp"
+#include "oops/markOop.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/safepoint.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+PathToGcRootsOperation::PathToGcRootsOperation(ObjectSampler* sampler, EdgeStore* edge_store, int64_t cutoff, bool emit_all) :
+  _sampler(sampler),_edge_store(edge_store), _cutoff_ticks(cutoff), _emit_all(emit_all) {}
+
+/* The EdgeQueue is backed by directly managed virtual memory.
+ * We will attempt to dimension an initial reservation
+ * in proportion to the size of the heap (represented by heap_region).
+ * Initial memory reservation: 5% of the heap OR at least 32 Mb
+ * Commit ratio: 1 : 10 (subject to allocation granularties)
+ */
+static size_t edge_queue_memory_reservation(const MemRegion& heap_region) {
+  const size_t memory_reservation_bytes = MAX2(heap_region.byte_size() / 20, 32*M);
+  assert(memory_reservation_bytes >= (size_t)32*M, "invariant");
+  return memory_reservation_bytes;
+}
+
+static size_t edge_queue_memory_commit_size(size_t memory_reservation_bytes) {
+  const size_t memory_commit_block_size_bytes = memory_reservation_bytes / 10;
+  assert(memory_commit_block_size_bytes >= (size_t)3*M, "invariant");
+  return memory_commit_block_size_bytes;
+}
+
+static void log_edge_queue_summary(const EdgeQueue& edge_queue) {
+  if (LogJFR && Verbose) tty->print_cr("EdgeQueue reserved size total: " SIZE_FORMAT " [KB]", edge_queue.reserved_size() / K);
+  if (LogJFR && Verbose) tty->print_cr("EdgeQueue edges total: " SIZE_FORMAT, edge_queue.top());
+  if (LogJFR && Verbose) tty->print_cr("EdgeQueue liveset total: " SIZE_FORMAT " [KB]", edge_queue.live_set() / K);
+  if (edge_queue.reserved_size() > 0) {
+    if (LogJFR && Verbose) tty->print_cr("EdgeQueue commit reserve ratio: %f\n",
+      ((double)edge_queue.live_set() / (double)edge_queue.reserved_size()));
+  }
+}
+
+void PathToGcRootsOperation::doit() {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  assert(_cutoff_ticks > 0, "invariant");
+
+  // The bitset used for marking is dimensioned as a function of the heap size
+  const MemRegion heap_region = Universe::heap()->reserved_region();
+  BitSet mark_bits(heap_region);
+
+  // The edge queue is dimensioned as a fraction of the heap size
+  const size_t edge_queue_reservation_size = edge_queue_memory_reservation(heap_region);
+  EdgeQueue edge_queue(edge_queue_reservation_size, edge_queue_memory_commit_size(edge_queue_reservation_size));
+
+  // The initialize() routines will attempt to reserve and allocate backing storage memory.
+  // Failure to accommodate will render root chain processing impossible.
+  // As a fallback on failure, just write out the existing samples, flat, without chains.
+  if (!(mark_bits.initialize() && edge_queue.initialize())) {
+    if (LogJFR) tty->print_cr("Unable to allocate memory for root chain processing");
+    return;
+  }
+
+  // Save the original markWord for the potential leak objects,
+  // to be restored on function exit
+  ObjectSampleMarker marker;
+  if (ObjectSampleCheckpoint::mark(_sampler, marker, _emit_all) == 0) {
+    // no valid samples to process
+    return;
+  }
+
+  // Necessary condition for attempting a root set iteration
+  Universe::heap()->ensure_parsability(false);
+
+  BFSClosure bfs(&edge_queue, _edge_store, &mark_bits);
+  RootSetClosure<BFSClosure> roots(&bfs);
+
+  GranularTimer::start(_cutoff_ticks, 1000000);
+  roots.process();
+  if (edge_queue.is_full()) {
+    // Pathological case where roots don't fit in queue
+    // Do a depth-first search, but mark roots first
+    // to avoid walking sideways over roots
+    DFSClosure::find_leaks_from_root_set(_edge_store, &mark_bits);
+  } else {
+    bfs.process();
+  }
+  GranularTimer::stop();
+  log_edge_queue_summary(edge_queue);
+
+  // Emit old objects including their reference chains as events
+  EventEmitter emitter(GranularTimer::start_time(), GranularTimer::end_time());
+  emitter.write_events(_sampler, _edge_store, _emit_all);
+}
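
The sizing helpers above reserve roughly 5% of the heap for the EdgeQueue, never less than 32 MB, and commit it in blocks of one tenth of the reservation. A quick check of that arithmetic as a sketch:

    // Sketch of the reservation rule: MAX2(heap / 20, 32 * M), committed in 1/10 blocks.
    //   512 MB heap -> 32 MB reserved (floor applies),  3.2 MB commit block
    //   4 GB heap   -> ~204.8 MB reserved,              ~20.5 MB commit block
    size_t sketch_reservation(size_t heap_bytes) {
      const size_t M = 1024 * 1024;
      const size_t proportional = heap_bytes / 20;   // 5% of the heap
      return proportional > 32 * M ? proportional : 32 * M;
    }
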
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/chains/pathToGcRootsOperation.hpp	Wed Oct 09 16:11:58 2019 +0800
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_JFR_LEAKPROFILER_CHAINS_PATHTOGCROOTSOPERATION_HPP
+#define SHARE_JFR_LEAKPROFILER_CHAINS_PATHTOGCROOTSOPERATION_HPP
+
+#include "jfr/leakprofiler/utilities/vmOperation.hpp"
+
+class EdgeStore;
+class ObjectSampler;
+
+// Safepoint operation for finding paths to gc roots
+class PathToGcRootsOperation : public OldObjectVMOperation {
+ private:
+  ObjectSampler* _sampler;
+  EdgeStore* const _edge_store;
+  const int64_t _cutoff_ticks;
+  const bool _emit_all;
+
+ public:
+  PathToGcRootsOperation(ObjectSampler* sampler, EdgeStore* edge_store, int64_t cutoff, bool emit_all);
+  virtual void doit();
+};
+
+#endif // SHARE_JFR_LEAKPROFILER_CHAINS_PATHTOGCROOTSOPERATION_HPP
--- a/src/share/vm/jfr/leakprofiler/chains/rootSetClosure.cpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/chains/rootSetClosure.cpp	Wed Oct 09 16:11:58 2019 +0800
@@ -25,11 +25,14 @@
 #include "precompiled.hpp"
 #include "classfile/classLoaderData.hpp"
 #include "classfile/systemDictionary.hpp"
+#include "jfr/leakprofiler/chains/bfsClosure.hpp"
+#include "jfr/leakprofiler/chains/dfsClosure.hpp"
 #include "jfr/leakprofiler/chains/edgeQueue.hpp"
 #include "jfr/leakprofiler/chains/rootSetClosure.hpp"
 #include "jfr/leakprofiler/utilities/saveRestore.hpp"
 #include "jfr/leakprofiler/utilities/unifiedOop.hpp"
 #include "memory/universe.hpp"
+#include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "runtime/jniHandles.hpp"
 #include "runtime/synchronizer.hpp"
@@ -37,11 +40,11 @@
 #include "services/management.hpp"
 #include "utilities/align.hpp"
 
-RootSetClosure::RootSetClosure(EdgeQueue* edge_queue) :
-  _edge_queue(edge_queue) {
-}
+template <typename Delegate>
+RootSetClosure<Delegate>::RootSetClosure(Delegate* delegate) : _delegate(delegate) {}
 
-void RootSetClosure::do_oop(oop* ref) {
+template <typename Delegate>
+void RootSetClosure<Delegate>::do_oop(oop* ref) {
   assert(ref != NULL, "invariant");
   // We discard unaligned root references because
   // our reference tagging scheme will use
@@ -55,48 +58,38 @@
   }
 
   assert(is_aligned(ref, HeapWordSize), "invariant");
-  const oop pointee = *ref;
-  if (pointee != NULL) {
-    closure_impl(ref, pointee);
+  if (*ref != NULL) {
+    _delegate->do_root(ref);
   }
 }
 
-void RootSetClosure::do_oop(narrowOop* ref) {
+template <typename Delegate>
+void RootSetClosure<Delegate>::do_oop(narrowOop* ref) {
   assert(ref != NULL, "invariant");
   assert(is_aligned(ref, sizeof(narrowOop)), "invariant");
   const oop pointee = oopDesc::load_decode_heap_oop(ref);
   if (pointee != NULL) {
-    closure_impl(UnifiedOop::encode(ref), pointee);
-  }
-}
-
-void RootSetClosure::closure_impl(const oop* reference, const oop pointee) {
-  if (!_edge_queue->is_full())  {
-    _edge_queue->add(NULL, reference);
+    _delegate->do_root(UnifiedOop::encode(ref));
   }
 }
 
-void RootSetClosure::add_to_queue(EdgeQueue* edge_queue) {
-  RootSetClosure rs(edge_queue);
-  process_roots(&rs);
+class RootSetClosureMarkScope : public MarkingCodeBlobClosure::MarkScope {};
+
+template <typename Delegate>
+void RootSetClosure<Delegate>::process() {
+  RootSetClosureMarkScope mark_scope;
+  CLDToOopClosure cldt_closure(this);
+  ClassLoaderDataGraph::always_strong_cld_do(&cldt_closure);
+  CodeBlobToOopClosure blobs(this, false);
+  Threads::oops_do(this, NULL, &blobs); // XXX set CLDClosure to NULL
+  ObjectSynchronizer::oops_do(this);
+  Universe::oops_do(this);
+  JNIHandles::oops_do(this);
+  JvmtiExport::oops_do(this);
+  SystemDictionary::oops_do(this);
+  Management::oops_do(this);
+  StringTable::oops_do(this);
 }
 
-class RootSetClosureMarkScope : public MarkingCodeBlobClosure::MarkScope {
-};
-
-void RootSetClosure::process_roots(OopClosure* closure) {
-  SaveRestoreCLDClaimBits save_restore_cld_claim_bits;
-  RootSetClosureMarkScope mark_scope;
-
-  CLDToOopClosure cldt_closure(closure);
-  ClassLoaderDataGraph::always_strong_cld_do(&cldt_closure);
-  CodeBlobToOopClosure blobs(closure, false);
-  Threads::oops_do(closure, NULL, &blobs); // XXX set CLDClosure to NULL
-  ObjectSynchronizer::oops_do(closure);
-  Universe::oops_do(closure);
-  JNIHandles::oops_do(closure);
-  JvmtiExport::oops_do(closure);
-  SystemDictionary::oops_do(closure);
-  Management::oops_do(closure);
-  StringTable::oops_do(closure);
-}
+template class RootSetClosure<BFSClosure>;
+template class RootSetClosure<DFSClosure>;
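
RootSetClosure is now a template over a Delegate (BFSClosure or DFSClosure) and forwards every non-null, aligned root straight to the delegate's do_root(), with the two explicit instantiations above keeping the template bodies in this .cpp file. A minimal sketch of the pattern, with illustrative names rather than the HotSpot closure types:

    // Sketch of the delegate pattern: the root walker is parameterized on its
    // consumer, so the per-root handling is bound at compile time.
    template <typename Delegate>
    class RootVisitorSketch {
      Delegate* const _delegate;
     public:
      explicit RootVisitorSketch(Delegate* d) : _delegate(d) {}
      void do_oop(const void** ref) {
        if (*ref != NULL) {
          _delegate->do_root(ref);   // no extra virtual hop per discovered root
        }
      }
    };
    // Explicit instantiations, as at the end of the file above:
    //   template class RootVisitorSketch<BFSClosure>;
    //   template class RootVisitorSketch<DFSClosure>;
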
--- a/src/share/vm/jfr/leakprofiler/chains/rootSetClosure.hpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/chains/rootSetClosure.hpp	Wed Oct 09 16:11:58 2019 +0800
@@ -26,18 +26,14 @@
 #define SHARE_VM_JFR_LEAKPROFILER_CHAINS_ROOTSETCLOSURE_HPP
 
 #include "memory/iterator.hpp"
-#include "oops/oop.hpp"
 
-class EdgeQueue;
-
+template <typename Delegate>
 class RootSetClosure: public ExtendedOopClosure { // BasicOopIterateClosure
  private:
-  RootSetClosure(EdgeQueue* edge_queue);
-  EdgeQueue* _edge_queue;
-  void closure_impl(const oop* reference, const oop pointee);
+  Delegate* const _delegate;
  public:
-  static void add_to_queue(EdgeQueue* edge_queue);
-  static void process_roots(OopClosure* closure);
+  RootSetClosure(Delegate* delegate);
+  void process();
 
   virtual void do_oop(oop* reference);
   virtual void do_oop(narrowOop* reference);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/checkpoint/eventEmitter.cpp	Wed Oct 09 16:11:58 2019 +0800
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/jfrEvents.hpp"
+#include "jfr/leakprofiler/chains/edgeStore.hpp"
+#include "jfr/leakprofiler/chains/pathToGcRootsOperation.hpp"
+#include "jfr/leakprofiler/checkpoint/eventEmitter.hpp"
+#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
+#include "jfr/leakprofiler/sampling/objectSample.hpp"
+#include "jfr/leakprofiler/sampling/objectSampler.hpp"
+#include "memory/resourceArea.hpp"
+#include "oops/markOop.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/thread.inline.hpp"
+#include "runtime/vmThread.hpp"
+
+EventEmitter::EventEmitter(const JfrTicks& start_time, const JfrTicks& end_time) :
+  _start_time(start_time),
+  _end_time(end_time),
+  _thread(Thread::current()),
+  _jfr_thread_local(_thread->jfr_thread_local()),
+  _thread_id(_thread->jfr_thread_local()->thread_id()) {}
+
+EventEmitter::~EventEmitter() {
+  // restore / reset thread local stack trace and thread id
+  _jfr_thread_local->set_thread_id(_thread_id);
+  _jfr_thread_local->clear_cached_stack_trace();
+}
+
+void EventEmitter::emit(ObjectSampler* sampler, int64_t cutoff_ticks, bool emit_all) {
+  assert(sampler != NULL, "invariant");
+
+  ResourceMark rm;
+  EdgeStore edge_store;
+  if (cutoff_ticks <= 0) {
+    // no reference chains
+    JfrTicks time_stamp = JfrTicks::now();
+    EventEmitter emitter(time_stamp, time_stamp);
+    emitter.write_events(sampler, &edge_store, emit_all);
+    return;
+  }
+  // events emitted with reference chains require a safepoint operation
+  PathToGcRootsOperation op(sampler, &edge_store, cutoff_ticks, emit_all);
+  VMThread::execute(&op);
+}
+
+size_t EventEmitter::write_events(ObjectSampler* object_sampler, EdgeStore* edge_store, bool emit_all) {
+  assert(_thread == Thread::current(), "invariant");
+  assert(_thread->jfr_thread_local() == _jfr_thread_local, "invariant");
+  assert(object_sampler != NULL, "invariant");
+  assert(edge_store != NULL, "invariant");
+
+  const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value();
+  size_t count = 0;
+
+  const ObjectSample* current = object_sampler->first();
+  while (current != NULL) {
+    ObjectSample* prev = current->prev();
+    if (current->is_alive_and_older_than(last_sweep)) {
+      write_event(current, edge_store);
+      ++count;
+    }
+    current = prev;
+  }
+
+  if (count > 0) {
+    // serialize associated checkpoints and potential chains
+    ObjectSampleCheckpoint::write(object_sampler, edge_store, emit_all, _thread);
+  }
+  return count;
+}
+
+static int array_size(const oop object) {
+  assert(object != NULL, "invariant");
+  if (object->is_array()) {
+    return arrayOop(object)->length();
+  }
+  return min_jint;
+}
+
+void EventEmitter::write_event(const ObjectSample* sample, EdgeStore* edge_store) {
+  assert(sample != NULL, "invariant");
+  assert(!sample->is_dead(), "invariant");
+  assert(edge_store != NULL, "invariant");
+  assert(_jfr_thread_local != NULL, "invariant");
+
+  const oop* object_addr = sample->object_addr();
+  traceid gc_root_id = 0;
+  const Edge* edge = NULL;
+  if (SafepointSynchronize::is_at_safepoint()) {
+    edge = (const Edge*)(*object_addr)->mark();
+  }
+  if (edge == NULL) {
+    // In order to dump out a representation of the sampled object,
+    // even though it was not reachable / took too long to reach,
+    // we need to register a top level edge for this object.
+    edge = edge_store->put(object_addr);
+  } else {
+    gc_root_id = edge_store->gc_root_id(edge);
+  }
+
+  assert(edge != NULL, "invariant");
+  const traceid object_id = edge_store->get_id(edge);
+  assert(object_id != 0, "invariant");
+
+  EventOldObjectSample e(UNTIMED);
+  e.set_starttime(_start_time);
+  e.set_endtime(_end_time);
+  e.set_allocationTime(sample->allocation_time());
+  e.set_lastKnownHeapUsage(sample->heap_used_at_last_gc());
+  e.set_object(object_id);
+  e.set_arrayElements(array_size(edge->pointee()));
+  e.set_root(gc_root_id);
+
+  // Temporarily assigning both the stack trace id and thread id
+  // onto the thread local data structure of the emitter thread (for the duration
+  // of the commit() call). This trick provides a means to override
+  // the event generation mechanism by injecting externally provided ids.
+  // At this particular location, it allows us to emit an old object event
+  // supplying information from where the actual sampling occurred.
+  _jfr_thread_local->set_cached_stack_trace_id(sample->stack_trace_id());
+  assert(sample->has_thread(), "invariant");
+  _jfr_thread_local->set_thread_id(sample->thread_id());
+  e.commit();
+}
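
Aside (not part of the changeset): the constructor/destructor pair above snapshots and restores the emitter thread's JFR thread-local state, while write_event() temporarily injects the sample's own stack trace id and thread id so that commit() attributes the event to where the sampling occurred. A minimal standalone sketch of that save/inject/restore shape, using stand-in types rather than the JFR classes:

#include <cstdint>
#include <iostream>

// Stand-ins for JfrThreadLocal and ObjectSample (illustrative only, not JFR types).
struct StandInThreadLocal {
  uint64_t thread_id = 1;
  uint64_t cached_stack_trace_id = 0;
};

struct StandInSample {
  uint64_t thread_id;
  uint64_t stack_trace_id;
};

// Mirrors the EventEmitter shape: snapshot the thread-local id on construction,
// inject the sample's ids for the duration of each commit, restore on destruction.
class ScopedEmitter {
  StandInThreadLocal& _tl;
  const uint64_t _saved_thread_id;
 public:
  explicit ScopedEmitter(StandInThreadLocal& tl) : _tl(tl), _saved_thread_id(tl.thread_id) {}
  ~ScopedEmitter() {
    _tl.thread_id = _saved_thread_id;   // restore the original id
    _tl.cached_stack_trace_id = 0;      // clear the cached stack trace
  }
  void write_event(const StandInSample& s) {
    _tl.cached_stack_trace_id = s.stack_trace_id;  // inject externally provided ids
    _tl.thread_id = s.thread_id;
    std::cout << "commit as thread " << _tl.thread_id
              << ", stack trace " << _tl.cached_stack_trace_id << "\n";
  }
};

int main() {
  StandInThreadLocal tl;
  {
    ScopedEmitter emitter(tl);
    emitter.write_event(StandInSample{42, 7});     // event attributed to the sampling site
  }
  std::cout << "restored thread id: " << tl.thread_id << "\n";  // prints 1
  return 0;
}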
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/checkpoint/eventEmitter.hpp	Wed Oct 09 16:11:58 2019 +0800
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_JFR_LEAKPROFILER_CHECKPOINT_EVENTEMITTER_HPP
+#define SHARE_JFR_LEAKPROFILER_CHECKPOINT_EVENTEMITTER_HPP
+
+#include "memory/allocation.hpp"
+#include "jfr/utilities/jfrTime.hpp"
+
+typedef u8 traceid;
+
+class EdgeStore;
+class JfrThreadLocal;
+class ObjectSample;
+class ObjectSampler;
+class Thread;
+
+class EventEmitter : public CHeapObj<mtTracing> {
+  friend class LeakProfiler;
+  friend class PathToGcRootsOperation;
+ private:
+  const JfrTicks& _start_time;
+  const JfrTicks& _end_time;
+  Thread* _thread;
+  JfrThreadLocal* _jfr_thread_local;
+  traceid _thread_id;
+
+  EventEmitter(const JfrTicks& start_time, const JfrTicks& end_time);
+  ~EventEmitter();
+
+  void write_event(const ObjectSample* sample, EdgeStore* edge_store);
+  size_t write_events(ObjectSampler* sampler, EdgeStore* store, bool emit_all);
+
+  static void emit(ObjectSampler* sampler, int64_t cutoff_ticks, bool emit_all);
+};
+
+#endif // SHARE_JFR_LEAKPROFILER_CHECKPOINT_EVENTEMITTER_HPP
--- a/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp	Wed Oct 09 16:11:58 2019 +0800
@@ -181,102 +181,89 @@
   }
 };
 
-void ObjectSampleCheckpoint::install(JfrCheckpointWriter& writer, bool class_unload, bool resume) {
-  assert(class_unload ? SafepointSynchronize::is_at_safepoint() : LeakProfiler::is_suspended(), "invariant");
-
+void ObjectSampleCheckpoint::install(JfrCheckpointWriter& writer, bool class_unload, bool type_set) {
   if (!writer.has_data()) {
-    if (!class_unload) {
-      LeakProfiler::resume();
-    }
-    assert(LeakProfiler::is_running(), "invariant");
     return;
   }
 
   assert(writer.has_data(), "invariant");
   const JfrCheckpointBlobHandle h_cp = writer.checkpoint_blob();
+  CheckpointInstall install(h_cp);
 
-  const ObjectSampler* const object_sampler = LeakProfiler::object_sampler();
+  // Class unload implies a safepoint.
+  // Not class unload implies the object sampler is locked, because it was claimed exclusively earlier.
+  // Therefore: direct access to the object sampler instance is safe.
+  ObjectSampler* const object_sampler = ObjectSampler::sampler();
   assert(object_sampler != NULL, "invariant");
 
   ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
   const ObjectSample* const last_resolved = object_sampler->last_resolved();
-  CheckpointInstall install(h_cp);
 
-  if (class_unload) {
-    if (last != NULL) {
-      // all samples need the class unload information
-      do_samples(last, NULL, install);
-    }
-    assert(LeakProfiler::is_running(), "invariant");
-    return;
-  }
-
-  // only new samples since last resolved checkpoint
+  // install only for new samples added since the last resolved checkpoint
   if (last != last_resolved) {
     do_samples(last, last_resolved, install);
-    if (resume) {
-      const_cast<ObjectSampler*>(object_sampler)->set_last_resolved(last);
+    if (class_unload) {
+      return;
     }
-  }
-  assert(LeakProfiler::is_suspended(), "invariant");
-  if (resume) {
-    LeakProfiler::resume();
-    assert(LeakProfiler::is_running(), "invariant");
+    if (type_set) {
+      object_sampler->set_last_resolved(last);
+    }
   }
 }
 
-void ObjectSampleCheckpoint::write(const EdgeStore* edge_store, bool emit_all, Thread* thread) {
+void ObjectSampleCheckpoint::write(ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread) {
+  assert(sampler != NULL, "invariant");
   assert(edge_store != NULL, "invariant");
   assert(thread != NULL, "invariant");
+
   static bool types_registered = false;
   if (!types_registered) {
     JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTSYSTEM, false, true, new RootSystemType());
     JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTTYPE, false, true, new RootType());
     types_registered = true;
   }
-  const ObjectSampler* const object_sampler = LeakProfiler::object_sampler();
-  assert(object_sampler != NULL, "invariant");
-  const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value();
-  ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
+
+  const jlong last_sweep = emit_all ? max_jlong : sampler->last_sweep().value();
+  ObjectSample* const last = const_cast<ObjectSample*>(sampler->last());
   {
     JfrCheckpointWriter writer(false, false, thread);
     CheckpointWrite checkpoint_write(writer, last_sweep);
     do_samples(last, NULL, checkpoint_write);
   }
+
   CheckpointStateReset state_reset(last_sweep);
   do_samples(last, NULL, state_reset);
+
   if (!edge_store->is_empty()) {
     // java object and chain representations
     JfrCheckpointWriter writer(false, true, thread);
     ObjectSampleWriter osw(writer, edge_store);
-    edge_store->iterate_edges(osw);
+    edge_store->iterate(osw);
   }
 }
 
-WriteObjectSampleStacktrace::WriteObjectSampleStacktrace(JfrStackTraceRepository& repo) :
-  _stack_trace_repo(repo) {
+int ObjectSampleCheckpoint::mark(ObjectSampler* object_sampler, ObjectSampleMarker& marker, bool emit_all) {
+  assert(object_sampler != NULL, "invariant");
+  ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
+  if (last == NULL) {
+    return 0;
+  }
+  const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value();
+  SampleMark mark(marker, last_sweep);
+  do_samples(last, NULL, mark);
+  return mark.count();
 }
 
+WriteObjectSampleStacktrace::WriteObjectSampleStacktrace(ObjectSampler* sampler, JfrStackTraceRepository& repo) :
+  _sampler(sampler), _stack_trace_repo(repo) {}
+
 bool WriteObjectSampleStacktrace::process() {
-  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
-  if (!LeakProfiler::is_running()) {
-    return true;
-  }
-  // Suspend the LeakProfiler subsystem
-  // to ensure stable samples even
-  // after we return from the safepoint.
-  LeakProfiler::suspend();
-  assert(!LeakProfiler::is_running(), "invariant");
-  assert(LeakProfiler::is_suspended(), "invariant");
+  assert(LeakProfiler::is_running(), "invariant");
+  assert(_sampler != NULL, "invariant");
 
-  const ObjectSampler* object_sampler = LeakProfiler::object_sampler();
-  assert(object_sampler != NULL, "invariant");
-  assert(LeakProfiler::is_suspended(), "invariant");
-
-  ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
-  const ObjectSample* const last_resolved = object_sampler->last_resolved();
+  ObjectSample* const last = const_cast<ObjectSample*>(_sampler->last());
+  const ObjectSample* const last_resolved = _sampler->last_resolved();
   if (last == last_resolved) {
-    assert(LeakProfiler::is_suspended(), "invariant");
     return true;
   }
 
@@ -294,27 +281,13 @@
   }
   if (count == 0) {
     writer.set_context(ctx);
-    assert(LeakProfiler::is_suspended(), "invariant");
     return true;
   }
   assert(count > 0, "invariant");
   writer.write_count((u4)count, count_offset);
   JfrStackTraceRepository::write_metadata(writer);
 
+  // install the stacktrace checkpoint information for the candidate samples
   ObjectSampleCheckpoint::install(writer, false, false);
-  assert(LeakProfiler::is_suspended(), "invariant");
   return true;
 }
-
-int ObjectSampleCheckpoint::mark(ObjectSampleMarker& marker, bool emit_all) {
-  const ObjectSampler* object_sampler = LeakProfiler::object_sampler();
-  assert(object_sampler != NULL, "invariant");
-  ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
-  if (last == NULL) {
-    return 0;
-  }
-  const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value();
-  SampleMark mark(marker, last_sweep);
-  do_samples(last, NULL, mark);
-  return mark.count();
-}
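
Aside (not part of the changeset): the install, write and mark paths above all reduce to one traversal: walk the sample list from a starting sample toward an end marker and apply a functor (CheckpointInstall, CheckpointWrite, CheckpointStateReset, SampleMark) to each sample, as in do_samples(last, last_resolved, install) and do_samples(last, NULL, mark). A standalone sketch of that traversal shape; the node type and callable interface here are stand-ins, not the JFR-internal ones:

#include <cassert>
#include <cstddef>
#include <iostream>

// Stand-in for ObjectSample: a node in an intrusive linked list (illustrative only).
struct Node {
  int value;
  Node* next;
};

// Generic traversal in the spirit of do_samples(start, end, functor):
// apply a callable to every node from 'start' up to (but not including) 'end'.
template <typename Functor>
static void do_nodes(Node* start, const Node* end, Functor& f) {
  assert(start != NULL);
  for (Node* cur = start; cur != end; cur = cur->next) {
    f(cur);
  }
}

struct Counter {
  size_t count = 0;
  void operator()(Node* n) { ++count; std::cout << n->value << "\n"; }
};

int main() {
  Node c{3, NULL}, b{2, &c}, a{1, &b};
  Counter counter;
  do_nodes(&a, NULL, counter);         // all nodes, like do_samples(last, NULL, functor)
  std::cout << counter.count << "\n";  // 3
  return 0;
}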
--- a/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp	Wed Oct 09 16:11:58 2019 +0800
@@ -26,25 +26,26 @@
 #define SHARE_VM_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLECHECKPOINT_HPP
 
 #include "memory/allocation.hpp"
-#include "utilities/exceptions.hpp"
 
 class EdgeStore;
+class JfrCheckpointWriter;
 class JfrStackTraceRepository;
-class JfrCheckpointWriter;
 class ObjectSampleMarker;
+class ObjectSampler;
 
 class ObjectSampleCheckpoint : AllStatic {
  public:
-  static void install(JfrCheckpointWriter& writer, bool class_unload, bool resume);
-  static void write(const EdgeStore* edge_store, bool emit_all, Thread* thread);
-  static int mark(ObjectSampleMarker& marker, bool emit_all);
+  static void install(JfrCheckpointWriter& writer, bool class_unload, bool type_set);
+  static void write(ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread);
+  static int mark(ObjectSampler* sampler, ObjectSampleMarker& marker, bool emit_all);
 };
 
 class WriteObjectSampleStacktrace : public StackObj {
  private:
+  ObjectSampler* const _sampler;
   JfrStackTraceRepository& _stack_trace_repo;
  public:
-  WriteObjectSampleStacktrace(JfrStackTraceRepository& repo);
+  WriteObjectSampleStacktrace(ObjectSampler* sampler, JfrStackTraceRepository& repo);
   bool process();
 };
 
--- a/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp	Wed Oct 09 16:11:58 2019 +0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -350,7 +350,7 @@
   return 1;
 }
 
-static traceid get_root_description_info_id(const Edge& edge, traceid id) {
+static traceid get_gc_root_description_info_id(const Edge& edge, traceid id) {
   assert(edge.is_root(), "invariant");
   if (EdgeUtils::is_leak_edge(edge)) {
     return 0;
@@ -518,7 +518,7 @@
   }
 }
 
-static void add_old_object_sample_info(const Edge* current, traceid id) {
+static void add_old_object_sample_info(const StoredEdge* current, traceid id) {
   assert(current != NULL, "invariant");
   if (sample_infos == NULL) {
     sample_infos = new SampleInfo();
@@ -528,11 +528,11 @@
   assert(oosi != NULL, "invariant");
   oosi->_id = id;
   oosi->_data._object = current->pointee();
-  oosi->_data._reference_id = current->is_root() ? (traceid)0 : id;
+  oosi->_data._reference_id = current->parent() == NULL ? (traceid)0 : id;
   sample_infos->store(oosi);
 }
 
-static void add_reference_info(const RoutableEdge* current, traceid id, traceid parent_id) {
+static void add_reference_info(const StoredEdge* current, traceid id, traceid parent_id) {
   assert(current != NULL, "invariant");
   if (ref_infos == NULL) {
     ref_infos = new RefInfo();
@@ -544,37 +544,43 @@
 
   ri->_id = id;
   ri->_data._array_info_id =  !current->is_skip_edge() ? get_array_info_id(*current, id) : 0;
-  ri->_data._field_info_id = ri->_data._array_info_id == 0 && !current->is_skip_edge() ?
-                               get_field_info_id(*current) : (traceid)0;
+  ri->_data._field_info_id = ri->_data._array_info_id == 0 && !current->is_skip_edge() ? get_field_info_id(*current) : (traceid)0;
   ri->_data._old_object_sample_id = parent_id;
   ri->_data._skip = current->skip_length();
   ref_infos->store(ri);
 }
 
-static traceid add_root_info(const Edge* root, traceid id) {
-  assert(root != NULL, "invariant");
-  assert(root->is_root(), "invariant");
-  return get_root_description_info_id(*root, id);
+static bool is_gc_root(const StoredEdge* current) {
+  assert(current != NULL, "invariant");
+  return current->parent() == NULL && current->gc_root_id() != 0;
 }
 
-void ObjectSampleWriter::write(const RoutableEdge* edge) {
+static traceid add_gc_root_info(const StoredEdge* root, traceid id) {
+  assert(root != NULL, "invariant");
+  assert(is_gc_root(root), "invariant");
+  return get_gc_root_description_info_id(*root, id);
+}
+
+void ObjectSampleWriter::write(const StoredEdge* edge) {
   assert(edge != NULL, "invariant");
   const traceid id = _store->get_id(edge);
   add_old_object_sample_info(edge, id);
-  const RoutableEdge* parent = edge->logical_parent();
+  const StoredEdge* const parent = edge->parent();
   if (parent != NULL) {
     add_reference_info(edge, id, _store->get_id(parent));
   } else {
-    assert(edge->is_root(), "invariant");
-    add_root_info(edge, id);
+    if (is_gc_root(edge)) {
+      assert(edge->gc_root_id() == id, "invariant");
+      add_gc_root_info(edge, id);
+    }
   }
 }
 
-ObjectSampleWriter::ObjectSampleWriter(JfrCheckpointWriter& writer, const EdgeStore* store) :
+ObjectSampleWriter::ObjectSampleWriter(JfrCheckpointWriter& writer, EdgeStore* store) :
   _writer(writer),
   _store(store) {
   assert(store != NULL, "invariant");
-  assert(store->number_of_entries() > 0, "invariant");
+  assert(!store->is_empty(), "invariant");
   sample_infos = NULL;
   ref_infos = NULL;
   array_infos = NULL;
@@ -590,26 +596,7 @@
   write_root_descriptors(_writer);
 }
 
-void ObjectSampleWriter::write_chain(const RoutableEdge& edge) {
-  assert(EdgeUtils::is_leak_edge(edge), "invariant");
-  if (edge.processed()) {
-    return;
-  }
-  EdgeUtils::collapse_chain(edge);
-  const RoutableEdge* current = &edge;
-  while (current != NULL) {
-    if (current->processed()) {
-      return;
-    }
-    write(current);
-    current->set_processed();
-    current = current->logical_parent();
-  }
-}
-
-bool ObjectSampleWriter::operator()(const RoutableEdge& edge) {
-  if (EdgeUtils::is_leak_edge(edge)) {
-    write_chain(edge);
-  }
+bool ObjectSampleWriter::operator()(StoredEdge& e) {
+  write(&e);
   return true;
 }
--- a/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleWriter.hpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleWriter.hpp	Wed Oct 09 16:11:58 2019 +0800
@@ -30,21 +30,17 @@
 class Edge;
 class EdgeStore;
 class JfrCheckpointWriter;
-class RoutableEdge;
+class StoredEdge;
 
 class ObjectSampleWriter : public StackObj {
  private:
   JfrCheckpointWriter& _writer;
-  const EdgeStore* const _store;
-
-  void write(const RoutableEdge* edge);
-  void write_chain(const RoutableEdge& edge);
-
+  EdgeStore* const _store;
+  void write(const StoredEdge* edge);
  public:
-  ObjectSampleWriter(JfrCheckpointWriter& writer, const EdgeStore* store);
+  ObjectSampleWriter(JfrCheckpointWriter& writer, EdgeStore* store);
   ~ObjectSampleWriter();
-
-  bool operator()(const RoutableEdge& edge);
+  bool operator()(StoredEdge& edge);
 };
 
 #endif // SHARE_VM_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLEWRITER_HPP
--- a/src/share/vm/jfr/leakprofiler/checkpoint/rootResolver.hpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/checkpoint/rootResolver.hpp	Wed Oct 09 16:11:58 2019 +0800
@@ -25,8 +25,8 @@
 #ifndef SHARE_VM_JFR_LEAKPROFILER_CHECKPOINT_ROOTRESOLVER_HPP
 #define SHARE_VM_JFR_LEAKPROFILER_CHECKPOINT_ROOTRESOLVER_HPP
 
+#include "jfr/leakprofiler/utilities/rootType.hpp"
 #include "memory/allocation.hpp"
-#include "jfr/leakprofiler/utilities/rootType.hpp"
 #include "oops/oopsHierarchy.hpp"
 
 struct RootCallbackInfo {
--- a/src/share/vm/jfr/leakprofiler/emitEventOperation.cpp	Fri Sep 27 13:23:32 2019 +0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,235 +0,0 @@
-/*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-#include "precompiled.hpp"
-#include "gc_interface/collectedHeap.hpp"
-#include "jfr/jfrEvents.hpp"
-#include "jfr/leakprofiler/utilities/granularTimer.hpp"
-#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
-#include "jfr/leakprofiler/chains/edge.hpp"
-#include "jfr/leakprofiler/chains/edgeQueue.hpp"
-#include "jfr/leakprofiler/chains/edgeStore.hpp"
-#include "jfr/leakprofiler/chains/bitset.hpp"
-#include "jfr/leakprofiler/sampling/objectSample.hpp"
-#include "jfr/leakprofiler/leakProfiler.hpp"
-#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
-#include "jfr/leakprofiler/sampling/objectSampler.hpp"
-#include "jfr/leakprofiler/emitEventOperation.hpp"
-#include "jfr/leakprofiler/chains/bfsClosure.hpp"
-#include "jfr/leakprofiler/chains/dfsClosure.hpp"
-#include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
-#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
-#include "jfr/support/jfrThreadId.hpp"
-#include "memory/resourceArea.hpp"
-#include "memory/universe.hpp"
-#include "oops/markOop.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/safepoint.hpp"
-#include "runtime/vmThread.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-/* The EdgeQueue is backed by directly managed virtual memory.
- * We will attempt to dimension an initial reservation
- * in proportion to the size of the heap (represented by heap_region).
- * Initial memory reservation: 5% of the heap OR at least 32 Mb
- * Commit ratio: 1 : 10 (subject to allocation granularties)
- */
-static size_t edge_queue_memory_reservation(const MemRegion& heap_region) {
-  const size_t memory_reservation_bytes = MAX2(heap_region.byte_size() / 20, 32*M);
-  assert(memory_reservation_bytes >= (size_t)32*M, "invariant");
-  return memory_reservation_bytes;
-}
-
-static size_t edge_queue_memory_commit_size(size_t memory_reservation_bytes) {
-  const size_t memory_commit_block_size_bytes = memory_reservation_bytes / 10;
-  assert(memory_commit_block_size_bytes >= (size_t)3*M, "invariant");
-  return memory_commit_block_size_bytes;
-}
-
-static void log_edge_queue_summary(const EdgeQueue& edge_queue) {
-  if (LogJFR && Verbose) tty->print_cr("EdgeQueue reserved size total: " SIZE_FORMAT " [KB]", edge_queue.reserved_size() / K);
-  if (LogJFR && Verbose) tty->print_cr("EdgeQueue edges total: " SIZE_FORMAT, edge_queue.top());
-  if (LogJFR && Verbose) tty->print_cr("EdgeQueue liveset total: " SIZE_FORMAT " [KB]", edge_queue.live_set() / K);
-  if (edge_queue.reserved_size() > 0) {
-    if (LogJFR && Verbose) tty->print_cr("EdgeQueue commit reserve ratio: %f\n",
-      ((double)edge_queue.live_set() / (double)edge_queue.reserved_size()));
-  }
-}
-
-void EmitEventOperation::doit() {
-  assert(LeakProfiler::is_running(), "invariant");
-  _object_sampler = LeakProfiler::object_sampler();
-  assert(_object_sampler != NULL, "invariant");
-
-  _vm_thread = VMThread::vm_thread();
-  assert(_vm_thread == Thread::current(), "invariant");
-  _vm_thread_local = _vm_thread->jfr_thread_local();
-  assert(_vm_thread_local != NULL, "invariant");
-  assert(_vm_thread->jfr_thread_local()->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant");
-
-  // The VM_Operation::evaluate() which invoked doit()
-  // contains a top level ResourceMark
-
-  // save the original markWord for the potential leak objects
-  // to be restored on function exit
-  ObjectSampleMarker marker;
-  if (ObjectSampleCheckpoint::mark(marker, _emit_all) == 0) {
-    return;
-  }
-
-  EdgeStore edge_store;
-
-  GranularTimer::start(_cutoff_ticks, 1000000);
-  if (_cutoff_ticks <= 0) {
-    // no chains
-    write_events(&edge_store);
-    return;
-  }
-
-  assert(_cutoff_ticks > 0, "invariant");
-
-  // The bitset used for marking is dimensioned as a function of the heap size
-  const MemRegion heap_region = Universe::heap()->reserved_region();
-  BitSet mark_bits(heap_region);
-
-  // The edge queue is dimensioned as a fraction of the heap size
-  const size_t edge_queue_reservation_size = edge_queue_memory_reservation(heap_region);
-  EdgeQueue edge_queue(edge_queue_reservation_size, edge_queue_memory_commit_size(edge_queue_reservation_size));
-
-  // The initialize() routines will attempt to reserve and allocate backing storage memory.
-  // Failure to accommodate will render root chain processing impossible.
-  // As a fallback on failure, just write out the existing samples, flat, without chains.
-  if (!(mark_bits.initialize() && edge_queue.initialize())) {
-    if (LogJFR) tty->print_cr("Unable to allocate memory for root chain processing");
-    write_events(&edge_store);
-    return;
-  }
-
-  // necessary condition for attempting a root set iteration
-  Universe::heap()->ensure_parsability(false);
-
-  RootSetClosure::add_to_queue(&edge_queue);
-  if (edge_queue.is_full()) {
-    // Pathological case where roots don't fit in queue
-    // Do a depth-first search, but mark roots first
-    // to avoid walking sideways over roots
-    DFSClosure::find_leaks_from_root_set(&edge_store, &mark_bits);
-  } else {
-    BFSClosure bfs(&edge_queue, &edge_store, &mark_bits);
-    bfs.process();
-  }
-  GranularTimer::stop();
-  write_events(&edge_store);
-  log_edge_queue_summary(edge_queue);
-}
-
-int EmitEventOperation::write_events(EdgeStore* edge_store) {
-  assert(_object_sampler != NULL, "invariant");
-  assert(edge_store != NULL, "invariant");
-  assert(_vm_thread != NULL, "invariant");
-  assert(_vm_thread_local != NULL, "invariant");
-  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
-
-  // save thread id in preparation for thread local trace data manipulations
-  const traceid vmthread_id = _vm_thread_local->thread_id();
-  assert(_vm_thread_local->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant");
-
-  const jlong last_sweep = _emit_all ? max_jlong : _object_sampler->last_sweep().value();
-  int count = 0;
-
-  const ObjectSample* current = _object_sampler->first();
-  while (current != NULL) {
-    ObjectSample* prev = current->prev();
-    if (current->is_alive_and_older_than(last_sweep)) {
-      write_event(current, edge_store);
-      ++count;
-    }
-    current = prev;
-  }
-
-  // restore thread local stack trace and thread id
-  _vm_thread_local->set_thread_id(vmthread_id);
-  _vm_thread_local->clear_cached_stack_trace();
-  assert(_vm_thread_local->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant");
-
-  if (count > 0) {
-    // serialize assoicated checkpoints
-    ObjectSampleCheckpoint::write(edge_store, _emit_all, _vm_thread);
-  }
-  return count;
-}
-
-static int array_size(const oop object) {
-  assert(object != NULL, "invariant");
-  if (object->is_array()) {
-    return arrayOop(object)->length();
-  }
-  return min_jint;
-}
-
-void EmitEventOperation::write_event(const ObjectSample* sample, EdgeStore* edge_store) {
-  assert(sample != NULL, "invariant");
-  assert(!sample->is_dead(), "invariant");
-  assert(edge_store != NULL, "invariant");
-  assert(_vm_thread_local != NULL, "invariant");
-  const oop* object_addr = sample->object_addr();
-  assert(*object_addr != NULL, "invariant");
-
-  const Edge* edge = (const Edge*)(*object_addr)->mark();
-  traceid gc_root_id = 0;
-  if (edge == NULL) {
-    // In order to dump out a representation of the event
-    // even though it was not reachable / too long to reach,
-    // we need to register a top level edge for this object
-    Edge e(NULL, object_addr);
-    edge_store->add_chain(&e, 1);
-    edge = (const Edge*)(*object_addr)->mark();
-  } else {
-    gc_root_id = edge_store->get_root_id(edge);
-  }
-
-  assert(edge != NULL, "invariant");
-  assert(edge->pointee() == *object_addr, "invariant");
-  const traceid object_id = edge_store->get_id(edge);
-  assert(object_id != 0, "invariant");
-
-  EventOldObjectSample e(UNTIMED);
-  e.set_starttime(GranularTimer::start_time());
-  e.set_endtime(GranularTimer::end_time());
-  e.set_allocationTime(sample->allocation_time());
-  e.set_lastKnownHeapUsage(sample->heap_used_at_last_gc());
-  e.set_object(object_id);
-  e.set_arrayElements(array_size(*object_addr));
-  e.set_root(gc_root_id);
-
-  // Temporarily assigning both the stack trace id and thread id
-  // onto the thread local data structure of the VMThread (for the duration
-  // of the commit() call). This trick provides a means to override
-  // the event generation mechanism by injecting externally provided id's.
-  // Here, in particular, this allows us to emit an old object event
-  // supplying information from where the actual sampling occurred.
-  _vm_thread_local->set_cached_stack_trace_id(sample->stack_trace_id());
-  assert(sample->has_thread(), "invariant");
-  _vm_thread_local->set_thread_id(sample->thread_id());
-  e.commit();
-}
--- a/src/share/vm/jfr/leakprofiler/emitEventOperation.hpp	Fri Sep 27 13:23:32 2019 +0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_LEAKPROFILER_EMITEVENTOPERATION_HPP
-#define SHARE_VM_LEAKPROFILER_EMITEVENTOPERATION_HPP
-
-#include "runtime/vm_operations.hpp"
-
-class BFSClosure;
-class EdgeStore;
-class EdgeQueue;
-class JfrThreadData;
-class ObjectSample;
-class ObjectSampler;
-
-// Safepoint operation for emitting object sample events
-class EmitEventOperation : public VM_Operation {
- private:
-  jlong _cutoff_ticks;
-  bool _emit_all;
-  VMThread* _vm_thread;
-  JfrThreadLocal* _vm_thread_local;
-  ObjectSampler* _object_sampler;
-
-  void write_event(const ObjectSample* sample, EdgeStore* edge_store);
-  int write_events(EdgeStore* edge_store);
-
- public:
-  EmitEventOperation(jlong cutoff_ticks, bool emit_all) :
-    _cutoff_ticks(cutoff_ticks),
-    _emit_all(emit_all),
-    _vm_thread(NULL),
-    _vm_thread_local(NULL),
-    _object_sampler(NULL) {
-  }
-
-  VMOp_Type type() const {
-    return VMOp_GC_HeapInspection;
-  }
-
-  Mode evaluation_mode() const {
-    return _safepoint;
-  }
-
-  virtual void doit();
-};
-
-#endif // SHARE_VM_LEAKPROFILER_EMITEVENTOPERATION_HPP
--- a/src/share/vm/jfr/leakprofiler/leakProfiler.cpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/leakProfiler.cpp	Wed Oct 09 16:11:58 2019 +0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,68 +23,80 @@
  */
 
 #include "precompiled.hpp"
-#include "jfr/leakprofiler/emitEventOperation.hpp"
 #include "jfr/leakprofiler/leakProfiler.hpp"
 #include "jfr/leakprofiler/startOperation.hpp"
 #include "jfr/leakprofiler/stopOperation.hpp"
+#include "jfr/leakprofiler/checkpoint/eventEmitter.hpp"
 #include "jfr/leakprofiler/sampling/objectSampler.hpp"
 #include "jfr/recorder/service/jfrOptionSet.hpp"
 #include "memory/iterator.hpp"
-#include "oops/oop.hpp"
-#include "runtime/atomic.hpp"
-#include "runtime/orderAccess.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/vmThread.hpp"
-#include "utilities/ostream.hpp"
-
-// Only to be updated during safepoint
-ObjectSampler* LeakProfiler::_object_sampler = NULL;
 
-static volatile jbyte suspended = 0;
-bool LeakProfiler::start(jint sample_count) {
-  if (_object_sampler != NULL) {
-    // already started
+bool LeakProfiler::is_running() {
+  return ObjectSampler::is_created();
+}
+
+bool LeakProfiler::start(int sample_count) {
+  if (is_running()) {
     return true;
   }
+
   // Allows user to disable leak profiler on command line by setting queue size to zero.
-  if (sample_count > 0) {
-    StartOperation op(sample_count);
-    VMThread::execute(&op);
-    return _object_sampler != NULL;
+  if (sample_count == 0) {
+    return false;
   }
-  return false;
+
+  assert(!is_running(), "invariant");
+  assert(sample_count > 0, "invariant");
+
+  // schedule the safepoint operation for installing the object sampler
+  StartOperation op(sample_count);
+  VMThread::execute(&op);
+
+  if (!is_running()) {
+    if (LogJFR && Verbose) tty->print_cr("Object sampling could not be started because the sampler could not be allocated");
+    return false;
+  }
+  assert(is_running(), "invariant");
+  if (LogJFR && Verbose) tty->print_cr("Object sampling started");
+  return true;
 }
 
 bool LeakProfiler::stop() {
-  if (_object_sampler == NULL) {
-    // already stopped/not started
-    return true;
+  if (!is_running()) {
+    return false;
   }
+
+  // schedule the safepoint operation for uninstalling and destroying the object sampler
   StopOperation op;
   VMThread::execute(&op);
-  return _object_sampler == NULL;
+
+  assert(!is_running(), "invariant");
+  if (LogJFR && Verbose) tty->print_cr("Object sampling stopped");
+  return true;
 }
 
-void LeakProfiler::emit_events(jlong cutoff_ticks, bool emit_all) {
+void LeakProfiler::emit_events(int64_t cutoff_ticks, bool emit_all) {
   if (!is_running()) {
     return;
   }
-  EmitEventOperation op(cutoff_ticks, emit_all);
-  VMThread::execute(&op);
+  // exclusive access to object sampler instance
+  ObjectSampler* const sampler = ObjectSampler::acquire();
+  assert(sampler != NULL, "invariant");
+  EventEmitter::emit(sampler, cutoff_ticks, emit_all);
+  ObjectSampler::release();
 }
 
 void LeakProfiler::oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
   assert(SafepointSynchronize::is_at_safepoint(),
     "Leak Profiler::oops_do(...) may only be called during safepoint");
-
-  if (_object_sampler != NULL) {
-    _object_sampler->oops_do(is_alive, f);
+  if (is_running()) {
+    ObjectSampler::oops_do(is_alive, f);
   }
 }
 
-void LeakProfiler::sample(HeapWord* object,
-                          size_t size,
-                          JavaThread* thread) {
+void LeakProfiler::sample(HeapWord* object, size_t size, JavaThread* thread) {
   assert(is_running(), "invariant");
   assert(thread != NULL, "invariant");
   assert(thread->thread_state() == _thread_in_vm, "invariant");
@@ -94,39 +106,5 @@
     return;
   }
 
-  _object_sampler->add(object, size, thread);
-}
-
-ObjectSampler* LeakProfiler::object_sampler() {
-  assert(is_suspended() || SafepointSynchronize::is_at_safepoint(),
-    "Leak Profiler::object_sampler() may only be called during safepoint");
-  return _object_sampler;
-}
-
-void LeakProfiler::set_object_sampler(ObjectSampler* object_sampler) {
-  assert(SafepointSynchronize::is_at_safepoint(),
-    "Leak Profiler::set_object_sampler() may only be called during safepoint");
-  _object_sampler = object_sampler;
-}
-
-bool LeakProfiler::is_running() {
-  return _object_sampler != NULL && !suspended;
+  ObjectSampler::sample(object, size, thread);
 }
-
-bool LeakProfiler::is_suspended() {
-  return _object_sampler != NULL && suspended;
-}
-
-void LeakProfiler::resume() {
-  assert(is_suspended(), "invariant");
-  OrderAccess::storestore();
-  Atomic::store((jbyte)0, &suspended);
-  assert(is_running(), "invariant");
-}
-
-void LeakProfiler::suspend() {
-  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
-  assert(_object_sampler != NULL, "invariant");
-  assert(!is_suspended(), "invariant");
-  suspended = (jbyte)1; // safepoint visible
-}
--- a/src/share/vm/jfr/leakprofiler/leakProfiler.hpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/leakProfiler.hpp	Wed Oct 09 16:11:58 2019 +0800
@@ -28,35 +28,15 @@
 #include "memory/allocation.hpp"
 
 class BoolObjectClosure;
-class ObjectSampler;
 class OopClosure;
-class Thread;
 
 class LeakProfiler : public AllStatic {
-  friend class ClassUnloadTypeSet;
-  friend class EmitEventOperation;
-  friend class ObjectSampleCheckpoint;
-  friend class StartOperation;
-  friend class StopOperation;
-  friend class TypeSet;
-  friend class WriteObjectSampleStacktrace;
-
- private:
-  static ObjectSampler* _object_sampler;
-
-  static void set_object_sampler(ObjectSampler* object_sampler);
-  static ObjectSampler* object_sampler();
-
-  static void suspend();
-  static void resume();
-  static bool is_suspended();
-
  public:
-  static bool start(jint sample_count);
+  static bool start(int sample_count);
   static bool stop();
-  static void emit_events(jlong cutoff_ticks, bool emit_all);
   static bool is_running();
 
+  static void emit_events(int64_t cutoff_ticks, bool emit_all);
   static void sample(HeapWord* object, size_t size, JavaThread* thread);
 
   // Called by GC
--- a/src/share/vm/jfr/leakprofiler/sampling/objectSampler.cpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/sampling/objectSampler.cpp	Wed Oct 09 16:11:58 2019 +0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,8 +34,18 @@
 #include "jfr/utilities/jfrTryLock.hpp"
 #include "memory/universe.hpp"
 #include "oops/oop.inline.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/orderAccess.hpp"
+#include "runtime/safepoint.hpp"
 #include "runtime/thread.hpp"
 
+static ObjectSampler* _instance = NULL;
+
+static ObjectSampler& instance() {
+  assert(_instance != NULL, "invariant");
+  return *_instance;
+}
+
 ObjectSampler::ObjectSampler(size_t size) :
   _priority_queue(new SamplePriorityQueue(size)),
   _list(new SampleList(size)),
@@ -43,7 +53,6 @@
   _total_allocated(0),
   _threshold(0),
   _size(size),
-  _tryLock(0),
   _dead_samples(false) {}
 
 ObjectSampler::~ObjectSampler() {
@@ -53,32 +62,109 @@
   _list = NULL;
 }
 
-void ObjectSampler::add(HeapWord* obj, size_t allocated, JavaThread* thread) {
+bool ObjectSampler::create(size_t size) {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  assert(_instance == NULL, "invariant");
+  _instance = new ObjectSampler(size);
+  return _instance != NULL;
+}
+
+bool ObjectSampler::is_created() {
+  return _instance != NULL;
+}
+
+ObjectSampler* ObjectSampler::sampler() {
+  assert(is_created(), "invariant");
+  return _instance;
+}
+
+void ObjectSampler::destroy() {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  if (_instance != NULL) {
+    ObjectSampler* const sampler = _instance;
+    _instance = NULL;
+    delete sampler;
+  }
+}
+
+static volatile int _lock = 0;
+
+ObjectSampler* ObjectSampler::acquire() {
+  assert(is_created(), "invariant");
+  while (Atomic::cmpxchg(1, &_lock, 0) == 1) {}
+  return _instance;
+}
+
+void ObjectSampler::release() {
+  assert(is_created(), "invariant");
+  OrderAccess::fence();
+  _lock = 0;
+}
+
+static traceid get_thread_id(JavaThread* thread) {
   assert(thread != NULL, "invariant");
-  const traceid thread_id = thread->threadObj() != NULL ? thread->jfr_thread_local()->thread_id() : 0;
+  if (thread->threadObj() == NULL) {
+    return 0;
+  }
+  const JfrThreadLocal* const tl = thread->jfr_thread_local();
+  assert(tl != NULL, "invariant");
+  if (!tl->has_thread_checkpoint()) {
+    JfrCheckpointManager::create_thread_checkpoint(thread);
+  }
+  assert(tl->has_thread_checkpoint(), "invariant");
+  return tl->thread_id();
+}
+
+// Populates the thread local stack frames, but does not add them
+// to the stacktrace repository (...yet, see stacktrace_id() below)
+//
+void ObjectSampler::fill_stacktrace(JfrStackTrace* stacktrace, JavaThread* thread) {
+  assert(stacktrace != NULL, "invariant");
+  assert(thread != NULL, "invariant");
+  if (JfrEventSetting::has_stacktrace(EventOldObjectSample::eventId)) {
+    JfrStackTraceRepository::fill_stacktrace_for(thread, stacktrace, 0);
+  }
+}
+
+// We were successful in acquiring the try lock and have been selected for adding a sample.
+// Go ahead with installing our previously taken stacktrace into the stacktrace repository.
+//
+traceid ObjectSampler::stacktrace_id(const JfrStackTrace* stacktrace, JavaThread* thread) {
+  assert(stacktrace != NULL, "invariant");
+  assert(stacktrace->hash() != 0, "invariant");
+  const traceid stacktrace_id = JfrStackTraceRepository::add(stacktrace, thread);
+  thread->jfr_thread_local()->set_cached_stack_trace_id(stacktrace_id, stacktrace->hash());
+  return stacktrace_id;
+}
+
+void ObjectSampler::sample(HeapWord* obj, size_t allocated, JavaThread* thread) {
+  assert(thread != NULL, "invariant");
+  assert(is_created(), "invariant");
+
+  const traceid thread_id = get_thread_id(thread);
   if (thread_id == 0) {
     return;
   }
-  assert(thread_id != 0, "invariant");
-
-  if (!thread->jfr_thread_local()->has_thread_checkpoint()) {
-    JfrCheckpointManager::create_thread_checkpoint(thread);
-    assert(thread->jfr_thread_local()->has_thread_checkpoint(), "invariant");
-  }
+  const JfrThreadLocal* const tl = thread->jfr_thread_local();
+  JfrStackTrace stacktrace(tl->stackframes(), tl->stackdepth());
+  fill_stacktrace(&stacktrace, thread);
 
-  traceid stack_trace_id = 0;
-  unsigned int stack_trace_hash = 0;
-  if (JfrEventSetting::has_stacktrace(EventOldObjectSample::eventId)) {
-    stack_trace_id = JfrStackTraceRepository::record(thread, 0, &stack_trace_hash);
-    thread->jfr_thread_local()->set_cached_stack_trace_id(stack_trace_id, stack_trace_hash);
-  }
-
-  JfrTryLock tryLock(&_tryLock);
+  // try to enter the critical section
+  JfrTryLock tryLock(&_lock);
   if (!tryLock.has_lock()) {
     if (LogJFR && Verbose) tty->print_cr("Skipping old object sample due to lock contention");
     return;
   }
 
+  instance().add(obj, allocated, thread_id, &stacktrace, thread);
+}
+
+void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, JfrStackTrace* stacktrace, JavaThread* thread) {
+  assert(stacktrace != NULL, "invariant");
+  assert(thread_id != 0, "invariant");
+  assert(thread != NULL, "invariant");
+  assert(thread->jfr_thread_local()->has_thread_checkpoint(), "invariant");
+
   if (_dead_samples) {
     scavenge();
     assert(!_dead_samples, "invariant");
@@ -100,13 +186,13 @@
   }
 
   assert(sample != NULL, "invariant");
-  assert(thread_id != 0, "invariant");
   sample->set_thread_id(thread_id);
   sample->set_thread_checkpoint(thread->jfr_thread_local()->thread_checkpoint());
 
-  if (stack_trace_id != 0) {
-    sample->set_stack_trace_id(stack_trace_id);
-    sample->set_stack_trace_hash(stack_trace_hash);
+  const unsigned int stacktrace_hash = stacktrace->hash();
+  if (stacktrace_hash != 0) {
+    sample->set_stack_trace_id(stacktrace_id(stacktrace, thread));
+    sample->set_stack_trace_hash(stacktrace_hash);
   }
 
   sample->set_span(allocated);
@@ -117,6 +203,53 @@
   _priority_queue->push(sample);
 }
 
+void ObjectSampler::scavenge() {
+  ObjectSample* current = _list->last();
+  while (current != NULL) {
+    ObjectSample* next = current->next();
+    if (current->is_dead()) {
+      remove_dead(current);
+    }
+    current = next;
+  }
+  _dead_samples = false;
+}
+
+void ObjectSampler::remove_dead(ObjectSample* sample) {
+  assert(sample != NULL, "invariant");
+  assert(sample->is_dead(), "invariant");
+  ObjectSample* const previous = sample->prev();
+  // push span on to previous
+  if (previous != NULL) {
+    _priority_queue->remove(previous);
+    previous->add_span(sample->span());
+    _priority_queue->push(previous);
+  }
+  _priority_queue->remove(sample);
+  _list->release(sample);
+}
+
+void ObjectSampler::oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
+  assert(is_created(), "invariant");
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  ObjectSampler& sampler = instance();
+  ObjectSample* current = sampler._list->last();
+  while (current != NULL) {
+    ObjectSample* next = current->next();
+    if (!current->is_dead()) {
+      if (is_alive->do_object_b(current->object())) {
+        // The weakly referenced object is alive, update pointer
+        f->do_oop(const_cast<oop*>(current->object_addr()));
+      } else {
+        current->set_dead();
+        sampler._dead_samples = true;
+      }
+    }
+    current = next;
+  }
+  sampler._last_sweep = JfrTicks::now();
+}
+
 const ObjectSample* ObjectSampler::last() const {
   return _list->last();
 }
@@ -133,50 +266,6 @@
   _list->set_last_resolved(sample);
 }
 
-void ObjectSampler::oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
-  ObjectSample* current = _list->last();
-  while (current != NULL) {
-    ObjectSample* next = current->next();
-    if (!current->is_dead()) {
-      if (is_alive->do_object_b(current->object())) {
-        // The weakly referenced object is alive, update pointer
-        f->do_oop(const_cast<oop*>(current->object_addr()));
-      } else {
-        current->set_dead();
-        _dead_samples = true;
-      }
-    }
-    current = next;
-  }
-  _last_sweep = JfrTicks::now();
-}
-
-void ObjectSampler::remove_dead(ObjectSample* sample) {
-  assert(sample != NULL, "invariant");
-  assert(sample->is_dead(), "invariant");
-  ObjectSample* const previous = sample->prev();
-  // push span on to previous
-  if (previous != NULL) {
-    _priority_queue->remove(previous);
-    previous->add_span(sample->span());
-    _priority_queue->push(previous);
-  }
-  _priority_queue->remove(sample);
-  _list->release(sample);
-}
-
-void ObjectSampler::scavenge() {
-  ObjectSample* current = _list->last();
-  while (current != NULL) {
-    ObjectSample* next = current->next();
-    if (current->is_dead()) {
-      remove_dead(current);
-    }
-    current = next;
-  }
-  _dead_samples = false;
-}
-
 int ObjectSampler::item_count() const {
   return _priority_queue->count();
 }
@@ -188,7 +277,7 @@
 ObjectSample* ObjectSampler::item_at(int index) {
   return const_cast<ObjectSample*>(
     const_cast<const ObjectSampler*>(this)->item_at(index)
-                                   );
+                                  );
 }
 
 const JfrTicks& ObjectSampler::last_sweep() const {
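
Aside (not part of the changeset): the new acquire()/release() pair above implements exclusive, non-safepoint access with a bare spin on a flag: acquire() loops on Atomic::cmpxchg until it installs 1, and release() issues a fence before storing 0. A standalone approximation using std::atomic; the real code uses HotSpot's Atomic and OrderAccess primitives, so this is only an illustration of the shape:

#include <atomic>

// Stand-in for the _lock flag guarding the object sampler instance.
static std::atomic<int> g_lock{0};

void sampler_acquire() {
  int expected = 0;
  // spin until the flag flips from 0 to 1 (compare-and-swap loop)
  while (!g_lock.compare_exchange_weak(expected, 1, std::memory_order_acquire)) {
    expected = 0;
  }
}

void sampler_release() {
  // publish all writes made while holding the flag, then clear it
  g_lock.store(0, std::memory_order_release);
}

int main() {
  sampler_acquire();
  // ... exclusive, non-safepoint access to the sampler instance ...
  sampler_release();
  return 0;
}

LeakProfiler::emit_events() and the recorder service's pre/post safepoint write sequence bracket the sampler with exactly this acquire/release pair.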
--- a/src/share/vm/jfr/leakprofiler/sampling/objectSampler.hpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/sampling/objectSampler.hpp	Wed Oct 09 16:11:58 2019 +0800
@@ -28,7 +28,10 @@
 #include "memory/allocation.hpp"
 #include "jfr/utilities/jfrTime.hpp"
 
+typedef u8 traceid;
+
 class BoolObjectClosure;
+class JfrStackTrace;
 class OopClosure;
 class ObjectSample;
 class ObjectSampler;
@@ -40,11 +43,13 @@
 // making sure the samples are evenly distributed as
 // new entries are added and removed.
 class ObjectSampler : public CHeapObj<mtTracing> {
+  friend class EventEmitter;
+  friend class JfrRecorderService;
   friend class LeakProfiler;
-  friend class ObjectSampleCheckpoint;
   friend class StartOperation;
   friend class StopOperation;
-  friend class EmitEventOperation;
+  friend class ObjectSampleCheckpoint;
+  friend class WriteObjectSampleStacktrace;
  private:
   SamplePriorityQueue* _priority_queue;
   SampleList* _list;
@@ -52,20 +57,33 @@
   size_t _total_allocated;
   size_t _threshold;
   size_t _size;
-  volatile int _tryLock;
   bool _dead_samples;
 
+  // Lifecycle
   explicit ObjectSampler(size_t size);
   ~ObjectSampler();
+  static bool create(size_t size);
+  static bool is_created();
+  static ObjectSampler* sampler();
+  static void destroy();
 
-  void add(HeapWord* object, size_t size, JavaThread* thread);
+  // For operations that require exclusive access (non-safepoint)
+  static ObjectSampler* acquire();
+  static void release();
+
+  // Stacktrace
+  static void fill_stacktrace(JfrStackTrace* stacktrace, JavaThread* thread);
+  traceid stacktrace_id(const JfrStackTrace* stacktrace, JavaThread* thread);
+
+  // Sampling
+  static void sample(HeapWord* object, size_t size, JavaThread* thread);
+  void add(HeapWord* object, size_t size, traceid thread_id, JfrStackTrace* stacktrace, JavaThread* thread);
+  void scavenge();
   void remove_dead(ObjectSample* sample);
-  void scavenge();
 
   // Called by GC
-  void oops_do(BoolObjectClosure* is_alive, OopClosure* f);
+  static void oops_do(BoolObjectClosure* is_alive, OopClosure* f);
 
- public:
   const ObjectSample* item_at(int index) const;
   ObjectSample* item_at(int index);
   int item_count() const;
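
Aside (not part of the changeset): the fill_stacktrace()/stacktrace_id() split declared above captures the frames into a JfrStackTrace backed by thread-local buffers before the try-lock is taken, and only the thread that wins the try-lock inserts the trace into the shared repository (see sample() in the .cpp hunk above). A standalone sketch of that capture-first, publish-if-selected shape, with stand-in types in place of the JFR repository:

#include <cstdint>
#include <iostream>
#include <vector>

// Stand-ins (illustrative only): a captured trace and a global repository.
struct CapturedTrace {
  std::vector<uint64_t> frames;
  uint64_t hash = 0;
};

static std::vector<CapturedTrace> g_repository;

// Phase 1: cheap local capture, no shared state touched.
CapturedTrace capture_locally() {
  CapturedTrace t;
  t.frames = {0x10, 0x20, 0x30};     // pretend frame PCs
  t.hash = 0xBEEF;                   // pretend hash
  return t;
}

// Phase 2: only executed by the thread that won the try-lock.
uint64_t publish(const CapturedTrace& t) {
  g_repository.push_back(t);
  return g_repository.size();        // pretend trace id
}

int main() {
  CapturedTrace t = capture_locally();   // always done, lock-free
  bool won_try_lock = true;              // in the real code: JfrTryLock
  if (won_try_lock) {
    std::cout << "trace id " << publish(t) << "\n";
  }
  return 0;
}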
--- a/src/share/vm/jfr/leakprofiler/startOperation.hpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/startOperation.hpp	Wed Oct 09 16:11:58 2019 +0800
@@ -25,34 +25,17 @@
 #ifndef SHARE_VM_LEAKPROFILER_STARTOPERATION_HPP
 #define SHARE_VM_LEAKPROFILER_STARTOPERATION_HPP
 
-#include "jfr/recorder/jfrRecorder.hpp"
-#include "jfr/leakprofiler/leakProfiler.hpp"
 #include "jfr/leakprofiler/sampling/objectSampler.hpp"
-#include "jfr/recorder/service/jfrOptionSet.hpp"
-#include "runtime/vm_operations.hpp"
+#include "jfr/leakprofiler/utilities/vmOperation.hpp"
 
-// Safepoint operation for starting leak profiler object sampler
-class StartOperation : public VM_Operation {
+// Safepoint operation for creating and starting the leak profiler object sampler
+class StartOperation : public OldObjectVMOperation {
  private:
-  jlong _sample_count;
+  int _sample_count;
  public:
-  StartOperation(jlong sample_count) :
-    _sample_count(sample_count) {
-  }
-
-  Mode evaluation_mode() const {
-    return _safepoint;
-  }
-
-  VMOp_Type type() const {
-    return VMOp_GC_HeapInspection;
-  }
-
+  StartOperation(int sample_count) : _sample_count(sample_count) {}
   virtual void doit() {
-    assert(!LeakProfiler::is_running(), "invariant");
-    jint queue_size = JfrOptionSet::old_object_queue_size();
-    LeakProfiler::set_object_sampler(new ObjectSampler(queue_size));
-    if (LogJFR && Verbose) tty->print_cr( "Object sampling started");
+    ObjectSampler::create(_sample_count);
   }
 };
 
--- a/src/share/vm/jfr/leakprofiler/stopOperation.hpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/stopOperation.hpp	Wed Oct 09 16:11:58 2019 +0800
@@ -25,30 +25,14 @@
 #ifndef SHARE_VM_LEAKPROFILER_STOPOPERATION_HPP
 #define SHARE_VM_LEAKPROFILER_STOPOPERATION_HPP
 
-#include "jfr/leakprofiler/leakProfiler.hpp"
 #include "jfr/leakprofiler/sampling/objectSampler.hpp"
-#include "jfr/recorder/service/jfrOptionSet.hpp"
-#include "runtime/vm_operations.hpp"
-
-// Safepoint operation for stopping leak profiler object sampler
-class StopOperation : public VM_Operation {
- public:
-  StopOperation() {}
+#include "jfr/leakprofiler/utilities/vmOperation.hpp"
 
-  Mode evaluation_mode() const {
-    return _safepoint;
-  }
-
-  VMOp_Type type() const {
-    return VMOp_GC_HeapInspection;
-  }
-
+// Safepoint operation for stopping and destroying the leak profiler object sampler
+class StopOperation : public OldObjectVMOperation {
+ public:
   virtual void doit() {
-    assert(LeakProfiler::is_running(), "invariant");
-    ObjectSampler* object_sampler = LeakProfiler::object_sampler();
-    delete object_sampler;
-    LeakProfiler::set_object_sampler(NULL);
-    if (LogJFR && Verbose) tty->print_cr( "Object sampling stopped");
+    ObjectSampler::destroy();
   }
 };
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/utilities/vmOperation.hpp	Wed Oct 09 16:11:58 2019 +0800
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_JFR_LEAKPROFILER_UTILITIES_VMOPERATION_HPP
+#define SHARE_JFR_LEAKPROFILER_UTILITIES_VMOPERATION_HPP
+
+#include "runtime/vm_operations.hpp"
+
+class OldObjectVMOperation : public VM_Operation {
+ public:
+  Mode evaluation_mode() const {
+    return _safepoint;
+  }
+
+  VMOp_Type type() const {
+    return VMOp_JFROldObject;
+  }
+};
+
+#endif // SHARE_JFR_LEAKPROFILER_UTILITIES_VMOPERATION_HPP
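
Aside (not part of the changeset): OldObjectVMOperation factors the shared safepoint policy (evaluation_mode()/type()) out of the concrete operations, so StartOperation and StopOperation only supply doit(). A standalone sketch of that base-class shape outside HotSpot, with a stand-in base and executor in place of VM_Operation and VMThread::execute:

#include <iostream>

// Stand-in base: shared policy in the base class, per-operation work in doit().
class OperationBase {
 public:
  virtual ~OperationBase() {}
  bool at_safepoint() const { return true; }   // shared policy, like evaluation_mode()/type()
  virtual void doit() = 0;                     // per-operation work
};

class StartLikeOperation : public OperationBase {
  int _sample_count;
 public:
  explicit StartLikeOperation(int sample_count) : _sample_count(sample_count) {}
  void doit() { std::cout << "create sampler, size " << _sample_count << "\n"; }
};

class StopLikeOperation : public OperationBase {
 public:
  void doit() { std::cout << "destroy sampler\n"; }
};

static void execute(OperationBase& op) {       // stand-in for VMThread::execute(&op)
  if (op.at_safepoint()) { op.doit(); }
}

int main() {
  StartLikeOperation start(256);
  StopLikeOperation stop;
  execute(start);
  execute(stop);
  return 0;
}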
--- a/src/share/vm/jfr/recorder/checkpoint/types/jfrType.cpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/recorder/checkpoint/types/jfrType.cpp	Wed Oct 09 16:11:58 2019 +0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -316,7 +316,7 @@
 
 void TypeSet::serialize(JfrCheckpointWriter& writer) {
   TypeSetSerialization type_set(false);
-  if (LeakProfiler::is_suspended()) {
+  if (LeakProfiler::is_running()) {
     JfrCheckpointWriter leakp_writer(false, true, Thread::current());
     type_set.write(writer, &leakp_writer);
     ObjectSampleCheckpoint::install(leakp_writer, false, true);
--- a/src/share/vm/jfr/recorder/service/jfrRecorderService.cpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/recorder/service/jfrRecorderService.cpp	Wed Oct 09 16:11:58 2019 +0800
@@ -24,7 +24,9 @@
 
 #include "precompiled.hpp"
 #include "jfr/jni/jfrJavaSupport.hpp"
+#include "jfr/leakprofiler/leakProfiler.hpp"
 #include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
+#include "jfr/leakprofiler/sampling/objectSampler.hpp"
 #include "jfr/recorder/jfrRecorder.hpp"
 #include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
 #include "jfr/recorder/checkpoint/jfrMetadataEvent.hpp"
@@ -334,6 +336,7 @@
     open_new_chunk(true);
   }
   _checkpoint_manager.register_service_thread(Thread::current());
+  JfrMetadataEvent::lock();
 }
 
 void JfrRecorderService::open_new_chunk(bool vm_error) {
@@ -397,6 +400,11 @@
   write_stack_trace_checkpoint.process();
 }
 
+static void write_object_sample_stacktrace(ObjectSampler* sampler, JfrStackTraceRepository& stack_trace_repository) {
+  WriteObjectSampleStacktrace object_sample_stacktrace(sampler, stack_trace_repository);
+  object_sample_stacktrace.process();
+}
+
 static void write_stringpool_checkpoint(JfrStringPool& string_pool, JfrChunkWriter& chunkwriter) {
   WriteStringPool write_string_pool(string_pool);
   WriteStringPoolCheckpoint write_string_pool_checkpoint(chunkwriter, TYPE_STRING, write_string_pool);
@@ -417,8 +425,9 @@
 //      write checkpoint epoch transition list->
 //        write stack trace checkpoint ->
 //          write string pool checkpoint ->
-//            write storage ->
-//              release stream lock
+//            write object sample stacktraces ->
+//              write storage ->
+//                release stream lock
 //
 void JfrRecorderService::pre_safepoint_write() {
   MutexLockerEx stream_lock(JfrStream_lock, Mutex::_no_safepoint_check_flag);
@@ -427,6 +436,13 @@
   _checkpoint_manager.write_epoch_transition_mspace();
   write_stacktrace_checkpoint(_stack_trace_repository, _chunkwriter, false);
   write_stringpool_checkpoint(_string_pool, _chunkwriter);
+  if (LeakProfiler::is_running()) {
+    // Exclusive access to the object sampler instance.
+    // The sampler is released (unlocked) later in post_safepoint_write.
+    ObjectSampler* const sampler = ObjectSampler::acquire();
+    assert(sampler != NULL, "invariant");
+    write_object_sample_stacktrace(sampler, _stack_trace_repository);
+  }
   _storage.write();
 }
 
@@ -435,16 +451,10 @@
   VMThread::execute(&safepoint_task);
 }
 
-static void write_object_sample_stacktrace(JfrStackTraceRepository& stack_trace_repository) {
-  WriteObjectSampleStacktrace object_sample_stacktrace(stack_trace_repository);
-  object_sample_stacktrace.process();
-}
-
 //
 // safepoint write sequence
 //
 //   lock stream lock ->
-//     write object sample stacktraces ->
 //       write stacktrace repository ->
 //         write string pool ->
 //           write safepoint dependent types ->
@@ -457,7 +467,6 @@
 void JfrRecorderService::safepoint_write() {
   assert(SafepointSynchronize::is_at_safepoint(), "invariant");
   MutexLockerEx stream_lock(JfrStream_lock, Mutex::_no_safepoint_check_flag);
-  write_object_sample_stacktrace(_stack_trace_repository);
   write_stacktrace_checkpoint(_stack_trace_repository, _chunkwriter, true);
   write_stringpool_checkpoint_safepoint(_string_pool, _chunkwriter);
   _checkpoint_manager.write_safepoint_types();
@@ -477,13 +486,14 @@
 //
 // post-safepoint write sequence
 //
-//  lock stream lock ->
-//    write type set ->
-//      write checkpoints ->
-//        write metadata event ->
-//          write chunk header ->
-//            close chunk fd ->
-//              release stream lock
+//   write type set ->
+//     release object sampler ->
+//       lock stream lock ->
+//         write checkpoints ->
+//           write metadata event ->
+//             write chunk header ->
+//               close chunk fd ->
+//                 release stream lock
 //
 void JfrRecorderService::post_safepoint_write() {
   assert(_chunkwriter.is_valid(), "invariant");
@@ -492,7 +502,11 @@
   // already tagged artifacts for the previous epoch. We can accomplish this concurrently
   // with threads now tagging artifacts in relation to the new, now updated, epoch and remain outside of a safepoint.
   _checkpoint_manager.write_type_set();
-  MutexLockerEx stream_lock(JfrStream_lock, Mutex::_no_safepoint_check_flag);
+  if (LeakProfiler::is_running()) {
+    // The object sampler instance was exclusively acquired and locked in pre_safepoint_write.
+    // Note: There is a dependency on write_type_set() above, ensure the release is subsequent.
+    ObjectSampler::release();
+  }
+  MutexLockerEx stream_lock(JfrStream_lock, Mutex::_no_safepoint_check_flag);
   // serialize any outstanding checkpoint memory
   _checkpoint_manager.write();
   // serialize the metadata descriptor event and close out the chunk
@@ -511,11 +525,9 @@
 void JfrRecorderService::finalize_current_chunk_on_vm_error() {
   assert(_chunkwriter.is_valid(), "invariant");
   pre_safepoint_write();
-  JfrMetadataEvent::lock();
   // Do not attempt safepoint dependent operations during emergency dump.
   // Optimistically write tagged artifacts.
   _checkpoint_manager.shift_epoch();
-  _checkpoint_manager.write_type_set();
   // update time
   _chunkwriter.time_stamp_chunk_now();
   post_safepoint_write();
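Taken together, the jfrRecorderService.cpp changes move the object-sample stack-trace write out of the safepoint and bracket it with an explicit acquire/release of the object sampler: acquired before the safepoint in pre_safepoint_write, held across safepoint_write, and released in post_safepoint_write only after the type set has been written. A standalone sketch of that discipline; Sampler, its lock, and the phase functions are illustrative, not the JFR classes.

    #include <cassert>
    #include <mutex>
    #include <vector>

    // Minimal stand-in for the object sampler and its exclusive lock.
    struct Sampler {
      std::vector<int> samples;
      static Sampler* acquire();
      static void release();
    };

    static Sampler g_sampler;
    static std::mutex g_sampler_lock;

    Sampler* Sampler::acquire() { g_sampler_lock.lock(); return &g_sampler; }
    void Sampler::release()     { g_sampler_lock.unlock(); }

    static bool profiler_running = true;

    void pre_safepoint_write() {
      if (profiler_running) {
        // Exclusive access to the sampler from here until post_safepoint_write.
        Sampler* const sampler = Sampler::acquire();
        assert(sampler != nullptr);
        // write object sample stack traces using 'sampler' ...
        (void)sampler;
      }
      // write storage ...
    }

    void safepoint_write() {
      // stack trace repository, string pool, safepoint-dependent types ...
    }

    void post_safepoint_write() {
      // The type-set write must happen before the sampler is released,
      // because it may still tag artifacts that the samples refer to.
      if (profiler_running) {
        Sampler::release();
      }
      // then take the stream lock, write checkpoints, metadata, chunk header ...
    }

    int main() {
      pre_safepoint_write();
      safepoint_write();
      post_safepoint_write();
      return 0;
    }
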
--- a/src/share/vm/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp	Wed Oct 09 16:11:58 2019 +0800
@@ -164,7 +164,13 @@
 }
 
 traceid JfrStackTraceRepository::add(const JfrStackTrace& stacktrace) {
-  return instance().add_trace(stacktrace);
+  traceid tid = instance().add_trace(stacktrace);
+  if (tid == 0) {
+    stacktrace.resolve_linenos();
+    tid = instance().add_trace(stacktrace);
+  }
+  assert(tid != 0, "invariant");
+  return tid;
 }
 
 traceid JfrStackTraceRepository::record(Thread* thread, int skip /* 0 */) {
@@ -187,54 +193,29 @@
   return instance().record_for((JavaThread*)thread, skip,frames, tl->stackdepth());
 }
 
-traceid JfrStackTraceRepository::record(Thread* thread, int skip, unsigned int* hash) {
-  assert(thread == Thread::current(), "invariant");
-  JfrThreadLocal* const tl = thread->jfr_thread_local();
-  assert(tl != NULL, "invariant");
-
-  if (tl->has_cached_stack_trace()) {
-    *hash = tl->cached_stack_trace_hash();
-    return tl->cached_stack_trace_id();
-  }
-  if (!thread->is_Java_thread() || thread->is_hidden_from_external_view()) {
-    return 0;
-  }
-  JfrStackFrame* frames = tl->stackframes();
-  if (frames == NULL) {
-    // pending oom
-    return 0;
-  }
-  assert(frames != NULL, "invariant");
-  assert(tl->stackframes() == frames, "invariant");
-  return instance().record_for((JavaThread*)thread, skip, frames, tl->stackdepth(), hash);
-}
-
 traceid JfrStackTraceRepository::record_for(JavaThread* thread, int skip, JfrStackFrame *frames, u4 max_frames) {
   JfrStackTrace stacktrace(frames, max_frames);
-  if (!stacktrace.record_safe(thread, skip)) {
-    return 0;
-  }
-  traceid tid = add(stacktrace);
-  if (tid == 0) {
-    stacktrace.resolve_linenos();
-    tid = add(stacktrace);
-  }
-  return tid;
+  return stacktrace.record_safe(thread, skip) ? add(stacktrace) : 0;
+}
+
+traceid JfrStackTraceRepository::add(const JfrStackTrace* stacktrace, JavaThread* thread) {
+  assert(stacktrace != NULL, "invariant");
+  assert(thread != NULL, "invariant");
+  assert(stacktrace->hash() != 0, "invariant");
+  return add(*stacktrace);
 }
 
-traceid JfrStackTraceRepository::record_for(JavaThread* thread, int skip, JfrStackFrame *frames, u4 max_frames, unsigned int* hash) {
-  assert(hash != NULL && *hash == 0, "invariant");
-  JfrStackTrace stacktrace(frames, max_frames);
-  if (!stacktrace.record_safe(thread, skip, true)) {
-    return 0;
+bool JfrStackTraceRepository::fill_stacktrace_for(JavaThread* thread, JfrStackTrace* stacktrace, int skip) {
+  assert(thread == Thread::current(), "invariant");
+  assert(stacktrace != NULL, "invariant");
+  JfrThreadLocal* const tl = thread->jfr_thread_local();
+  assert(tl != NULL, "invariant");
+  const unsigned int cached_stacktrace_hash = tl->cached_stack_trace_hash();
+  if (cached_stacktrace_hash != 0) {
+    stacktrace->set_hash(cached_stacktrace_hash);
+    return true;
   }
-  traceid tid = add(stacktrace);
-  if (tid == 0) {
-    stacktrace.resolve_linenos();
-    tid = add(stacktrace);
-  }
-  *hash = stacktrace._hash;
-  return tid;
+  return stacktrace->record_safe(thread, skip, true);
 }
 
 size_t JfrStackTraceRepository::write_impl(JfrChunkWriter& sw, bool clear) {
@@ -363,7 +344,7 @@
   return trace;
 }
 
-void JfrStackFrame::resolve_lineno() {
+void JfrStackFrame::resolve_lineno() const {
   assert(_method, "no method pointer");
   assert(_line == 0, "already have linenumber");
   _line = _method->line_number_from_bci(_bci);
@@ -375,7 +356,7 @@
   _frames[frame_pos] = frame;
 }
 
-void JfrStackTrace::resolve_linenos() {
+void JfrStackTrace::resolve_linenos() const {
   for(unsigned int i = 0; i < _nr_of_frames; i++) {
     _frames[i].resolve_lineno();
   }
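The repository-side change centralizes the retry-with-line-numbers logic in add(): the first insertion attempt is made with unresolved line numbers, and only if it fails are the line numbers resolved and the insertion retried, which is what the new const resolve_linenos() and mutable fields make possible on a const JfrStackTrace. A standalone sketch of that lazy-resolution pattern; the types and the failure condition are illustrative, not the real repository logic.

    #include <cstdio>

    struct Frame {
      int bci;
      mutable int line;                                   // cached lazily
      void resolve_lineno() const { line = bci * 10; }    // stand-in for the BCI -> line lookup
    };

    struct StackTrace {
      Frame frames[2];
      mutable bool linenos_resolved;
      void resolve_linenos() const {
        for (const Frame& f : frames) {
          f.resolve_lineno();
        }
        linenos_resolved = true;
      }
    };

    // Stand-in repository: the cheap attempt fails (returns 0) until the
    // line numbers are resolved; real JFR uses a different condition.
    struct Repository {
      long next_id;
      Repository() : next_id(1) {}
      long add_trace(const StackTrace& t) {
        return t.linenos_resolved ? next_id++ : 0;
      }
      long add(const StackTrace& t) {
        long id = add_trace(t);          // cheap attempt first
        if (id == 0) {
          t.resolve_linenos();           // expensive step only when actually needed
          id = add_trace(t);
        }
        return id;
      }
    };

    int main() {
      Repository repo;
      StackTrace t = { { {5, 0}, {12, 0} }, false };
      long id = repo.add(t);
      std::printf("id=%ld line0=%d\n", id, t.frames[0].line);
      return 0;
    }
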
--- a/src/share/vm/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp	Wed Oct 09 16:11:58 2019 +0800
@@ -36,9 +36,9 @@
 
 class JfrStackFrame {
  private:
-  const Method* _method;
+  mutable const Method* _method;
   traceid _methodid;
-  int _line;
+  mutable int _line;
   int _bci;
   u1 _type;
 
@@ -58,7 +58,7 @@
   bool equals(const JfrStackFrame& rhs) const;
   void write(JfrChunkWriter& cw) const;
   void write(JfrCheckpointWriter& cpw) const;
-  void resolve_lineno();
+  void resolve_lineno() const;
 };
 
 class JfrStackTrace : public StackObj {
@@ -70,7 +70,7 @@
   unsigned int _hash;
   const u4 _max_frames;
   bool _reached_root;
-  bool _lineno;
+  mutable bool _lineno;
 
  public:
   JfrStackTrace(JfrStackFrame* frames, u4 max_frames) : _frames(frames),
@@ -82,9 +82,10 @@
                                                         _lineno(false) {}
   bool record_thread(JavaThread& thread, frame& frame);
   bool record_safe(JavaThread* thread, int skip, bool leakp = false);
-  void resolve_linenos();
+  void resolve_linenos() const;
   void set_nr_of_frames(u4 nr_of_frames) { _nr_of_frames = nr_of_frames; }
   void set_hash(unsigned int hash) { _hash = hash; }
+  unsigned int hash() const { return _hash; }
   void set_frame(u4 frame_pos, JfrStackFrame& frame);
   void set_reached_root(bool reached_root) { _reached_root = reached_root; }
   bool full_stacktrace() const { return _reached_root; }
@@ -128,23 +129,26 @@
   traceid _next_id;
   u4 _entries;
 
-  size_t write_impl(JfrChunkWriter& cw, bool clear);
+  traceid add_trace(const JfrStackTrace& stacktrace);
+  static traceid add(const JfrStackTrace* stacktrace, JavaThread* thread);
   traceid record_for(JavaThread* thread, int skip, JfrStackFrame* frames, u4 max_frames);
-  traceid record_for(JavaThread* thread, int skip, JfrStackFrame* frames, u4 max_frames, unsigned int* hash);
-  traceid add_trace(const JfrStackTrace& stacktrace);
+
+  size_t write_impl(JfrChunkWriter& cw, bool clear);
   const StackTrace* resolve_entry(unsigned int hash, traceid id) const;
-
   static void write_metadata(JfrCheckpointWriter& cpw);
 
+  static bool fill_stacktrace_for(JavaThread* thread, JfrStackTrace* stacktrace, int skip);
+
   JfrStackTraceRepository();
-  static JfrStackTraceRepository& instance();
- public:
   static JfrStackTraceRepository* create();
   bool initialize();
   static void destroy();
+
+  static JfrStackTraceRepository& instance();
+
+ public:
   static traceid add(const JfrStackTrace& stacktrace);
   static traceid record(Thread* thread, int skip = 0);
-  static traceid record(Thread* thread, int skip, unsigned int* hash);
   traceid write(JfrCheckpointWriter& cpw, traceid id, unsigned int hash);
   size_t write(JfrChunkWriter& cw, bool clear);
   size_t clear();
--- a/src/share/vm/jfr/support/jfrFlush.hpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/support/jfrFlush.hpp	Wed Oct 09 16:11:58 2019 +0800
@@ -48,10 +48,12 @@
 
 template <typename Event>
 class JfrConditionalFlush {
+ protected:
+  bool _enabled;
  public:
   typedef JfrBuffer Type;
-  JfrConditionalFlush(Thread* t) {
-    if (jfr_is_event_enabled(Event::eventId)) {
+  JfrConditionalFlush(Thread* t) : _enabled(jfr_is_event_enabled(Event::eventId)) {
+    if (_enabled) {
       jfr_conditional_flush(Event::eventId, sizeof(Event), t);
     }
   }
@@ -63,7 +65,7 @@
   bool _owner;
  public:
   JfrConditionalFlushWithStacktrace(Thread* t) : JfrConditionalFlush<Event>(t), _t(t), _owner(false) {
-    if (Event::has_stacktrace() && jfr_has_stacktrace_enabled(Event::eventId)) {
+    if (this->_enabled && Event::has_stacktrace() && jfr_has_stacktrace_enabled(Event::eventId)) {
       _owner = jfr_save_stacktrace(t);
     }
   }
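The jfrFlush.hpp change caches the enablement check in a protected _enabled field of the base template and has the stack-trace subclass consult it via this->_enabled. The this-> qualification is required because JfrConditionalFlush<Event> is a dependent base class, so unqualified names in the derived template are not looked up in it. A standalone sketch of that lookup rule; class and member names are illustrative.

    #include <iostream>

    template <typename Event>
    class ConditionalFlush {
     protected:
      bool _enabled;
     public:
      ConditionalFlush() : _enabled(Event::enabled()) {
        if (_enabled) { std::cout << "flush buffers\n"; }
      }
    };

    template <typename Event>
    class ConditionalFlushWithStacktrace : public ConditionalFlush<Event> {
     public:
      ConditionalFlushWithStacktrace() {
        // Without this->, unqualified _enabled would not be found here,
        // because ConditionalFlush<Event> is a dependent base.
        if (this->_enabled && Event::has_stacktrace()) {
          std::cout << "save stacktrace\n";
        }
      }
    };

    struct MyEvent {
      static bool enabled()        { return true; }
      static bool has_stacktrace() { return true; }
    };

    int main() {
      ConditionalFlushWithStacktrace<MyEvent> f;
      return 0;
    }
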
--- a/src/share/vm/jfr/support/jfrThreadLocal.cpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/support/jfrThreadLocal.cpp	Wed Oct 09 16:11:58 2019 +0800
@@ -150,9 +150,7 @@
 
 JfrStackFrame* JfrThreadLocal::install_stackframes() const {
   assert(_stackframes == NULL, "invariant");
-  _stackdepth = (u4)JfrOptionSet::stackdepth();
-  guarantee(_stackdepth > 0, "Stackdepth must be > 0");
-  _stackframes = NEW_C_HEAP_ARRAY(JfrStackFrame, _stackdepth, mtTracing);
+  _stackframes = NEW_C_HEAP_ARRAY(JfrStackFrame, stackdepth(), mtTracing);
   return _stackframes;
 }
 
@@ -163,3 +161,7 @@
 ByteSize JfrThreadLocal::java_event_writer_offset() {
   return in_ByteSize(offset_of(JfrThreadLocal, _java_event_writer));
 }
+
+u4 JfrThreadLocal::stackdepth() const {
+  return _stackdepth != 0 ? _stackdepth : (u4)JfrOptionSet::stackdepth();
+}
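The thread-local change replaces the eager initialization of _stackdepth in install_stackframes() with a lazy fallback in the stackdepth() accessor: a value of 0 means the per-thread depth has not been set, and the global JfrOptionSet value is used instead. A standalone sketch of that accessor; OptionSet and ThreadLocalState are illustrative stand-ins.

    #include <cstdio>

    struct OptionSet {
      static unsigned stackdepth() { return 64; }   // global default
    };

    class ThreadLocalState {
      unsigned _stackdepth;
     public:
      ThreadLocalState() : _stackdepth(0) {}
      void set_stackdepth(unsigned depth) { _stackdepth = depth; }
      // 0 is the "unset" sentinel; fall back to the global option.
      unsigned stackdepth() const {
        return _stackdepth != 0 ? _stackdepth : OptionSet::stackdepth();
      }
    };

    int main() {
      ThreadLocalState tl;
      std::printf("%u\n", tl.stackdepth());   // 64, from the global option
      tl.set_stackdepth(128);
      std::printf("%u\n", tl.stackdepth());   // 128, the per-thread override
      return 0;
    }
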
--- a/src/share/vm/jfr/support/jfrThreadLocal.hpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/support/jfrThreadLocal.hpp	Wed Oct 09 16:11:58 2019 +0800
@@ -113,9 +113,7 @@
     _stackframes = frames;
   }
 
-  u4 stackdepth() const {
-    return _stackdepth;
-  }
+  u4 stackdepth() const;
 
   void set_stackdepth(u4 depth) {
     _stackdepth = depth;
--- a/src/share/vm/runtime/vm_operations.hpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/runtime/vm_operations.hpp	Wed Oct 09 16:11:58 2019 +0800
@@ -98,6 +98,7 @@
   template(RotateGCLog)                           \
   template(WhiteBoxOperation)                     \
   template(ClassLoaderStatsOperation)             \
+  template(JFROldObject)                          \
 
 class VM_Operation: public CHeapObj<mtInternal> {
  public:
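Registering the new operation type takes only the one added template(JFROldObject) line because vm_operations.hpp builds its VMOp enum (and the matching name table) from this X-macro list. A standalone sketch of how such a list expands; the list contents and macro names below are illustrative, not the real VM_OPS_DO list.

    #include <cstdio>

    #define VM_OP_LIST(op)   \
      op(Dummy)              \
      op(RotateGCLog)        \
      op(JFROldObject)

    // One list, two expansions: enum values and printable names stay in sync.
    #define DECLARE_ENUM(name) VMOp_##name,
    enum VMOp_Type { VM_OP_LIST(DECLARE_ENUM) VMOp_Terminating };

    #define DECLARE_NAME(name) #name,
    static const char* vm_op_names[] = { VM_OP_LIST(DECLARE_NAME) "Terminating" };

    int main() {
      std::printf("%d -> %s\n", VMOp_JFROldObject, vm_op_names[VMOp_JFROldObject]);
      return 0;
    }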