changeset 3695:482e282037d7 jdk7u2-b11

Merge
author jcoomes
date Tue, 18 Oct 2011 17:18:37 -0700
parents c407af9f1f59 (current diff) d38fde25cf49 (diff)
children 0418640475c3 68d4d1b6829a
files .hgtags make/hotspot_version src/share/vm/prims/methodHandleWalk.cpp
diffstat 48 files changed, 2680 insertions(+), 1039 deletions(-)
--- a/.hgtags	Mon Sep 26 12:06:58 2011 -0700
+++ b/.hgtags	Tue Oct 18 17:18:37 2011 -0700
@@ -201,3 +201,4 @@
 8035e71ac3f6c8a453f7e9483e7144731388b14e jdk7u2-b08
 cd3d4ec354fd040c1f47614991b3fe6d5cc5e9da hs22-b07
 b93bc193d73bd4d07150a3e8f85a8ca4bb18157c jdk7u2-b09
+623aec2a90f721fd0de9877bf7be8624874fd557 hs22-b08
--- a/agent/make/Makefile	Mon Sep 26 12:06:58 2011 -0700
+++ b/agent/make/Makefile	Tue Oct 18 17:18:37 2011 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -81,6 +81,7 @@
 sun.jvm.hotspot.debugger.windbg.x86 \
 sun.jvm.hotspot.debugger.x86 \
 sun.jvm.hotspot.gc_implementation \
+sun.jvm.hotspot.gc_implementation.g1 \
 sun.jvm.hotspot.gc_implementation.parallelScavenge \
 sun.jvm.hotspot.gc_implementation.shared \
 sun.jvm.hotspot.gc_interface \
@@ -167,6 +168,9 @@
 sun/jvm/hotspot/debugger/windbg/ia64/*.java \
 sun/jvm/hotspot/debugger/windbg/x86/*.java \
 sun/jvm/hotspot/debugger/x86/*.java \
+sun/jvm/hotspot/gc_implementation/g1/*.java \
+sun/jvm/hotspot/gc_implementation/parallelScavenge/*.java \
+sun/jvm/hotspot/gc_implementation/shared/*.java \
 sun/jvm/hotspot/interpreter/*.java \
 sun/jvm/hotspot/jdi/*.java \
 sun/jvm/hotspot/livejvm/*.java \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java	Tue Oct 18 17:18:37 2011 -0700
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc_implementation.g1;
+
+import java.util.Iterator;
+import java.util.Observable;
+import java.util.Observer;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.gc_interface.CollectedHeapName;
+import sun.jvm.hotspot.memory.MemRegion;
+import sun.jvm.hotspot.memory.SharedHeap;
+import sun.jvm.hotspot.memory.SpaceClosure;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObjectFactory;
+import sun.jvm.hotspot.types.AddressField;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+// Mirror class for G1CollectedHeap.
+
+public class G1CollectedHeap extends SharedHeap {
+    // HeapRegionSeq _seq;
+    static private long hrsFieldOffset;
+    // MemRegion _g1_committed;
+    static private long g1CommittedFieldOffset;
+    // size_t _summary_bytes_used;
+    static private CIntegerField summaryBytesUsedField;
+    // G1MonitoringSupport* _g1mm
+    static private AddressField g1mmField;
+
+    static {
+        VM.registerVMInitializedObserver(new Observer() {
+                public void update(Observable o, Object data) {
+                    initialize(VM.getVM().getTypeDataBase());
+                }
+            });
+    }
+
+    static private synchronized void initialize(TypeDataBase db) {
+        Type type = db.lookupType("G1CollectedHeap");
+
+        hrsFieldOffset = type.getField("_hrs").getOffset();
+        g1CommittedFieldOffset = type.getField("_g1_committed").getOffset();
+        summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
+        g1mmField = type.getAddressField("_g1mm");
+    }
+
+    public long capacity() {
+        Address g1CommittedAddr = addr.addOffsetTo(g1CommittedFieldOffset);
+        MemRegion g1_committed = new MemRegion(g1CommittedAddr);
+        return g1_committed.byteSize();
+    }
+
+    public long used() {
+        return summaryBytesUsedField.getValue(addr);
+    }
+
+    public long n_regions() {
+        return hrs().length();
+    }
+
+    private HeapRegionSeq hrs() {
+        Address hrsAddr = addr.addOffsetTo(hrsFieldOffset);
+        return (HeapRegionSeq) VMObjectFactory.newObject(HeapRegionSeq.class,
+                                                         hrsAddr);
+    }
+
+    public G1MonitoringSupport g1mm() {
+        Address g1mmAddr = g1mmField.getValue(addr);
+        return (G1MonitoringSupport) VMObjectFactory.newObject(G1MonitoringSupport.class, g1mmAddr);
+    }
+
+    private Iterator<HeapRegion> heapRegionIterator() {
+        return hrs().heapRegionIterator();
+    }
+
+    public void heapRegionIterate(SpaceClosure scl) {
+        Iterator<HeapRegion> iter = heapRegionIterator();
+        while (iter.hasNext()) {
+            HeapRegion hr = iter.next();
+            scl.doSpace(hr);
+        }
+    }
+
+    public CollectedHeapName kind() {
+        return CollectedHeapName.G1_COLLECTED_HEAP;
+    }
+
+    public G1CollectedHeap(Address addr) {
+        super(addr);
+    }
+}
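
The mirror class above is reached through the SA's Universe once a target VM is attached. A minimal sketch of how a tool might consume it (the wrapper class and method below are hypothetical, assuming sa-jdi is on the classpath and VM.initialize() has already run inside the agent):

    import sun.jvm.hotspot.gc_implementation.g1.G1CollectedHeap;
    import sun.jvm.hotspot.gc_interface.CollectedHeap;
    import sun.jvm.hotspot.runtime.VM;

    public class G1HeapSketch {
        // Print basic G1 figures from an attached SA session (illustrative only).
        public static void printG1Summary() {
            CollectedHeap heap = VM.getVM().getUniverse().heap();
            if (heap instanceof G1CollectedHeap) {
                G1CollectedHeap g1h = (G1CollectedHeap) heap;
                System.out.println("capacity  = " + g1h.capacity() + " bytes");
                System.out.println("used      = " + g1h.used() + " bytes");
                System.out.println("n_regions = " + g1h.n_regions());
            }
        }
    }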
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1MonitoringSupport.java	Tue Oct 18 17:18:37 2011 -0700
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc_implementation.g1;
+
+import java.util.Observable;
+import java.util.Observer;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObject;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+// Mirror class for G1MonitoringSupport.
+
+public class G1MonitoringSupport extends VMObject {
+    // size_t _eden_committed;
+    static private CIntegerField edenCommittedField;
+    // size_t _eden_used;
+    static private CIntegerField edenUsedField;
+    // size_t _survivor_committed;
+    static private CIntegerField survivorCommittedField;
+    // size_t _survivor_used;
+    static private CIntegerField survivorUsedField;
+    // size_t _old_committed;
+    static private CIntegerField oldCommittedField;
+    // size_t _old_used;
+    static private CIntegerField oldUsedField;
+
+    static {
+        VM.registerVMInitializedObserver(new Observer() {
+                public void update(Observable o, Object data) {
+                    initialize(VM.getVM().getTypeDataBase());
+                }
+            });
+    }
+
+    static private synchronized void initialize(TypeDataBase db) {
+        Type type = db.lookupType("G1MonitoringSupport");
+
+        edenCommittedField = type.getCIntegerField("_eden_committed");
+        edenUsedField = type.getCIntegerField("_eden_used");
+        survivorCommittedField = type.getCIntegerField("_survivor_committed");
+        survivorUsedField = type.getCIntegerField("_survivor_used");
+        oldCommittedField = type.getCIntegerField("_old_committed");
+        oldUsedField = type.getCIntegerField("_old_used");
+    }
+
+    public long edenCommitted() {
+        return edenCommittedField.getValue(addr);
+    }
+
+    public long edenUsed() {
+        return edenUsedField.getValue(addr);
+    }
+
+    public long survivorCommitted() {
+        return survivorCommittedField.getValue(addr);
+    }
+
+    public long survivorUsed() {
+        return survivorUsedField.getValue(addr);
+    }
+
+    public long oldCommitted() {
+        return oldCommittedField.getValue(addr);
+    }
+
+    public long oldUsed() {
+        return oldUsedField.getValue(addr);
+    }
+
+    public G1MonitoringSupport(Address addr) {
+        super(addr);
+    }
+}
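
Given a G1CollectedHeap mirror, the monitoring support object lets a tool derive a young/old breakdown similar to what jstat reports. A small, hypothetical helper under the same assumptions as the sketch above:

    import sun.jvm.hotspot.gc_implementation.g1.G1CollectedHeap;
    import sun.jvm.hotspot.gc_implementation.g1.G1MonitoringSupport;

    public class G1GenBreakdown {
        // Fold the per-space counters into young/old totals (illustrative only).
        public static void print(G1CollectedHeap g1h) {
            G1MonitoringSupport g1mm = g1h.g1mm();
            long youngUsed      = g1mm.edenUsed()      + g1mm.survivorUsed();
            long youngCommitted = g1mm.edenCommitted() + g1mm.survivorCommitted();
            System.out.println("young: used=" + youngUsed + " committed=" + youngCommitted);
            System.out.println("old:   used=" + g1mm.oldUsed() + " committed=" + g1mm.oldCommitted());
        }
    }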
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegion.java	Tue Oct 18 17:18:37 2011 -0700
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc_implementation.g1;
+
+import java.util.Observable;
+import java.util.Observer;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.memory.ContiguousSpace;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+// Mirror class for HeapRegion. Currently we don't actually include
+// any of its fields but only iterate over it (which we get "for free"
+// as HeapRegion ultimately inherits from ContiguousSpace).
+
+public class HeapRegion extends ContiguousSpace {
+    // static int GrainBytes;
+    static private CIntegerField grainBytesField;
+
+    static {
+        VM.registerVMInitializedObserver(new Observer() {
+                public void update(Observable o, Object data) {
+                    initialize(VM.getVM().getTypeDataBase());
+                }
+            });
+    }
+
+    static private synchronized void initialize(TypeDataBase db) {
+        Type type = db.lookupType("HeapRegion");
+
+        grainBytesField = type.getCIntegerField("GrainBytes");
+    }
+
+    static public long grainBytes() {
+        return grainBytesField.getValue();
+    }
+
+    public HeapRegion(Address addr) {
+        super(addr);
+    }
+}
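
Because GrainBytes is the fixed size of every committed region, it can be used to cross-check the region count against the committed heap size. A hedged sketch (hypothetical helper, assuming the committed space is an exact multiple of the region size):

    import sun.jvm.hotspot.gc_implementation.g1.G1CollectedHeap;
    import sun.jvm.hotspot.gc_implementation.g1.HeapRegion;

    public class RegionCountCheck {
        // capacity() is the committed byte size, so dividing by the region size
        // should match the HeapRegionSeq length reported by n_regions().
        public static boolean consistent(G1CollectedHeap g1h) {
            return g1h.capacity() / HeapRegion.grainBytes() == g1h.n_regions();
        }
    }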
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java	Tue Oct 18 17:18:37 2011 -0700
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc_implementation.g1;
+
+import java.util.Iterator;
+import java.util.Observable;
+import java.util.Observer;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObject;
+import sun.jvm.hotspot.runtime.VMObjectFactory;
+import sun.jvm.hotspot.types.AddressField;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+// Mirror class for HeapRegionSeq. It's essentially an index -> HeapRegion map.
+
+public class HeapRegionSeq extends VMObject {
+    // HeapRegion** _regions;
+    static private AddressField regionsField;
+    // size_t _length;
+    static private CIntegerField lengthField;
+
+    static {
+        VM.registerVMInitializedObserver(new Observer() {
+                public void update(Observable o, Object data) {
+                    initialize(VM.getVM().getTypeDataBase());
+                }
+            });
+    }
+
+    static private synchronized void initialize(TypeDataBase db) {
+        Type type = db.lookupType("HeapRegionSeq");
+
+        regionsField = type.getAddressField("_regions");
+        lengthField = type.getCIntegerField("_length");
+    }
+
+    private HeapRegion at(long index) {
+        Address arrayAddr = regionsField.getValue(addr);
+        // Offset of &_region[index]
+        long offset = index * VM.getVM().getAddressSize();
+        Address regionAddr = arrayAddr.getAddressAt(offset);
+        return (HeapRegion) VMObjectFactory.newObject(HeapRegion.class,
+                                                      regionAddr);
+    }
+
+    public long length() {
+        return lengthField.getValue(addr);
+    }
+
+    private class HeapRegionIterator implements Iterator<HeapRegion> {
+        private long index;
+        private long length;
+
+        @Override
+        public boolean hasNext() { return index < length; }
+
+        @Override
+        public HeapRegion next() { return at(index++);    }
+
+        @Override
+        public void remove()     { /* not supported */    }
+
+        HeapRegionIterator(Address addr) {
+            index = 0;
+            length = length();
+        }
+    }
+
+    public Iterator<HeapRegion> heapRegionIterator() {
+        return new HeapRegionIterator(addr);
+    }
+
+    public HeapRegionSeq(Address addr) {
+        super(addr);
+    }
+}
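
The iterator above is what G1CollectedHeap.heapRegionIterate() drives (the iterator itself stays private to the heap mirror), so callers visit regions through a SpaceClosure. A hypothetical sketch, assuming ContiguousSpace.used() behaves as it does elsewhere in the SA:

    import sun.jvm.hotspot.gc_implementation.g1.G1CollectedHeap;
    import sun.jvm.hotspot.memory.ContiguousSpace;
    import sun.jvm.hotspot.memory.Space;
    import sun.jvm.hotspot.memory.SpaceClosure;

    public class RegionUsage {
        // Accumulate used bytes across all committed regions; each HeapRegion
        // arrives at the closure as a Space and is a ContiguousSpace underneath.
        public static long usedInRegions(G1CollectedHeap g1h) {
            final long[] total = { 0 };
            g1h.heapRegionIterate(new SpaceClosure() {
                public void doSpace(Space s) {
                    total[0] += ((ContiguousSpace) s).used();
                }
            });
            return total[0];
        }
    }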
--- a/agent/src/share/classes/sun/jvm/hotspot/gc_interface/CollectedHeapName.java	Mon Sep 26 12:06:58 2011 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/gc_interface/CollectedHeapName.java	Tue Oct 18 17:18:37 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,7 @@
   public static final CollectedHeapName ABSTRACT = new CollectedHeapName("abstract");
   public static final CollectedHeapName SHARED_HEAP = new CollectedHeapName("SharedHeap");
   public static final CollectedHeapName GEN_COLLECTED_HEAP = new CollectedHeapName("GenCollectedHeap");
+  public static final CollectedHeapName G1_COLLECTED_HEAP = new CollectedHeapName("G1CollectedHeap");
   public static final CollectedHeapName PARALLEL_SCAVENGE_HEAP = new CollectedHeapName("ParallelScavengeHeap");
 
   public String toString() {
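
With the new constant, a tool can recognize a G1 heap either with instanceof or by comparing the type tag returned by kind(); a trivial, hypothetical check:

    import sun.jvm.hotspot.gc_interface.CollectedHeap;
    import sun.jvm.hotspot.gc_interface.CollectedHeapName;

    public class HeapKindCheck {
        // CollectedHeapName values are singletons, so identity comparison suffices.
        public static boolean isG1(CollectedHeap heap) {
            return heap.kind() == CollectedHeapName.G1_COLLECTED_HEAP;
        }
    }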
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java	Mon Sep 26 12:06:58 2011 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java	Tue Oct 18 17:18:37 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
 import java.util.*;
 import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.gc_interface.*;
+import sun.jvm.hotspot.gc_implementation.g1.G1CollectedHeap;
 import sun.jvm.hotspot.gc_implementation.parallelScavenge.*;
 import sun.jvm.hotspot.oops.*;
 import sun.jvm.hotspot.types.*;
@@ -72,6 +73,7 @@
     heapConstructor = new VirtualConstructor(db);
     heapConstructor.addMapping("GenCollectedHeap", GenCollectedHeap.class);
     heapConstructor.addMapping("ParallelScavengeHeap", ParallelScavengeHeap.class);
+    heapConstructor.addMapping("G1CollectedHeap", G1CollectedHeap.class);
 
     mainThreadGroupField   = type.getOopField("_main_thread_group");
     systemThreadGroupField = type.getOopField("_system_thread_group");
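
The VirtualConstructor mapping added here is what turns the dynamic C++ type of the heap into the matching SA mirror class. Conceptually the lookup behaves like the simplified sketch below (illustrative only, not the SA's actual implementation):

    import java.util.HashMap;
    import java.util.Map;

    // Simplified picture of the mapping Universe builds: the C++ class name
    // discovered at the heap's address selects which mirror class to construct.
    public class HeapMirrorDispatch {
        private final Map<String, Class<?>> mappings = new HashMap<String, Class<?>>();

        public void addMapping(String cppClassName, Class<?> mirror) {
            mappings.put(cppClassName, mirror);
        }

        public Class<?> mirrorFor(String dynamicCppType) {
            Class<?> mirror = mappings.get(dynamicCppType);
            if (mirror == null) {
                throw new RuntimeException("no mirror registered for " + dynamicCppType);
            }
            return mirror;
        }
    }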
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java	Mon Sep 26 12:06:58 2011 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java	Tue Oct 18 17:18:37 2011 -0700
@@ -44,14 +44,14 @@
   }
 
   // field offset constants
-  public static int ACCESS_FLAGS_OFFSET;
-  public static int NAME_INDEX_OFFSET;
-  public static int SIGNATURE_INDEX_OFFSET;
-  public static int INITVAL_INDEX_OFFSET;
-  public static int LOW_OFFSET;
-  public static int HIGH_OFFSET;
-  public static int GENERIC_SIGNATURE_INDEX_OFFSET;
-  public static int FIELD_SLOTS;
+  private static int ACCESS_FLAGS_OFFSET;
+  private static int NAME_INDEX_OFFSET;
+  private static int SIGNATURE_INDEX_OFFSET;
+  private static int INITVAL_INDEX_OFFSET;
+  private static int LOW_OFFSET;
+  private static int HIGH_OFFSET;
+  private static int GENERIC_SIGNATURE_INDEX_OFFSET;
+  private static int FIELD_SLOTS;
   public static int IMPLEMENTORS_LIMIT;
 
   // ClassState constants
@@ -122,6 +122,13 @@
 
   InstanceKlass(OopHandle handle, ObjectHeap heap) {
     super(handle, heap);
+    if (getJavaFieldsCount() != getAllFieldsCount()) {
+      // Exercise the injected field logic
+      for (int i = getJavaFieldsCount(); i < getAllFieldsCount(); i++) {
+        getFieldName(i);
+        getFieldSignature(i);
+      }
+    }
   }
 
   private static OopField  arrayKlasses;
@@ -253,24 +260,51 @@
     return getFields().getShortAt(index * FIELD_SLOTS + ACCESS_FLAGS_OFFSET);
   }
 
+  public short getFieldNameIndex(int index) {
+    if (index >= getJavaFieldsCount()) throw new IndexOutOfBoundsException("not a Java field;");
+    return getFields().getShortAt(index * FIELD_SLOTS + NAME_INDEX_OFFSET);
+  }
+
   public Symbol getFieldName(int index) {
     int nameIndex = getFields().getShortAt(index * FIELD_SLOTS + NAME_INDEX_OFFSET);
-    return getConstants().getSymbolAt(nameIndex);
+    if (index < getJavaFieldsCount()) {
+      return getConstants().getSymbolAt(nameIndex);
+    } else {
+      return vmSymbols.symbolAt(nameIndex);
+    }
+  }
+
+  public short getFieldSignatureIndex(int index) {
+    if (index >= getJavaFieldsCount()) throw new IndexOutOfBoundsException("not a Java field;");
+    return getFields().getShortAt(index * FIELD_SLOTS + SIGNATURE_INDEX_OFFSET);
   }
 
   public Symbol getFieldSignature(int index) {
     int signatureIndex = getFields().getShortAt(index * FIELD_SLOTS + SIGNATURE_INDEX_OFFSET);
-    return getConstants().getSymbolAt(signatureIndex);
+    if (index < getJavaFieldsCount()) {
+      return getConstants().getSymbolAt(signatureIndex);
+    } else {
+      return vmSymbols.symbolAt(signatureIndex);
+    }
+  }
+
+  public short getFieldGenericSignatureIndex(int index) {
+    return getFields().getShortAt(index * FIELD_SLOTS + GENERIC_SIGNATURE_INDEX_OFFSET);
   }
 
   public Symbol getFieldGenericSignature(int index) {
-    short genericSignatureIndex = getFields().getShortAt(index * FIELD_SLOTS + GENERIC_SIGNATURE_INDEX_OFFSET);
+    short genericSignatureIndex = getFieldGenericSignatureIndex(index);
     if (genericSignatureIndex != 0)  {
       return getConstants().getSymbolAt(genericSignatureIndex);
     }
     return null;
   }
 
+  public short getFieldInitialValueIndex(int index) {
+    if (index >= getJavaFieldsCount()) throw new IndexOutOfBoundsException("not a Java field;");
+    return getFields().getShortAt(index * FIELD_SLOTS + INITVAL_INDEX_OFFSET);
+  }
+
   public int getFieldOffset(int index) {
     TypeArray fields = getFields();
     return VM.getVM().buildIntFromShorts(fields.getShortAt(index * FIELD_SLOTS + LOW_OFFSET),
@@ -288,7 +322,7 @@
   public Klass     getImplementor(int i)    { return (Klass)        implementors[i].getValue(this); }
   public TypeArray getFields()              { return (TypeArray)    fields.getValue(this); }
   public int       getJavaFieldsCount()     { return                (int) javaFieldsCount.getValue(this); }
-  public int       getAllFieldsCount()      { return                (int)getFields().getLength(); }
+  public int       getAllFieldsCount()      { return                (int)getFields().getLength() / FIELD_SLOTS; }
   public ConstantPool getConstants()        { return (ConstantPool) constants.getValue(this); }
   public Oop       getClassLoader()         { return                classLoader.getValue(this); }
   public Oop       getProtectionDomain()    { return                protectionDomain.getValue(this); }
@@ -511,7 +545,6 @@
   }
 
   void iterateStaticFieldsInternal(OopVisitor visitor) {
-    TypeArray fields = getFields();
     int length = getJavaFieldsCount();
     for (int index = 0; index < length; index++) {
       short accessFlags    = getFieldAccessFlags(index);
@@ -541,8 +574,6 @@
     if (getSuper() != null) {
       ((InstanceKlass) getSuper()).iterateNonStaticFields(visitor, obj);
     }
-    TypeArray fields = getFields();
-
     int length = getJavaFieldsCount();
     for (int index = 0; index < length; index++) {
       short accessFlags    = getFieldAccessFlags(index);
@@ -556,9 +587,7 @@
 
   /** Field access by name. */
   public Field findLocalField(Symbol name, Symbol sig) {
-    TypeArray fields = getFields();
-    int length = (int) fields.getLength();
-    ConstantPool cp = getConstants();
+    int length = getJavaFieldsCount();
     for (int i = 0; i < length; i++) {
       Symbol f_name = getFieldName(i);
       Symbol f_sig  = getFieldSignature(i);
@@ -648,8 +677,6 @@
     public List getImmediateFields() {
         // A list of Fields for each field declared in this class/interface,
         // not including inherited fields.
-        TypeArray fields = getFields();
-
         int length = getJavaFieldsCount();
         List immediateFields = new ArrayList(length);
         for (int index = 0; index < length; index++) {
@@ -839,7 +866,6 @@
 
   // Creates new field from index in fields TypeArray
   private Field newField(int index) {
-    TypeArray fields = getFields();
     FieldType type = new FieldType(getFieldSignature(index));
     if (type.isOop()) {
      if (VM.getVM().isCompressedOopsEnabled()) {
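
The InstanceKlass changes separate the fields visible to Java (indices below getJavaFieldsCount()) from VM-injected fields, whose names and signatures come from vmSymbols rather than the constant pool. A hypothetical helper showing how a tool would walk both kinds through the updated accessors:

    import sun.jvm.hotspot.oops.InstanceKlass;

    public class FieldLister {
        // getFieldName()/getFieldSignature() transparently route injected-field
        // indices (>= getJavaFieldsCount()) through vmSymbols.
        public static void listAllFields(InstanceKlass ik) {
            int javaFields = ik.getJavaFieldsCount();
            int allFields  = ik.getAllFieldsCount();
            for (int i = 0; i < allFields; i++) {
                String kind = (i < javaFields) ? "java" : "injected";
                System.out.println(kind + " " + ik.getFieldName(i).asString()
                                   + " " + ik.getFieldSignature(i).asString());
            }
        }
    }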
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java	Mon Sep 26 12:06:58 2011 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java	Tue Oct 18 17:18:37 2011 -0700
@@ -33,6 +33,7 @@
 
 import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.gc_interface.*;
+import sun.jvm.hotspot.gc_implementation.g1.*;
 import sun.jvm.hotspot.gc_implementation.parallelScavenge.*;
 import sun.jvm.hotspot.memory.*;
 import sun.jvm.hotspot.runtime.*;
@@ -514,9 +515,16 @@
 
   private void addPermGenLiveRegions(List output, CollectedHeap heap) {
     LiveRegionsCollector lrc = new LiveRegionsCollector(output);
-    if (heap instanceof GenCollectedHeap) {
-       GenCollectedHeap genHeap = (GenCollectedHeap) heap;
-       Generation gen = genHeap.permGen();
+    if (heap instanceof SharedHeap) {
+       if (Assert.ASSERTS_ENABLED) {
+          Assert.that(heap instanceof GenCollectedHeap ||
+                      heap instanceof G1CollectedHeap,
+                      "Expecting GenCollectedHeap or G1CollectedHeap, " +
+                      "but got " + heap.getClass().getName());
+       }
+       // Handles both GenCollectedHeap and G1CollectedHeap
+       SharedHeap sharedHeap = (SharedHeap) heap;
+       Generation gen = sharedHeap.permGen();
        gen.spaceIterate(lrc, true);
     } else if (heap instanceof ParallelScavengeHeap) {
        ParallelScavengeHeap psh = (ParallelScavengeHeap) heap;
@@ -524,8 +532,9 @@
        addLiveRegions(permGen.objectSpace().getLiveRegions(), output);
     } else {
        if (Assert.ASSERTS_ENABLED) {
-          Assert.that(false, "Expecting GenCollectedHeap or ParallelScavengeHeap, but got " +
-                             heap.getClass().getName());
+          Assert.that(false,
+                      "Expecting SharedHeap or ParallelScavengeHeap, " +
+                      "but got " + heap.getClass().getName());
        }
     }
   }
@@ -588,10 +597,14 @@
        addLiveRegions(youngGen.fromSpace().getLiveRegions(), liveRegions);
        PSOldGen oldGen = psh.oldGen();
        addLiveRegions(oldGen.objectSpace().getLiveRegions(), liveRegions);
+    } else if (heap instanceof G1CollectedHeap) {
+        G1CollectedHeap g1h = (G1CollectedHeap) heap;
+        g1h.heapRegionIterate(lrc);
     } else {
        if (Assert.ASSERTS_ENABLED) {
-          Assert.that(false, "Expecting GenCollectedHeap or ParallelScavengeHeap, but got " +
-                              heap.getClass().getName());
+          Assert.that(false, "Expecting GenCollectedHeap, G1CollectedHeap, " +
+                      "or ParallelScavengeHeap, but got " +
+                      heap.getClass().getName());
        }
     }
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/vmSymbols.java	Tue Oct 18 17:18:37 2011 -0700
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.runtime;
+
+import java.io.*;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.memory.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+import sun.jvm.hotspot.utilities.*;
+
+
+public class vmSymbols {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static Address symbolsAddress;
+  private static int FIRST_SID;
+  private static int SID_LIMIT;
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type            = db.lookupType("vmSymbols");
+    symbolsAddress       = type.getAddressField("_symbols[0]").getStaticFieldAddress();
+    FIRST_SID            = db.lookupIntConstant("vmSymbols::FIRST_SID");
+    SID_LIMIT            = db.lookupIntConstant("vmSymbols::SID_LIMIT");
+  }
+
+  public static Symbol symbolAt(int id) {
+    if (id < FIRST_SID || id >= SID_LIMIT) throw new IndexOutOfBoundsException("bad SID " + id);
+    return Symbol.create(symbolsAddress.getAddressAt(id * VM.getVM().getAddressSize()));
+  }
+}
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Mon Sep 26 12:06:58 2011 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Tue Oct 18 17:18:37 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,11 +26,11 @@
 
 import java.util.*;
 import sun.jvm.hotspot.gc_interface.*;
+import sun.jvm.hotspot.gc_implementation.g1.*;
 import sun.jvm.hotspot.gc_implementation.parallelScavenge.*;
 import sun.jvm.hotspot.gc_implementation.shared.*;
 import sun.jvm.hotspot.memory.*;
 import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.tools.*;
 
 public class HeapSummary extends Tool {
 
@@ -70,32 +70,45 @@
       System.out.println();
       System.out.println("Heap Usage:");
 
-      if (heap instanceof GenCollectedHeap) {
-         GenCollectedHeap genHeap = (GenCollectedHeap) heap;
-         for (int n = 0; n < genHeap.nGens(); n++) {
-            Generation gen = genHeap.getGen(n);
-            if (gen instanceof sun.jvm.hotspot.memory.DefNewGeneration) {
-               System.out.println("New Generation (Eden + 1 Survivor Space):");
-               printGen(gen);
+      if (heap instanceof SharedHeap) {
+         SharedHeap sharedHeap = (SharedHeap) heap;
+         if (sharedHeap instanceof GenCollectedHeap) {
+            GenCollectedHeap genHeap = (GenCollectedHeap) sharedHeap;
+            for (int n = 0; n < genHeap.nGens(); n++) {
+               Generation gen = genHeap.getGen(n);
+               if (gen instanceof sun.jvm.hotspot.memory.DefNewGeneration) {
+                  System.out.println("New Generation (Eden + 1 Survivor Space):");
+                  printGen(gen);
 
-               ContiguousSpace eden = ((DefNewGeneration)gen).eden();
-               System.out.println("Eden Space:");
-               printSpace(eden);
+                  ContiguousSpace eden = ((DefNewGeneration)gen).eden();
+                  System.out.println("Eden Space:");
+                  printSpace(eden);
+
+                  ContiguousSpace from = ((DefNewGeneration)gen).from();
+                  System.out.println("From Space:");
+                  printSpace(from);
 
-               ContiguousSpace from = ((DefNewGeneration)gen).from();
-               System.out.println("From Space:");
-               printSpace(from);
-
-               ContiguousSpace to = ((DefNewGeneration)gen).to();
-               System.out.println("To Space:");
-               printSpace(to);
-            } else {
-               System.out.println(gen.name() + ":");
-               printGen(gen);
+                  ContiguousSpace to = ((DefNewGeneration)gen).to();
+                  System.out.println("To Space:");
+                  printSpace(to);
+               } else {
+                  System.out.println(gen.name() + ":");
+                  printGen(gen);
+               }
             }
+         } else if (sharedHeap instanceof G1CollectedHeap) {
+             G1CollectedHeap g1h = (G1CollectedHeap) sharedHeap;
+             G1MonitoringSupport g1mm = g1h.g1mm();
+             System.out.println("G1 Young Generation");
+             printG1Space("Eden Space:", g1mm.edenUsed(), g1mm.edenCommitted());
+             printG1Space("From Space:", g1mm.survivorUsed(), g1mm.survivorCommitted());
+             printG1Space("To Space:", 0, 0);
+             printG1Space("G1 Old Generation", g1mm.oldUsed(), g1mm.oldCommitted());
+         } else {
+             throw new RuntimeException("unknown SharedHeap type : " + heap.getClass());
          }
-         // Perm generation
-         Generation permGen = genHeap.permGen();
+         // Perm generation shared by the above
+         Generation permGen = sharedHeap.permGen();
          System.out.println("Perm Generation:");
          printGen(permGen);
       } else if (heap instanceof ParallelScavengeHeap) {
@@ -119,7 +132,7 @@
          printValMB("free     = ", permFree);
          System.out.println(alignment + (double)permGen.used() * 100.0 / permGen.capacity() + "% used");
       } else {
-         throw new RuntimeException("unknown heap type : " + heap.getClass());
+         throw new RuntimeException("unknown CollectedHeap type : " + heap.getClass());
       }
    }
 
@@ -151,6 +164,14 @@
           return;
        }
 
+       l = getFlagValue("UseG1GC", flagMap);
+       if (l == 1L) {
+           System.out.print("Garbage-First (G1) GC ");
+           l = getFlagValue("ParallelGCThreads", flagMap);
+           System.out.println("with " + l + " thread(s)");
+           return;
+       }
+
        System.out.println("Mark Sweep Compact GC");
    }
 
@@ -191,6 +212,16 @@
       System.out.println(alignment +  (double)space.used() * 100.0 / space.capacity() + "% used");
    }
 
+   private void printG1Space(String spaceName, long used, long capacity) {
+      long free = capacity - used;
+      System.out.println(spaceName);
+      printValMB("capacity = ", capacity);
+      printValMB("used     = ", used);
+      printValMB("free     = ", free);
+      double occPerc = (capacity > 0) ? (double) used * 100.0 / capacity : 0.0;
+      System.out.println(alignment + occPerc + "% used");
+   }
+
    private static final double FACTOR = 1024*1024;
    private void printValMB(String title, long value) {
       if (value < 0) {
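
printG1Space() reports each G1 space the same way the generational branch does: values scaled by FACTOR (1 MB) and an occupancy of used/capacity, guarded against a zero-capacity space such as the empty To space between collections. A standalone sketch of that arithmetic (not the tool's own code):

    public class G1SpaceLine {
        private static final double MB = 1024 * 1024;

        // Format one space line with the same zero-capacity guard HeapSummary uses.
        public static String format(String name, long used, long capacity) {
            double occ = (capacity > 0) ? (double) used * 100.0 / capacity : 0.0;
            return name + " capacity=" + (capacity / MB) + "MB used=" + (used / MB)
                   + "MB (" + occ + "% used)";
        }
    }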
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassWriter.java	Mon Sep 26 12:06:58 2011 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassWriter.java	Tue Oct 18 17:18:37 2011 -0700
@@ -379,23 +379,21 @@
     }
 
     protected void writeFields() throws IOException {
-        TypeArray fields = klass.getFields();
         final int length = klass.getJavaFieldsCount();
 
         // write number of fields
-        dos.writeShort((short) (length / InstanceKlass.FIELD_SLOTS) );
+        dos.writeShort((short) length);
 
-        if (DEBUG) debugMessage("number of fields = "
-                                + length/InstanceKlass.FIELD_SLOTS);
+        if (DEBUG) debugMessage("number of fields = " + length);
 
-        for (int index = 0; index < length; index += InstanceKlass.FIELD_SLOTS) {
-            short accessFlags    = fields.getShortAt(index + InstanceKlass.ACCESS_FLAGS_OFFSET);
+        for (int index = 0; index < length; index++) {
+            short accessFlags    = klass.getFieldAccessFlags(index);
             dos.writeShort(accessFlags & (short) JVM_RECOGNIZED_FIELD_MODIFIERS);
 
-            short nameIndex    = fields.getShortAt(index + InstanceKlass.NAME_INDEX_OFFSET);
+            short nameIndex    = klass.getFieldNameIndex(index);
             dos.writeShort(nameIndex);
 
-            short signatureIndex = fields.getShortAt(index + InstanceKlass.SIGNATURE_INDEX_OFFSET);
+            short signatureIndex = klass.getFieldSignatureIndex(index);
             dos.writeShort(signatureIndex);
             if (DEBUG) debugMessage("\tfield name = " + nameIndex + ", signature = " + signatureIndex);
 
@@ -404,11 +402,11 @@
             if (hasSyn)
                 fieldAttributeCount++;
 
-            short initvalIndex = fields.getShortAt(index + InstanceKlass.INITVAL_INDEX_OFFSET);
+            short initvalIndex = klass.getFieldInitialValueIndex(index);
             if (initvalIndex != 0)
                 fieldAttributeCount++;
 
-            short genSigIndex = fields.getShortAt(index + InstanceKlass.GENERIC_SIGNATURE_INDEX_OFFSET);
+            short genSigIndex = klass.getFieldGenericSignatureIndex(index);
             if (genSigIndex != 0)
                 fieldAttributeCount++;
 
--- a/make/hotspot_version	Mon Sep 26 12:06:58 2011 -0700
+++ b/make/hotspot_version	Tue Oct 18 17:18:37 2011 -0700
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=22
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=07
+HS_BUILD_NUMBER=08
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/make/sa.files	Mon Sep 26 12:06:58 2011 -0700
+++ b/make/sa.files	Tue Oct 18 17:18:37 2011 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2003, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -79,6 +79,7 @@
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/ia64/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/x86/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/x86/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/gc_implementation/g1/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/gc_implementation/parallelScavenge/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/gc_implementation/shared/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/gc_interface/*.java \
--- a/src/share/vm/c1/c1_LinearScan.cpp	Mon Sep 26 12:06:58 2011 -0700
+++ b/src/share/vm/c1/c1_LinearScan.cpp	Tue Oct 18 17:18:37 2011 -0700
@@ -2619,6 +2619,24 @@
 
     Location::Type loc_type = float_saved_as_double ? Location::float_in_dbl : Location::normal;
     VMReg rname = frame_map()->fpu_regname(opr->fpu_regnr());
+#ifndef __SOFTFP__
+#ifndef VM_LITTLE_ENDIAN
+    if (! float_saved_as_double) {
+      // On big endian system, we may have an issue if float registers use only
+      // the low half of the (same) double registers.
+      // Both the float and the double could have the same regnr but would correspond
+      // to two different addresses once saved.
+
+      // get next safely (no assertion checks)
+      VMReg next = VMRegImpl::as_VMReg(1+rname->value());
+      if (next->is_reg() &&
+          (next->as_FloatRegister() == rname->as_FloatRegister())) {
+        // the back-end does use the same numbering for the double and the float
+        rname = next; // VMReg for the low bits, e.g. the real VMReg for the float
+      }
+    }
+#endif
+#endif
     LocationValue* sv = new LocationValue(Location::new_reg_loc(loc_type, rname));
 
     scope_values->append(sv);
--- a/src/share/vm/ci/ciEnv.cpp	Mon Sep 26 12:06:58 2011 -0700
+++ b/src/share/vm/ci/ciEnv.cpp	Tue Oct 18 17:18:37 2011 -0700
@@ -910,27 +910,37 @@
   // dependencies were inserted.  Any violated dependences in this
   // case are dumped to the tty.
   bool counter_changed = system_dictionary_modification_counter_changed();
-  bool test_deps = counter_changed;
-  DEBUG_ONLY(test_deps = true);
-  if (!test_deps)  return;
 
-  bool print_failures = false;
-  DEBUG_ONLY(print_failures = !counter_changed);
-  bool keep_going = (print_failures || xtty != NULL);
+  bool verify_deps = trueInDebug;
+  if (!counter_changed && !verify_deps)  return;
+
   int klass_violations = 0;
-
   for (Dependencies::DepStream deps(dependencies()); deps.next(); ) {
     if (!deps.is_klass_type())  continue;  // skip non-klass dependencies
     klassOop witness = deps.check_dependency();
     if (witness != NULL) {
       klass_violations++;
-      if (print_failures)  deps.print_dependency(witness, /*verbose=*/ true);
+      if (!counter_changed) {
+        // Dependence failed but counter didn't change.  Log a message
+        // describing what failed and allow the assert at the end to
+        // trigger.
+        deps.print_dependency(witness);
+      } else if (xtty == NULL) {
+        // If we're not logging then a single violation is sufficient,
+        // otherwise we want to log all the dependences which were
+        // violated.
+        break;
+      }
     }
-    // If there's no log and we're not sanity-checking, we're done.
-    if (!keep_going)  break;
   }
 
   if (klass_violations != 0) {
+#ifdef ASSERT
+    if (!counter_changed && !PrintCompilation) {
+      // Print out the compile task that failed
+      _task->print_line();
+    }
+#endif
     assert(counter_changed, "failed dependencies, but counter didn't change");
     record_failure("concurrent class loading");
   }
--- a/src/share/vm/classfile/vmSymbols.hpp	Mon Sep 26 12:06:58 2011 -0700
+++ b/src/share/vm/classfile/vmSymbols.hpp	Tue Oct 18 17:18:37 2011 -0700
@@ -967,7 +967,8 @@
 // Class vmSymbols
 
 class vmSymbols: AllStatic {
- friend class vmIntrinsics;
+  friend class vmIntrinsics;
+  friend class VMStructs;
  public:
   // enum for figuring positions and size of array holding Symbol*s
   enum SID {
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Mon Sep 26 12:06:58 2011 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Oct 18 17:18:37 2011 -0700
@@ -2004,7 +2004,7 @@
   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
 
   ref_processor()->set_enqueuing_is_done(false);
-  ref_processor()->enable_discovery();
+  ref_processor()->enable_discovery(false /*verify_disabled*/, false /*check_no_refs*/);
   ref_processor()->setup_policy(clear_all_soft_refs);
   // If an asynchronous collection finishes, the _modUnionTable is
   // all clear.  If we are assuming the collection from an asynchronous
@@ -3490,8 +3490,8 @@
     MutexLockerEx x(bitMapLock(),
                     Mutex::_no_safepoint_check_flag);
     checkpointRootsInitialWork(asynch);
-    rp->verify_no_references_recorded();
-    rp->enable_discovery(); // enable ("weak") refs discovery
+    // enable ("weak") refs discovery
+    rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
     _collectorState = Marking;
   } else {
     // (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
@@ -3503,7 +3503,8 @@
            "ref discovery for this generation kind");
     // already have locks
     checkpointRootsInitialWork(asynch);
-    rp->enable_discovery(); // now enable ("weak") refs discovery
+    // now enable ("weak") refs discovery
+    rp->enable_discovery(true /*verify_disabled*/, false /*verify_no_refs*/);
     _collectorState = Marking;
   }
   SpecializationStats::print();
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Mon Sep 26 12:06:58 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Oct 18 17:18:37 2011 -0700
@@ -818,10 +818,10 @@
   NoteStartOfMarkHRClosure startcl;
   g1h->heap_region_iterate(&startcl);
 
-  // Start weak-reference discovery.
-  ReferenceProcessor* rp = g1h->ref_processor();
-  rp->verify_no_references_recorded();
-  rp->enable_discovery(); // enable ("weak") refs discovery
+  // Start Concurrent Marking weak-reference discovery.
+  ReferenceProcessor* rp = g1h->ref_processor_cm();
+  // enable ("weak") refs discovery
+  rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
 
   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
@@ -1133,6 +1133,7 @@
   // world is stopped at this checkpoint
   assert(SafepointSynchronize::is_at_safepoint(),
          "world should be stopped");
+
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
   // If a full collection has happened, we shouldn't do this.
@@ -1837,6 +1838,10 @@
   size_t cleaned_up_bytes = start_used_bytes - g1h->used();
   g1p->decrease_known_garbage_bytes(cleaned_up_bytes);
 
+  // Clean up will have freed any regions completely full of garbage.
+  // Update the soft reference policy with the new heap occupancy.
+  Universe::update_heap_info_at_gc();
+
   // We need to make this be a "collection" so any collection pause that
   // races with it goes around and waits for completeCleanup to finish.
   g1h->increment_total_collections();
@@ -2072,8 +2077,10 @@
   }
 };
 
-// Implementation of AbstractRefProcTaskExecutor for G1
-class G1RefProcTaskExecutor: public AbstractRefProcTaskExecutor {
+// Implementation of AbstractRefProcTaskExecutor for parallel
+// reference processing at the end of G1 concurrent marking
+
+class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 private:
   G1CollectedHeap* _g1h;
   ConcurrentMark*  _cm;
@@ -2082,7 +2089,7 @@
   int              _active_workers;
 
 public:
-  G1RefProcTaskExecutor(G1CollectedHeap* g1h,
+  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
                         ConcurrentMark* cm,
                         CMBitMap* bitmap,
                         WorkGang* workers,
@@ -2096,7 +2103,7 @@
   virtual void execute(EnqueueTask& task);
 };
 
-class G1RefProcTaskProxy: public AbstractGangTask {
+class G1CMRefProcTaskProxy: public AbstractGangTask {
   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
   ProcessTask&     _proc_task;
   G1CollectedHeap* _g1h;
@@ -2104,7 +2111,7 @@
   CMBitMap*        _bitmap;
 
 public:
-  G1RefProcTaskProxy(ProcessTask& proc_task,
+  G1CMRefProcTaskProxy(ProcessTask& proc_task,
                      G1CollectedHeap* g1h,
                      ConcurrentMark* cm,
                      CMBitMap* bitmap) :
@@ -2122,10 +2129,10 @@
   }
 };
 
-void G1RefProcTaskExecutor::execute(ProcessTask& proc_task) {
+void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
   assert(_workers != NULL, "Need parallel worker threads.");
 
-  G1RefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm, _bitmap);
+  G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm, _bitmap);
 
   // We need to reset the phase for each task execution so that
   // the termination protocol of CMTask::do_marking_step works.
@@ -2135,12 +2142,12 @@
   _g1h->set_par_threads(0);
 }
 
-class G1RefEnqueueTaskProxy: public AbstractGangTask {
+class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
   EnqueueTask& _enq_task;
 
 public:
-  G1RefEnqueueTaskProxy(EnqueueTask& enq_task) :
+  G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
     AbstractGangTask("Enqueue reference objects in parallel"),
     _enq_task(enq_task)
   { }
@@ -2150,10 +2157,10 @@
   }
 };
 
-void G1RefProcTaskExecutor::execute(EnqueueTask& enq_task) {
+void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
   assert(_workers != NULL, "Need parallel worker threads.");
 
-  G1RefEnqueueTaskProxy enq_task_proxy(enq_task);
+  G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
 
   _g1h->set_par_threads(_active_workers);
   _workers->run_task(&enq_task_proxy);
@@ -2163,71 +2170,84 @@
 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
   ResourceMark rm;
   HandleMark   hm;
-  G1CollectedHeap* g1h   = G1CollectedHeap::heap();
-  ReferenceProcessor* rp = g1h->ref_processor();
-
-  // See the comment in G1CollectedHeap::ref_processing_init()
-  // about how reference processing currently works in G1.
-
-  // Process weak references.
-  rp->setup_policy(clear_all_soft_refs);
-  assert(_markStack.isEmpty(), "mark stack should be empty");
-
-  G1CMIsAliveClosure   g1_is_alive(g1h);
-  G1CMKeepAliveClosure g1_keep_alive(g1h, this, nextMarkBitMap());
-  G1CMDrainMarkingStackClosure
-    g1_drain_mark_stack(nextMarkBitMap(), &_markStack, &g1_keep_alive);
-  // We use the work gang from the G1CollectedHeap and we utilize all
-  // the worker threads.
-  int active_workers = g1h->workers() ? g1h->workers()->total_workers() : 1;
-  active_workers = MAX2(MIN2(active_workers, (int)_max_task_num), 1);
-
-  G1RefProcTaskExecutor par_task_executor(g1h, this, nextMarkBitMap(),
-                                          g1h->workers(), active_workers);
-
-
-  if (rp->processing_is_mt()) {
-    // Set the degree of MT here.  If the discovery is done MT, there
-    // may have been a different number of threads doing the discovery
-    // and a different number of discovered lists may have Ref objects.
-    // That is OK as long as the Reference lists are balanced (see
-    // balance_all_queues() and balance_queues()).
-    rp->set_active_mt_degree(active_workers);
-
-    rp->process_discovered_references(&g1_is_alive,
+
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+  // Is alive closure.
+  G1CMIsAliveClosure g1_is_alive(g1h);
+
+  // Inner scope to exclude the cleaning of the string and symbol
+  // tables from the displayed time.
+  {
+    bool verbose = PrintGC && PrintGCDetails;
+    if (verbose) {
+      gclog_or_tty->put(' ');
+    }
+    TraceTime t("GC ref-proc", verbose, false, gclog_or_tty);
+
+    ReferenceProcessor* rp = g1h->ref_processor_cm();
+
+    // See the comment in G1CollectedHeap::ref_processing_init()
+    // about how reference processing currently works in G1.
+
+    // Process weak references.
+    rp->setup_policy(clear_all_soft_refs);
+    assert(_markStack.isEmpty(), "mark stack should be empty");
+
+    G1CMKeepAliveClosure g1_keep_alive(g1h, this, nextMarkBitMap());
+    G1CMDrainMarkingStackClosure
+      g1_drain_mark_stack(nextMarkBitMap(), &_markStack, &g1_keep_alive);
+
+    // We use the work gang from the G1CollectedHeap and we utilize all
+    // the worker threads.
+    int active_workers = g1h->workers() ? g1h->workers()->total_workers() : 1;
+    active_workers = MAX2(MIN2(active_workers, (int)_max_task_num), 1);
+
+    G1CMRefProcTaskExecutor par_task_executor(g1h, this, nextMarkBitMap(),
+                                              g1h->workers(), active_workers);
+
+    if (rp->processing_is_mt()) {
+      // Set the degree of MT here.  If the discovery is done MT, there
+      // may have been a different number of threads doing the discovery
+      // and a different number of discovered lists may have Ref objects.
+      // That is OK as long as the Reference lists are balanced (see
+      // balance_all_queues() and balance_queues()).
+      rp->set_active_mt_degree(active_workers);
+
+      rp->process_discovered_references(&g1_is_alive,
                                       &g1_keep_alive,
                                       &g1_drain_mark_stack,
                                       &par_task_executor);
 
-    // The work routines of the parallel keep_alive and drain_marking_stack
-    // will set the has_overflown flag if we overflow the global marking
-    // stack.
-  } else {
-    rp->process_discovered_references(&g1_is_alive,
-                                      &g1_keep_alive,
-                                      &g1_drain_mark_stack,
-                                      NULL);
-
+      // The work routines of the parallel keep_alive and drain_marking_stack
+      // will set the has_overflown flag if we overflow the global marking
+      // stack.
+    } else {
+      rp->process_discovered_references(&g1_is_alive,
+                                        &g1_keep_alive,
+                                        &g1_drain_mark_stack,
+                                        NULL);
+    }
+
+    assert(_markStack.overflow() || _markStack.isEmpty(),
+            "mark stack should be empty (unless it overflowed)");
+    if (_markStack.overflow()) {
+      // Should have been done already when we tried to push an
+      // entry on to the global mark stack. But let's do it again.
+      set_has_overflown();
+    }
+
+    if (rp->processing_is_mt()) {
+      assert(rp->num_q() == active_workers, "why not");
+      rp->enqueue_discovered_references(&par_task_executor);
+    } else {
+      rp->enqueue_discovered_references();
+    }
+
+    rp->verify_no_references_recorded();
+    assert(!rp->discovery_enabled(), "Post condition");
   }
 
-  assert(_markStack.overflow() || _markStack.isEmpty(),
-      "mark stack should be empty (unless it overflowed)");
-  if (_markStack.overflow()) {
-    // Should have been done already when we tried to push an
-    // entry on to the global mark stack. But let's do it again.
-    set_has_overflown();
-  }
-
-  if (rp->processing_is_mt()) {
-    assert(rp->num_q() == active_workers, "why not");
-    rp->enqueue_discovered_references(&par_task_executor);
-  } else {
-    rp->enqueue_discovered_references();
-  }
-
-  rp->verify_no_references_recorded();
-  assert(!rp->discovery_enabled(), "should have been disabled");
-
   // Now clean up stale oops in StringTable
   StringTable::unlink(&g1_is_alive);
   // Clean up unreferenced symbols in symbol table.
@@ -3329,7 +3349,7 @@
   assert(_ref_processor == NULL, "should be initialized to NULL");
 
   if (G1UseConcMarkReferenceProcessing) {
-    _ref_processor = g1h->ref_processor();
+    _ref_processor = g1h->ref_processor_cm();
     assert(_ref_processor != NULL, "should not be NULL");
   }
 }
@@ -4553,7 +4573,7 @@
                  G1PPRL_SUM_BYTE_FORMAT("region-size"),
                  g1_committed.start(), g1_committed.end(),
                  g1_reserved.start(), g1_reserved.end(),
-                 HeapRegion::GrainBytes);
+                 (size_t)HeapRegion::GrainBytes);
   _out->print_cr(G1PPRL_LINE_PREFIX);
   _out->print_cr(G1PPRL_LINE_PREFIX
                  G1PPRL_TYPE_H_FORMAT
@@ -4564,6 +4584,15 @@
                  G1PPRL_DOUBLE_H_FORMAT,
                  "type", "address-range",
                  "used", "prev-live", "next-live", "gc-eff");
+  _out->print_cr(G1PPRL_LINE_PREFIX
+                 G1PPRL_TYPE_H_FORMAT
+                 G1PPRL_ADDR_BASE_H_FORMAT
+                 G1PPRL_BYTE_H_FORMAT
+                 G1PPRL_BYTE_H_FORMAT
+                 G1PPRL_BYTE_H_FORMAT
+                 G1PPRL_DOUBLE_H_FORMAT,
+                 "", "",
+                 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)");
 }
 
 // It takes as a parameter a reference to one of the _hum_* fields, it
--- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Mon Sep 26 12:06:58 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Tue Oct 18 17:18:37 2011 -0700
@@ -366,8 +366,8 @@
   friend class CMConcurrentMarkingTask;
   friend class G1ParNoteEndTask;
   friend class CalcLiveObjectsClosure;
-  friend class G1RefProcTaskProxy;
-  friend class G1RefProcTaskExecutor;
+  friend class G1CMRefProcTaskProxy;
+  friend class G1CMRefProcTaskExecutor;
   friend class G1CMParKeepAliveAndDrainClosure;
   friend class G1CMParDrainMarkingStackClosure;
 
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Sep 26 12:06:58 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Oct 18 17:18:37 2011 -0700
@@ -42,6 +42,7 @@
 #include "memory/gcLocker.inline.hpp"
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/generationSpec.hpp"
+#include "memory/referenceProcessor.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/oop.pcgc.inline.hpp"
 #include "runtime/aprofiler.hpp"
@@ -815,6 +816,11 @@
     result =
       humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
     assert(result != NULL, "it should always return a valid result");
+
+    // A successful humongous object allocation changes the used space
+    // information of the old generation so we need to recalculate the
+    // sizes and update the jstat counters here.
+    g1mm()->update_sizes();
   }
 
   verify_region_sets_optional();
@@ -1244,15 +1250,11 @@
 
     COMPILER2_PRESENT(DerivedPointerTable::clear());
 
-    // We want to discover references, but not process them yet.
-    // This mode is disabled in
-    // instanceRefKlass::process_discovered_references if the
-    // generation does some collection work, or
-    // instanceRefKlass::enqueue_discovered_references if the
-    // generation returns without doing any work.
-    ref_processor()->disable_discovery();
-    ref_processor()->abandon_partial_discovery();
-    ref_processor()->verify_no_references_recorded();
+    // Disable discovery and empty the discovered lists
+    // for the CM ref processor.
+    ref_processor_cm()->disable_discovery();
+    ref_processor_cm()->abandon_partial_discovery();
+    ref_processor_cm()->verify_no_references_recorded();
 
     // Abandon current iterations of concurrent marking and concurrent
     // refinement, if any are in progress.
@@ -1280,31 +1282,33 @@
     empty_young_list();
     g1_policy()->set_full_young_gcs(true);
 
-    // See the comment in G1CollectedHeap::ref_processing_init() about
+    // See the comments in g1CollectedHeap.hpp and
+    // G1CollectedHeap::ref_processing_init() about
     // how reference processing currently works in G1.
 
-    // Temporarily make reference _discovery_ single threaded (non-MT).
-    ReferenceProcessorMTDiscoveryMutator rp_disc_ser(ref_processor(), false);
-
-    // Temporarily make refs discovery atomic
-    ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true);
-
-    // Temporarily clear _is_alive_non_header
-    ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL);
-
-    ref_processor()->enable_discovery();
-    ref_processor()->setup_policy(do_clear_all_soft_refs);
+    // Temporarily make discovery by the STW ref processor single threaded (non-MT).
+    ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
+
+    // Temporarily clear the STW ref processor's _is_alive_non_header field.
+    ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
+
+    ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
+    ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
+
     // Do collection work
     {
       HandleMark hm;  // Discard invalid handles created during gc
-      G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs);
+      G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
     }
+
     assert(free_regions() == 0, "we should not have added any free regions");
     rebuild_region_lists();
 
     _summary_bytes_used = recalculate_used();
 
-    ref_processor()->enqueue_discovered_references();
+    // Enqueue any discovered reference objects that have
+    // not been removed from the discovered lists.
+    ref_processor_stw()->enqueue_discovered_references();
 
     COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 
@@ -1319,7 +1323,16 @@
                        /* option      */ VerifyOption_G1UsePrevMarking);
 
     }
-    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
+
+    assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
+    ref_processor_stw()->verify_no_references_recorded();
+
+    // Note: since we've just done a full GC, concurrent
+    // marking is no longer active. Therefore we need not
+    // re-enable reference discovery for the CM ref processor.
+    // That will be done at the start of the next marking cycle.
+    assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
+    ref_processor_cm()->verify_no_references_recorded();
 
     reset_gc_time_stamp();
     // Since everything potentially moved, we will clear all remembered
@@ -1414,7 +1427,7 @@
   if (PrintHeapAtGC) {
     Universe::print_heap_after_gc();
   }
-  g1mm()->update_counters();
+  g1mm()->update_sizes();
   post_full_gc_dump();
 
   return true;
@@ -1772,14 +1785,17 @@
   _g1_policy(policy_),
   _dirty_card_queue_set(false),
   _into_cset_dirty_card_queue_set(false),
-  _is_alive_closure(this),
-  _ref_processor(NULL),
+  _is_alive_closure_cm(this),
+  _is_alive_closure_stw(this),
+  _ref_processor_cm(NULL),
+  _ref_processor_stw(NULL),
   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
   _bot_shared(NULL),
   _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
   _evac_failure_scan_stack(NULL) ,
   _mark_in_progress(false),
   _cg1r(NULL), _summary_bytes_used(0),
+  _g1mm(NULL),
   _refine_cte_cl(NULL),
   _full_collection(false),
   _free_list("Master Free List"),
@@ -2059,7 +2075,7 @@
 
   // Do create of the monitoring and management support so that
   // values in the heap have been properly initialized.
-  _g1mm = new G1MonitoringSupport(this, &_g1_storage);
+  _g1mm = new G1MonitoringSupport(this);
 
   return JNI_OK;
 }
@@ -2067,34 +2083,81 @@
 void G1CollectedHeap::ref_processing_init() {
   // Reference processing in G1 currently works as follows:
   //
-  // * There is only one reference processor instance that
-  //   'spans' the entire heap. It is created by the code
-  //   below.
-  // * Reference discovery is not enabled during an incremental
-  //   pause (see 6484982).
-  // * Discoverered refs are not enqueued nor are they processed
-  //   during an incremental pause (see 6484982).
-  // * Reference discovery is enabled at initial marking.
-  // * Reference discovery is disabled and the discovered
-  //   references processed etc during remarking.
-  // * Reference discovery is MT (see below).
-  // * Reference discovery requires a barrier (see below).
-  // * Reference processing is currently not MT (see 6608385).
-  // * A full GC enables (non-MT) reference discovery and
-  //   processes any discovered references.
+  // * There are two reference processor instances. One is
+  //   used to record and process discovered references
+  //   during concurrent marking; the other is used to
+  //   record and process references during STW pauses
+  //   (both full and incremental).
+  // * Both ref processors need to 'span' the entire heap as
+  //   the regions in the collection set may be dotted around.
+  //
+  // * For the concurrent marking ref processor:
+  //   * Reference discovery is enabled at initial marking.
+  //   * Reference discovery is disabled and the discovered
+  //     references processed etc during remarking.
+  //   * Reference discovery is MT (see below).
+  //   * Reference discovery requires a barrier (see below).
+  //   * Reference processing may or may not be MT
+  //     (depending on the value of ParallelRefProcEnabled
+  //     and ParallelGCThreads).
+  //   * A full GC disables reference discovery by the CM
+  //     ref processor and abandons any entries on its
+  //     discovered lists.
+  //
+  // * For the STW processor:
+  //   * Non MT discovery is enabled at the start of a full GC.
+  //   * Processing and enqueueing during a full GC is non-MT.
+  //   * During a full GC, references are processed after marking.
+  //
+  //   * Discovery (may or may not be MT) is enabled at the start
+  //     of an incremental evacuation pause.
+  //   * References are processed near the end of a STW evacuation pause.
+  //   * For both types of GC:
+  //     * Discovery is atomic - i.e. not concurrent.
+  //     * Reference discovery will not need a barrier.
 
   SharedHeap::ref_processing_init();
   MemRegion mr = reserved_region();
-  _ref_processor =
+
+  // Concurrent Mark ref processor
+  _ref_processor_cm =
     new ReferenceProcessor(mr,    // span
-                           ParallelRefProcEnabled && (ParallelGCThreads > 1),    // mt processing
-                           (int) ParallelGCThreads,   // degree of mt processing
-                           ParallelGCThreads > 1 || ConcGCThreads > 1,  // mt discovery
-                           (int) MAX2(ParallelGCThreads, ConcGCThreads), // degree of mt discovery
-                           false,                     // Reference discovery is not atomic
-                           &_is_alive_closure,        // is alive closure for efficiency
-                           true);                     // Setting next fields of discovered
-                                                      // lists requires a barrier.
+                           ParallelRefProcEnabled && (ParallelGCThreads > 1),
+                                // mt processing
+                           (int) ParallelGCThreads,
+                                // degree of mt processing
+                           (ParallelGCThreads > 1) || (ConcGCThreads > 1),
+                                // mt discovery
+                           (int) MAX2(ParallelGCThreads, ConcGCThreads),
+                                // degree of mt discovery
+                           false,
+                                // Reference discovery is not atomic
+                           &_is_alive_closure_cm,
+                                // is alive closure
+                                // (for efficiency/performance)
+                           true);
+                                // Setting next fields of discovered
+                                // lists requires a barrier.
+
+  // STW ref processor
+  _ref_processor_stw =
+    new ReferenceProcessor(mr,    // span
+                           ParallelRefProcEnabled && (ParallelGCThreads > 1),
+                                // mt processing
+                           MAX2((int)ParallelGCThreads, 1),
+                                // degree of mt processing
+                           (ParallelGCThreads > 1),
+                                // mt discovery
+                           MAX2((int)ParallelGCThreads, 1),
+                                // degree of mt discovery
+                           true,
+                                // Reference discovery is atomic
+                           &_is_alive_closure_stw,
+                                // is alive closure
+                                // (for efficiency/performance)
+                           false);
+                                // Setting next fields of discovered
+                                // lists requires a barrier.
 }
 
 size_t G1CollectedHeap::capacity() const {
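For orientation, the following worked illustration (not part of the patch) shows what the two constructor calls above evaluate to under assumed flag settings of ParallelRefProcEnabled, ParallelGCThreads == 4 and ConcGCThreads == 2:

  //                          CM ref processor               STW ref processor
  //  mt processing           true                           true
  //  processing degree       (int)ParallelGCThreads == 4    MAX2(4, 1) == 4
  //  mt discovery            (4 > 1) || (2 > 1) == true     (4 > 1) == true
  //  discovery degree        MAX2(4, 2) == 4                MAX2(4, 1) == 4
  //  atomic discovery        false                          true
  //  discovered-list barrier needed                         not needed

With ConcGCThreads larger than ParallelGCThreads the CM discovery degree would grow accordingly, while the STW degree tracks ParallelGCThreads only.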
@@ -3117,6 +3180,10 @@
   COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
                         "derived pointer present"));
   // always_do_update_barrier = true;
+
+  // We have just completed a GC. Update the soft reference
+  // policy with the new heap occupancy
+  Universe::update_heap_info_at_gc();
 }
 
 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
@@ -3298,6 +3365,14 @@
     // for the duration of this pause.
     g1_policy()->decide_on_conc_mark_initiation();
 
+    // We do not allow initial-mark to be piggy-backed on a
+    // partially-young GC.
+    assert(!g1_policy()->during_initial_mark_pause() ||
+            g1_policy()->full_young_gcs(), "sanity");
+
+    // We also do not allow partially-young GCs during marking.
+    assert(!mark_in_progress() || g1_policy()->full_young_gcs(), "sanity");
+
     char verbose_str[128];
     sprintf(verbose_str, "GC pause ");
     if (g1_policy()->full_young_gcs()) {
@@ -3354,231 +3429,242 @@
 
       COMPILER2_PRESENT(DerivedPointerTable::clear());
 
-      // Please see comment in G1CollectedHeap::ref_processing_init()
-      // to see how reference processing currently works in G1.
-      //
-      // We want to turn off ref discovery, if necessary, and turn it back on
-      // on again later if we do. XXX Dubious: why is discovery disabled?
-      bool was_enabled = ref_processor()->discovery_enabled();
-      if (was_enabled) ref_processor()->disable_discovery();
-
-      // Forget the current alloc region (we might even choose it to be part
-      // of the collection set!).
-      release_mutator_alloc_region();
-
-      // We should call this after we retire the mutator alloc
-      // region(s) so that all the ALLOC / RETIRE events are generated
-      // before the start GC event.
-      _hr_printer.start_gc(false /* full */, (size_t) total_collections());
-
-      // The elapsed time induced by the start time below deliberately elides
-      // the possible verification above.
-      double start_time_sec = os::elapsedTime();
-      size_t start_used_bytes = used();
+      // Please see comment in g1CollectedHeap.hpp and
+      // G1CollectedHeap::ref_processing_init() to see how
+      // reference processing currently works in G1.
+
+      // Enable discovery in the STW reference processor
+      ref_processor_stw()->enable_discovery(true /*verify_disabled*/,
+                                            true /*verify_no_refs*/);
+
+      {
+        // We want to temporarily turn off discovery by the
+        // CM ref processor, if necessary, and turn it back on
+        // again later if we do. Using a scoped
+        // NoRefDiscovery object will do this.
+        NoRefDiscovery no_cm_discovery(ref_processor_cm());
+
+        // Forget the current alloc region (we might even choose it to be part
+        // of the collection set!).
+        release_mutator_alloc_region();
+
+        // We should call this after we retire the mutator alloc
+        // region(s) so that all the ALLOC / RETIRE events are generated
+        // before the start GC event.
+        _hr_printer.start_gc(false /* full */, (size_t) total_collections());
+
+        // The elapsed time induced by the start time below deliberately elides
+        // the possible verification above.
+        double start_time_sec = os::elapsedTime();
+        size_t start_used_bytes = used();
 
 #if YOUNG_LIST_VERBOSE
-      gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
-      _young_list->print();
-      g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
+        gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
+        _young_list->print();
+        g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
 #endif // YOUNG_LIST_VERBOSE
 
-      g1_policy()->record_collection_pause_start(start_time_sec,
-                                                 start_used_bytes);
+        g1_policy()->record_collection_pause_start(start_time_sec,
+                                                   start_used_bytes);
 
 #if YOUNG_LIST_VERBOSE
-      gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
-      _young_list->print();
+        gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
+        _young_list->print();
 #endif // YOUNG_LIST_VERBOSE
 
-      if (g1_policy()->during_initial_mark_pause()) {
-        concurrent_mark()->checkpointRootsInitialPre();
-      }
-      perm_gen()->save_marks();
-
-      // We must do this before any possible evacuation that should propagate
-      // marks.
-      if (mark_in_progress()) {
-        double start_time_sec = os::elapsedTime();
-
-        _cm->drainAllSATBBuffers();
-        double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0;
-        g1_policy()->record_satb_drain_time(finish_mark_ms);
-      }
-      // Record the number of elements currently on the mark stack, so we
-      // only iterate over these.  (Since evacuation may add to the mark
-      // stack, doing more exposes race conditions.)  If no mark is in
-      // progress, this will be zero.
-      _cm->set_oops_do_bound();
-
-      if (mark_in_progress()) {
-        concurrent_mark()->newCSet();
-      }
+        if (g1_policy()->during_initial_mark_pause()) {
+          concurrent_mark()->checkpointRootsInitialPre();
+        }
+        perm_gen()->save_marks();
+
+        // We must do this before any possible evacuation that should propagate
+        // marks.
+        if (mark_in_progress()) {
+          double start_time_sec = os::elapsedTime();
+
+          _cm->drainAllSATBBuffers();
+          double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0;
+          g1_policy()->record_satb_drain_time(finish_mark_ms);
+        }
+        // Record the number of elements currently on the mark stack, so we
+        // only iterate over these.  (Since evacuation may add to the mark
+        // stack, doing more exposes race conditions.)  If no mark is in
+        // progress, this will be zero.
+        _cm->set_oops_do_bound();
+
+        if (mark_in_progress()) {
+          concurrent_mark()->newCSet();
+        }
 
 #if YOUNG_LIST_VERBOSE
-      gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
-      _young_list->print();
-      g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
+        gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
+        _young_list->print();
+        g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
 #endif // YOUNG_LIST_VERBOSE
 
-      g1_policy()->choose_collection_set(target_pause_time_ms);
-
-      if (_hr_printer.is_active()) {
-        HeapRegion* hr = g1_policy()->collection_set();
-        while (hr != NULL) {
-          G1HRPrinter::RegionType type;
-          if (!hr->is_young()) {
-            type = G1HRPrinter::Old;
-          } else if (hr->is_survivor()) {
-            type = G1HRPrinter::Survivor;
-          } else {
-            type = G1HRPrinter::Eden;
+        g1_policy()->choose_collection_set(target_pause_time_ms);
+
+        if (_hr_printer.is_active()) {
+          HeapRegion* hr = g1_policy()->collection_set();
+          while (hr != NULL) {
+            G1HRPrinter::RegionType type;
+            if (!hr->is_young()) {
+              type = G1HRPrinter::Old;
+            } else if (hr->is_survivor()) {
+              type = G1HRPrinter::Survivor;
+            } else {
+              type = G1HRPrinter::Eden;
+            }
+            _hr_printer.cset(hr);
+            hr = hr->next_in_collection_set();
           }
-          _hr_printer.cset(hr);
-          hr = hr->next_in_collection_set();
         }
-      }
-
-      // We have chosen the complete collection set. If marking is
-      // active then, we clear the region fields of any of the
-      // concurrent marking tasks whose region fields point into
-      // the collection set as these values will become stale. This
-      // will cause the owning marking threads to claim a new region
-      // when marking restarts.
-      if (mark_in_progress()) {
-        concurrent_mark()->reset_active_task_region_fields_in_cset();
-      }
+
+        // We have chosen the complete collection set. If marking is
+        // active, then we clear the region fields of any of the
+        // concurrent marking tasks whose region fields point into
+        // the collection set as these values will become stale. This
+        // will cause the owning marking threads to claim a new region
+        // when marking restarts.
+        if (mark_in_progress()) {
+          concurrent_mark()->reset_active_task_region_fields_in_cset();
+        }
 
 #ifdef ASSERT
-      VerifyCSetClosure cl;
-      collection_set_iterate(&cl);
+        VerifyCSetClosure cl;
+        collection_set_iterate(&cl);
 #endif // ASSERT
 
-      setup_surviving_young_words();
-
-      // Initialize the GC alloc regions.
-      init_gc_alloc_regions();
-
-      // Actually do the work...
-      evacuate_collection_set();
-
-      free_collection_set(g1_policy()->collection_set());
-      g1_policy()->clear_collection_set();
-
-      cleanup_surviving_young_words();
-
-      // Start a new incremental collection set for the next pause.
-      g1_policy()->start_incremental_cset_building();
-
-      // Clear the _cset_fast_test bitmap in anticipation of adding
-      // regions to the incremental collection set for the next
-      // evacuation pause.
-      clear_cset_fast_test();
-
-      _young_list->reset_sampled_info();
-
-      // Don't check the whole heap at this point as the
-      // GC alloc regions from this pause have been tagged
-      // as survivors and moved on to the survivor list.
-      // Survivor regions will fail the !is_young() check.
-      assert(check_young_list_empty(false /* check_heap */),
-        "young list should be empty");
+        setup_surviving_young_words();
+
+        // Initialize the GC alloc regions.
+        init_gc_alloc_regions();
+
+        // Actually do the work...
+        evacuate_collection_set();
+
+        free_collection_set(g1_policy()->collection_set());
+        g1_policy()->clear_collection_set();
+
+        cleanup_surviving_young_words();
+
+        // Start a new incremental collection set for the next pause.
+        g1_policy()->start_incremental_cset_building();
+
+        // Clear the _cset_fast_test bitmap in anticipation of adding
+        // regions to the incremental collection set for the next
+        // evacuation pause.
+        clear_cset_fast_test();
+
+        _young_list->reset_sampled_info();
+
+        // Don't check the whole heap at this point as the
+        // GC alloc regions from this pause have been tagged
+        // as survivors and moved on to the survivor list.
+        // Survivor regions will fail the !is_young() check.
+        assert(check_young_list_empty(false /* check_heap */),
+          "young list should be empty");
 
 #if YOUNG_LIST_VERBOSE
-      gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
-      _young_list->print();
+        gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
+        _young_list->print();
 #endif // YOUNG_LIST_VERBOSE
 
-      g1_policy()->record_survivor_regions(_young_list->survivor_length(),
-        _young_list->first_survivor_region(),
-        _young_list->last_survivor_region());
-
-      _young_list->reset_auxilary_lists();
-
-      if (evacuation_failed()) {
-        _summary_bytes_used = recalculate_used();
-      } else {
-        // The "used" of the the collection set have already been subtracted
-        // when they were freed.  Add in the bytes evacuated.
-        _summary_bytes_used += g1_policy()->bytes_copied_during_gc();
-      }
-
-      if (g1_policy()->during_initial_mark_pause()) {
-        concurrent_mark()->checkpointRootsInitialPost();
-        set_marking_started();
-        // CAUTION: after the doConcurrentMark() call below,
-        // the concurrent marking thread(s) could be running
-        // concurrently with us. Make sure that anything after
-        // this point does not assume that we are the only GC thread
-        // running. Note: of course, the actual marking work will
-        // not start until the safepoint itself is released in
-        // ConcurrentGCThread::safepoint_desynchronize().
-        doConcurrentMark();
-      }
-
-      allocate_dummy_regions();
+        g1_policy()->record_survivor_regions(_young_list->survivor_length(),
+                                            _young_list->first_survivor_region(),
+                                            _young_list->last_survivor_region());
+
+        _young_list->reset_auxilary_lists();
+
+        if (evacuation_failed()) {
+          _summary_bytes_used = recalculate_used();
+        } else {
+          // The "used" of the collection set regions has already been
+          // subtracted when they were freed.  Add in the bytes evacuated.
+          _summary_bytes_used += g1_policy()->bytes_copied_during_gc();
+        }
+
+        if (g1_policy()->during_initial_mark_pause()) {
+          concurrent_mark()->checkpointRootsInitialPost();
+          set_marking_started();
+          // CAUTION: after the doConcurrentMark() call below,
+          // the concurrent marking thread(s) could be running
+          // concurrently with us. Make sure that anything after
+          // this point does not assume that we are the only GC thread
+          // running. Note: of course, the actual marking work will
+          // not start until the safepoint itself is released in
+          // ConcurrentGCThread::safepoint_desynchronize().
+          doConcurrentMark();
+        }
+
+        allocate_dummy_regions();
 
 #if YOUNG_LIST_VERBOSE
-      gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
-      _young_list->print();
-      g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
+        gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
+        _young_list->print();
+        g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
 #endif // YOUNG_LIST_VERBOSE
 
-      init_mutator_alloc_region();
-
-      {
-        size_t expand_bytes = g1_policy()->expansion_amount();
-        if (expand_bytes > 0) {
-          size_t bytes_before = capacity();
-          if (!expand(expand_bytes)) {
-            // We failed to expand the heap so let's verify that
-            // committed/uncommitted amount match the backing store
-            assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
-            assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
+        init_mutator_alloc_region();
+
+        {
+          size_t expand_bytes = g1_policy()->expansion_amount();
+          if (expand_bytes > 0) {
+            size_t bytes_before = capacity();
+            if (!expand(expand_bytes)) {
+              // We failed to expand the heap so let's verify that
+              // committed/uncommitted amount match the backing store
+              assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
+              assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
+            }
           }
         }
+
+        double end_time_sec = os::elapsedTime();
+        double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
+        g1_policy()->record_pause_time_ms(pause_time_ms);
+        g1_policy()->record_collection_pause_end();
+
+        MemoryService::track_memory_usage();
+
+        // In prepare_for_verify() below we'll need to scan the deferred
+        // update buffers to bring the RSets up-to-date if
+        // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
+        // the update buffers we'll probably need to scan cards on the
+        // regions we just allocated to (i.e., the GC alloc
+        // regions). However, during the last GC we called
+        // set_saved_mark() on all the GC alloc regions, so card
+        // scanning might skip the [saved_mark_word()...top()] area of
+        // those regions (i.e., the area we allocated objects into
+        // during the last GC). But it shouldn't. Given that
+        // saved_mark_word() is conditional on whether the GC time stamp
+        // on the region is current or not, by incrementing the GC time
+        // stamp here we invalidate all the GC time stamps on all the
+        // regions and saved_mark_word() will simply return top() for
+        // all the regions. This is a nicer way of ensuring this rather
+        // than iterating over the regions and fixing them. In fact, the
+        // GC time stamp increment here also ensures that
+        // saved_mark_word() will return top() between pauses, i.e.,
+        // during concurrent refinement. So we don't need the
+        // is_gc_active() check to decide which top to use when
+        // scanning cards (see CR 7039627).
+        increment_gc_time_stamp();
+
+        if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
+          HandleMark hm;  // Discard invalid handles created during verification
+          gclog_or_tty->print(" VerifyAfterGC:");
+          prepare_for_verify();
+          Universe::verify(/* allow dirty */ true,
+                           /* silent      */ false,
+                           /* option      */ VerifyOption_G1UsePrevMarking);
+        }
+
+        assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
+        ref_processor_stw()->verify_no_references_recorded();
+
+        // CM reference discovery will be re-enabled if necessary.
       }
 
-      double end_time_sec = os::elapsedTime();
-      double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
-      g1_policy()->record_pause_time_ms(pause_time_ms);
-      g1_policy()->record_collection_pause_end();
-
-      MemoryService::track_memory_usage();
-
-      // In prepare_for_verify() below we'll need to scan the deferred
-      // update buffers to bring the RSets up-to-date if
-      // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
-      // the update buffers we'll probably need to scan cards on the
-      // regions we just allocated to (i.e., the GC alloc
-      // regions). However, during the last GC we called
-      // set_saved_mark() on all the GC alloc regions, so card
-      // scanning might skip the [saved_mark_word()...top()] area of
-      // those regions (i.e., the area we allocated objects into
-      // during the last GC). But it shouldn't. Given that
-      // saved_mark_word() is conditional on whether the GC time stamp
-      // on the region is current or not, by incrementing the GC time
-      // stamp here we invalidate all the GC time stamps on all the
-      // regions and saved_mark_word() will simply return top() for
-      // all the regions. This is a nicer way of ensuring this rather
-      // than iterating over the regions and fixing them. In fact, the
-      // GC time stamp increment here also ensures that
-      // saved_mark_word() will return top() between pauses, i.e.,
-      // during concurrent refinement. So we don't need the
-      // is_gc_active() check to decided which top to use when
-      // scanning cards (see CR 7039627).
-      increment_gc_time_stamp();
-
-      if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
-        HandleMark hm;  // Discard invalid handles created during verification
-        gclog_or_tty->print(" VerifyAfterGC:");
-        prepare_for_verify();
-        Universe::verify(/* allow dirty */ true,
-                         /* silent      */ false,
-                         /* option      */ VerifyOption_G1UsePrevMarking);
-      }
-
-      if (was_enabled) ref_processor()->enable_discovery();
-
       {
         size_t expand_bytes = g1_policy()->expansion_amount();
         if (expand_bytes > 0) {
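The pause code in the hunk above relies on the scoped NoRefDiscovery object to switch off discovery by the CM ref processor for the duration of the block and to restore it on exit. A minimal sketch of such a helper, an assumption about its shape rather than a quotation of its actual declaration alongside ReferenceProcessor, looks like this:

  // Sketch only - an assumed shape for NoRefDiscovery, not the real code.
  class NoRefDiscovery : public StackObj {
    ReferenceProcessor* _rp;
    bool                _was_discovering_refs;
   public:
    NoRefDiscovery(ReferenceProcessor* rp)
      : _rp(rp), _was_discovering_refs(rp->discovery_enabled()) {
      if (_was_discovering_refs) {
        _rp->disable_discovery();
      }
    }
    ~NoRefDiscovery() {
      if (_was_discovering_refs) {
        // Restore discovery without the extra verification used above.
        _rp->enable_discovery(false /*verify_disabled*/, false /*verify_no_refs*/);
      }
    }
  };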
@@ -3630,7 +3716,7 @@
   if (PrintHeapAtGC) {
     Universe::print_heap_after_gc();
   }
-  g1mm()->update_counters();
+  g1mm()->update_sizes();
 
   if (G1SummarizeRSetStats &&
       (G1SummarizeRSetStatsPeriod > 0) &&
@@ -3728,34 +3814,6 @@
   _evac_failure_scan_stack = NULL;
 }
 
-// *** Sequential G1 Evacuation
-
-class G1IsAliveClosure: public BoolObjectClosure {
-  G1CollectedHeap* _g1;
-public:
-  G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
-  void do_object(oop p) { assert(false, "Do not call."); }
-  bool do_object_b(oop p) {
-    // It is reachable if it is outside the collection set, or is inside
-    // and forwarded.
-    return !_g1->obj_in_cs(p) || p->is_forwarded();
-  }
-};
-
-class G1KeepAliveClosure: public OopClosure {
-  G1CollectedHeap* _g1;
-public:
-  G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
-  void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
-  void do_oop(      oop* p) {
-    oop obj = *p;
-    if (_g1->obj_in_cs(obj)) {
-      assert( obj->is_forwarded(), "invariant" );
-      *p = obj->forwardee();
-    }
-  }
-};
-
 class UpdateRSetDeferred : public OopsInHeapRegionClosure {
 private:
   G1CollectedHeap* _g1;
@@ -3946,7 +4004,8 @@
 
 oop
 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
-                                               oop old) {
+                                               oop old,
+                                               bool should_mark_root) {
   assert(obj_in_cs(old),
          err_msg("obj: "PTR_FORMAT" should still be in the CSet",
                  (HeapWord*) old));
@@ -3954,6 +4013,16 @@
   oop forward_ptr = old->forward_to_atomic(old);
   if (forward_ptr == NULL) {
     // Forward-to-self succeeded.
+
+    // should_mark_root will be true when this routine is called
+    // from a root scanning closure during an initial mark pause.
+    // In this case the thread that succeeds in self-forwarding the
+    // object is also responsible for marking the object.
+    if (should_mark_root) {
+      assert(!oopDesc::is_null(old), "shouldn't be");
+      _cm->grayRoot(old);
+    }
+
     if (_evac_failure_closure != cl) {
       MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
       assert(!_drain_in_progress,
@@ -4175,12 +4244,17 @@
 #endif // ASSERT
 
 void G1ParScanThreadState::trim_queue() {
+  assert(_evac_cl != NULL, "not set");
+  assert(_evac_failure_cl != NULL, "not set");
+  assert(_partial_scan_cl != NULL, "not set");
+
   StarTask ref;
   do {
     // Drain the overflow stack first, so other threads can steal.
     while (refs()->pop_overflow(ref)) {
       deal_with_reference(ref);
     }
+
     while (refs()->pop_local(ref)) {
       deal_with_reference(ref);
     }
@@ -4208,7 +4282,8 @@
   }
 }
 
-oop G1ParCopyHelper::copy_to_survivor_space(oop old, bool should_mark_copy) {
+oop G1ParCopyHelper::copy_to_survivor_space(oop old, bool should_mark_root,
+                                                     bool should_mark_copy) {
   size_t    word_sz = old->size();
   HeapRegion* from_region = _g1->heap_region_containing_raw(old);
   // +1 to make the -1 indexes valid...
@@ -4228,7 +4303,7 @@
     // This will either forward-to-self, or detect that someone else has
     // installed a forwarding pointer.
     OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
-    return _g1->handle_evacuation_failure_par(cl, old);
+    return _g1->handle_evacuation_failure_par(cl, old, should_mark_root);
   }
 
   // We're going to allocate linearly, so might as well prefetch ahead.
@@ -4330,11 +4405,26 @@
       // we also need to handle marking of roots in the
       // event of an evacuation failure. In the event of an
       // evacuation failure, the object is forwarded to itself
-      // and not copied so let's mark it here.
+      // and not copied. For root-scanning closures, the
+      // object would be marked after a successful self-forward
+      // but an object could be pointed to by both a root and non
+      // root location and be self-forwarded by a non-root-scanning
+      // closure. Therefore we also have to attempt to mark the
+      // self-forwarded root object here.
       if (do_mark_object && obj->forwardee() == obj) {
         mark_object(p);
       }
     } else {
+      // During an initial mark pause, objects that are pointed to
+      // by the roots need to be marked - even in the event of an
+      // evacuation failure. We pass the template parameter
+      // do_mark_object (which is true for root scanning closures
+      // during an initial mark pause) to copy_to_survivor_space
+      // which will pass it on to the evacuation failure handling
+      // code. The thread that successfully self-forwards a root
+      // object to itself is responsible for marking the object.
+      bool should_mark_root = do_mark_object;
+
       // We need to mark the copied object if we're a root scanning
       // closure during an initial mark pause (i.e. do_mark_object
       // will be true), or the object is already marked and we need
@@ -4343,7 +4433,8 @@
                               _during_initial_mark ||
                               (_mark_in_progress && !_g1->is_obj_ill(obj));
 
-      oop copy_oop = copy_to_survivor_space(obj, should_mark_copy);
+      oop copy_oop = copy_to_survivor_space(obj, should_mark_root,
+                                                 should_mark_copy);
       oopDesc::encode_store_heap_oop(p, copy_oop);
     }
     // When scanning the RS, we only care about objs in CS.
@@ -4501,35 +4592,34 @@
     ResourceMark rm;
     HandleMark   hm;
 
+    ReferenceProcessor*             rp = _g1h->ref_processor_stw();
+
     G1ParScanThreadState            pss(_g1h, i);
-    G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss);
-    G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss);
-    G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss);
+    G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, rp);
+    G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
+    G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, rp);
 
     pss.set_evac_closure(&scan_evac_cl);
     pss.set_evac_failure_closure(&evac_failure_cl);
     pss.set_partial_scan_closure(&partial_scan_cl);
 
-    G1ParScanExtRootClosure         only_scan_root_cl(_g1h, &pss);
-    G1ParScanPermClosure            only_scan_perm_cl(_g1h, &pss);
-    G1ParScanHeapRSClosure          only_scan_heap_rs_cl(_g1h, &pss);
-    G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss);
-
-    G1ParScanAndMarkExtRootClosure  scan_mark_root_cl(_g1h, &pss);
-    G1ParScanAndMarkPermClosure     scan_mark_perm_cl(_g1h, &pss);
-    G1ParScanAndMarkHeapRSClosure   scan_mark_heap_rs_cl(_g1h, &pss);
-
-    OopsInHeapRegionClosure        *scan_root_cl;
-    OopsInHeapRegionClosure        *scan_perm_cl;
+    G1ParScanExtRootClosure        only_scan_root_cl(_g1h, &pss, rp);
+    G1ParScanPermClosure           only_scan_perm_cl(_g1h, &pss, rp);
+
+    G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
+    G1ParScanAndMarkPermClosure    scan_mark_perm_cl(_g1h, &pss, rp);
+
+    OopClosure*                    scan_root_cl = &only_scan_root_cl;
+    OopsInHeapRegionClosure*       scan_perm_cl = &only_scan_perm_cl;
 
     if (_g1h->g1_policy()->during_initial_mark_pause()) {
+      // We also need to mark copied objects.
       scan_root_cl = &scan_mark_root_cl;
       scan_perm_cl = &scan_mark_perm_cl;
-    } else {
-      scan_root_cl = &only_scan_root_cl;
-      scan_perm_cl = &only_scan_perm_cl;
     }
 
+    G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss);
+
     pss.start_strong_roots();
     _g1h->g1_process_strong_roots(/* not collecting perm */ false,
                                   SharedHeap::SO_AllClasses,
@@ -4577,6 +4667,7 @@
                         OopsInHeapRegionClosure* scan_rs,
                         OopsInGenClosure* scan_perm,
                         int worker_i) {
+
   // First scan the strong roots, including the perm gen.
   double ext_roots_start = os::elapsedTime();
   double closure_app_time_sec = 0.0;
@@ -4595,12 +4686,13 @@
                        &eager_scan_code_roots,
                        &buf_scan_perm);
 
-  // Now the ref_processor roots.
+  // Now the CM ref_processor roots.
   if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
-    // We need to treat the discovered reference lists as roots and
-    // keep entries (which are added by the marking threads) on them
-    // live until they can be processed at the end of marking.
-    ref_processor()->weak_oops_do(&buf_scan_non_heap_roots);
+    // We need to treat the discovered reference lists of the
+    // concurrent mark ref processor as roots and keep entries
+    // (which are added by the marking threads) on them live
+    // until they can be processed at the end of marking.
+    ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
   }
 
   // Finish up any enqueued closure apps (attributed as object copy time).
@@ -4641,6 +4733,524 @@
   SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure);
 }
 
+// Weak Reference Processing support
+
+// An always "is_alive" closure that is used to preserve referents.
+// If the object is non-null then it's alive.  Used in the preservation
+// of referent objects that are pointed to by reference objects
+// discovered by the CM ref processor.
+class G1AlwaysAliveClosure: public BoolObjectClosure {
+  G1CollectedHeap* _g1;
+public:
+  G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
+  void do_object(oop p) { assert(false, "Do not call."); }
+  bool do_object_b(oop p) {
+    if (p != NULL) {
+      return true;
+    }
+    return false;
+  }
+};
+
+bool G1STWIsAliveClosure::do_object_b(oop p) {
+  // An object is reachable if it is outside the collection set,
+  // or is inside and copied.
+  return !_g1->obj_in_cs(p) || p->is_forwarded();
+}
+
+// Non Copying Keep Alive closure
+class G1KeepAliveClosure: public OopClosure {
+  G1CollectedHeap* _g1;
+public:
+  G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
+  void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
+  void do_oop(      oop* p) {
+    oop obj = *p;
+
+    if (_g1->obj_in_cs(obj)) {
+      assert( obj->is_forwarded(), "invariant" );
+      *p = obj->forwardee();
+    }
+  }
+};
+
+// Copying Keep Alive closure - can be called from both
+// serial and parallel code as long as different worker
+// threads utilize different G1ParScanThreadState instances
+// and different queues.
+
+class G1CopyingKeepAliveClosure: public OopClosure {
+  G1CollectedHeap*         _g1h;
+  OopClosure*              _copy_non_heap_obj_cl;
+  OopsInHeapRegionClosure* _copy_perm_obj_cl;
+  G1ParScanThreadState*    _par_scan_state;
+
+public:
+  G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
+                            OopClosure* non_heap_obj_cl,
+                            OopsInHeapRegionClosure* perm_obj_cl,
+                            G1ParScanThreadState* pss):
+    _g1h(g1h),
+    _copy_non_heap_obj_cl(non_heap_obj_cl),
+    _copy_perm_obj_cl(perm_obj_cl),
+    _par_scan_state(pss)
+  {}
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(      oop* p) { do_oop_work(p); }
+
+  template <class T> void do_oop_work(T* p) {
+    oop obj = oopDesc::load_decode_heap_oop(p);
+
+    if (_g1h->obj_in_cs(obj)) {
+      // If the referent object has been forwarded (either copied
+      // to a new location or to itself in the event of an
+      // evacuation failure) then we need to update the reference
+      // field and, if both reference and referent are in the G1
+      // heap, update the RSet for the referent.
+      //
+      // If the referent has not been forwarded then we have to keep
+      // it alive by policy. Therefore we have to copy the referent.
+      //
+      // If the reference field is in the G1 heap then we can push
+      // it on the PSS queue. When the queue is drained (after each
+      // phase of reference processing) the object and its followers
+      // will be copied, the reference field set to point to the
+      // new location, and the RSet updated. Otherwise we need to
+      // use the non-heap or perm closures directly to copy
+      // the referent object and update the pointer, while avoiding
+      // updating the RSet.
+
+      if (_g1h->is_in_g1_reserved(p)) {
+        _par_scan_state->push_on_queue(p);
+      } else {
+        // The reference field is not in the G1 heap.
+        if (_g1h->perm_gen()->is_in(p)) {
+          _copy_perm_obj_cl->do_oop(p);
+        } else {
+          _copy_non_heap_obj_cl->do_oop(p);
+        }
+      }
+    }
+  }
+};
+
+// Serial drain queue closure. Called as the 'complete_gc'
+// closure for each discovered list in some of the
+// reference processing phases.
+
+class G1STWDrainQueueClosure: public VoidClosure {
+protected:
+  G1CollectedHeap* _g1h;
+  G1ParScanThreadState* _par_scan_state;
+
+  G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
+
+public:
+  G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :
+    _g1h(g1h),
+    _par_scan_state(pss)
+  { }
+
+  void do_void() {
+    G1ParScanThreadState* const pss = par_scan_state();
+    pss->trim_queue();
+  }
+};
+
+// Parallel Reference Processing closures
+
+// Implementation of AbstractRefProcTaskExecutor for parallel reference
+// processing during G1 evacuation pauses.
+
+class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
+private:
+  G1CollectedHeap*   _g1h;
+  RefToScanQueueSet* _queues;
+  WorkGang*          _workers;
+  int                _active_workers;
+
+public:
+  G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,
+                        WorkGang* workers,
+                        RefToScanQueueSet *task_queues,
+                        int n_workers) :
+    _g1h(g1h),
+    _queues(task_queues),
+    _workers(workers),
+    _active_workers(n_workers)
+  {
+    assert(n_workers > 0, "shouldn't call this otherwise");
+  }
+
+  // Executes the given task using the STW parallel worker threads.
+  virtual void execute(ProcessTask& task);
+  virtual void execute(EnqueueTask& task);
+};
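G1STWRefProcTaskExecutor plugs the STW worker gang into the reference processor through the AbstractRefProcTaskExecutor interface. As rough orientation (an assumption about the interface's shape, not a quotation of referenceProcessor.hpp), the contract it implements is approximately:

  // Approximate shape of the executor interface this class implements.
  class AbstractRefProcTaskExecutor {
   public:
    class ProcessTask;   // one slice of discovered-list processing
    class EnqueueTask;   // one slice of discovered-list enqueueing

    // Run the given task, potentially on several worker threads.
    virtual void execute(ProcessTask& task) = 0;
    virtual void execute(EnqueueTask& task) = 0;
  };

The reference processor calls back into execute() once per processing phase, which is why the proxy tasks below wrap a ProcessTask or EnqueueTask rather than doing the work directly.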
+
+// Gang task for possibly parallel reference processing
+
+class G1STWRefProcTaskProxy: public AbstractGangTask {
+  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
+  ProcessTask&     _proc_task;
+  G1CollectedHeap* _g1h;
+  RefToScanQueueSet *_task_queues;
+  ParallelTaskTerminator* _terminator;
+
+public:
+  G1STWRefProcTaskProxy(ProcessTask& proc_task,
+                     G1CollectedHeap* g1h,
+                     RefToScanQueueSet *task_queues,
+                     ParallelTaskTerminator* terminator) :
+    AbstractGangTask("Process reference objects in parallel"),
+    _proc_task(proc_task),
+    _g1h(g1h),
+    _task_queues(task_queues),
+    _terminator(terminator)
+  {}
+
+  virtual void work(int i) {
+    // The reference processing task executed by a single worker.
+    ResourceMark rm;
+    HandleMark   hm;
+
+    G1STWIsAliveClosure is_alive(_g1h);
+
+    G1ParScanThreadState pss(_g1h, i);
+
+    G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
+    G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
+    G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
+
+    pss.set_evac_closure(&scan_evac_cl);
+    pss.set_evac_failure_closure(&evac_failure_cl);
+    pss.set_partial_scan_closure(&partial_scan_cl);
+
+    G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
+    G1ParScanPermClosure           only_copy_perm_cl(_g1h, &pss, NULL);
+
+    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
+    G1ParScanAndMarkPermClosure    copy_mark_perm_cl(_g1h, &pss, NULL);
+
+    OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
+    OopsInHeapRegionClosure*       copy_perm_cl = &only_copy_perm_cl;
+
+    if (_g1h->g1_policy()->during_initial_mark_pause()) {
+      // We also need to mark copied objects.
+      copy_non_heap_cl = &copy_mark_non_heap_cl;
+      copy_perm_cl = &copy_mark_perm_cl;
+    }
+
+    // Keep alive closure.
+    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_perm_cl, &pss);
+
+    // Complete GC closure
+    G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);
+
+    // Call the reference processing task's work routine.
+    _proc_task.work(i, is_alive, keep_alive, drain_queue);
+
+    // Note we cannot assert that the refs array is empty here as not all
+    // of the processing tasks (specifically phase2 - pp2_work) execute
+    // the complete_gc closure (which ordinarily would drain the queue) so
+    // the queue may not be empty.
+  }
+};
+
+// Driver routine for parallel reference processing.
+// Creates an instance of the ref processing gang
+// task and has the worker threads execute it.
+void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task) {
+  assert(_workers != NULL, "Need parallel worker threads.");
+
+  ParallelTaskTerminator terminator(_active_workers, _queues);
+  G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _queues, &terminator);
+
+  _g1h->set_par_threads(_active_workers);
+  _workers->run_task(&proc_task_proxy);
+  _g1h->set_par_threads(0);
+}
+
+// Gang task for parallel reference enqueueing.
+
+class G1STWRefEnqueueTaskProxy: public AbstractGangTask {
+  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
+  EnqueueTask& _enq_task;
+
+public:
+  G1STWRefEnqueueTaskProxy(EnqueueTask& enq_task) :
+    AbstractGangTask("Enqueue reference objects in parallel"),
+    _enq_task(enq_task)
+  { }
+
+  virtual void work(int i) {
+    _enq_task.work(i);
+  }
+};
+
+// Driver routine for parallel reference enqueueing.
+// Creates an instance of the ref enqueueing gang
+// task and has the worker threads execute it.
+
+void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
+  assert(_workers != NULL, "Need parallel worker threads.");
+
+  G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);
+
+  _g1h->set_par_threads(_active_workers);
+  _workers->run_task(&enq_task_proxy);
+  _g1h->set_par_threads(0);
+}
+
+// End of weak reference support closures
+
+// Abstract task used to preserve (i.e. copy) any referent objects
+// that are in the collection set and are pointed to by reference
+// objects discovered by the CM ref processor.
+
+class G1ParPreserveCMReferentsTask: public AbstractGangTask {
+protected:
+  G1CollectedHeap* _g1h;
+  RefToScanQueueSet      *_queues;
+  ParallelTaskTerminator _terminator;
+  int _n_workers;
+
+public:
+  G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) :
+    AbstractGangTask("ParPreserveCMReferents"),
+    _g1h(g1h),
+    _queues(task_queues),
+    _terminator(workers, _queues),
+    _n_workers(workers)
+  { }
+
+  void work(int i) {
+    ResourceMark rm;
+    HandleMark   hm;
+
+    G1ParScanThreadState            pss(_g1h, i);
+    G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
+    G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
+    G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
+
+    pss.set_evac_closure(&scan_evac_cl);
+    pss.set_evac_failure_closure(&evac_failure_cl);
+    pss.set_partial_scan_closure(&partial_scan_cl);
+
+    assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
+
+
+    G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
+    G1ParScanPermClosure           only_copy_perm_cl(_g1h, &pss, NULL);
+
+    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
+    G1ParScanAndMarkPermClosure    copy_mark_perm_cl(_g1h, &pss, NULL);
+
+    OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
+    OopsInHeapRegionClosure*       copy_perm_cl = &only_copy_perm_cl;
+
+    if (_g1h->g1_policy()->during_initial_mark_pause()) {
+      // We also need to mark copied objects.
+      copy_non_heap_cl = &copy_mark_non_heap_cl;
+      copy_perm_cl = &copy_mark_perm_cl;
+    }
+
+    // Is alive closure
+    G1AlwaysAliveClosure always_alive(_g1h);
+
+    // Copying keep alive closure. Applied to referent objects that need
+    // to be copied.
+    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_perm_cl, &pss);
+
+    ReferenceProcessor* rp = _g1h->ref_processor_cm();
+
+    int limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
+    int stride = MIN2(MAX2(_n_workers, 1), limit);
+
+    // limit is set using max_num_q() - which was set using ParallelGCThreads.
+    // So this must be true - but assert just in case someone decides to
+    // change the worker ids.
+    assert(0 <= i && i < limit, "sanity");
+    assert(!rp->discovery_is_atomic(), "check this code");
+
+    // Select discovered lists [i, i+stride, i+2*stride,...,limit)
+    for (int idx = i; idx < limit; idx += stride) {
+      DiscoveredList& ref_list = rp->discovered_soft_refs()[idx];
+
+      DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
+      while (iter.has_next()) {
+        // Since discovery is not atomic for the CM ref processor, we
+        // can see some null referent objects.
+        iter.load_ptrs(DEBUG_ONLY(true));
+        oop ref = iter.obj();
+
+        // This will filter nulls.
+        if (iter.is_referent_alive()) {
+          iter.make_referent_alive();
+        }
+        iter.move_to_next();
+      }
+    }
+
+    // Drain the queue - which may cause stealing
+    G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _queues, &_terminator);
+    drain_queue.do_void();
+    // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
+    assert(pss.refs()->is_empty(), "should be");
+  }
+};
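The stride-based claiming of discovered lists in work() above is easier to see with concrete numbers. The values below are assumptions chosen purely for illustration (four reference subclasses, max_num_q() == 4, four workers); the snippet is standalone C++, not HotSpot code:

  #include <algorithm>
  #include <cstdio>

  int main() {
    const int ref_subclasses = 4;   // stands in for number_of_subclasses_of_ref()
    const int max_num_q      = 4;   // stands in for rp->max_num_q()
    const int n_workers      = 4;   // stands in for _n_workers

    const int limit  = ref_subclasses * max_num_q;               // 16 lists
    const int stride = std::min(std::max(n_workers, 1), limit);  // 4

    // Worker i claims lists i, i+stride, i+2*stride, ... so the 16 lists
    // are split into 4 disjoint groups of 4.
    for (int i = 0; i < n_workers; i++) {
      std::printf("worker %d:", i);
      for (int idx = i; idx < limit; idx += stride) {
        std::printf(" %d", idx);
      }
      std::printf("\n");
    }
    return 0;
  }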
+
+// Weak Reference processing during an evacuation pause (part 1).
+void G1CollectedHeap::process_discovered_references() {
+  double ref_proc_start = os::elapsedTime();
+
+  ReferenceProcessor* rp = _ref_processor_stw;
+  assert(rp->discovery_enabled(), "should have been enabled");
+
+  // Any reference objects, in the collection set, that were 'discovered'
+  // by the CM ref processor should have already been copied (either by
+  // applying the external root copy closure to the discovered lists, or
+  // by following an RSet entry).
+  //
+  // But some of the referents that are in the collection set, and that these
+  // reference objects point to, may not have been copied: the STW ref
+  // processor would have seen that the reference object had already
+  // been 'discovered' and would have skipped discovering the reference,
+  // but would not have treated the reference object as a regular oop.
+  // As a result the copy closure would not have been applied to the
+  // referent object.
+  //
+  // We need to explicitly copy these referent objects - the references
+  // will be processed at the end of remarking.
+  //
+  // We also need to do this copying before we process the reference
+  // objects discovered by the STW ref processor in case one of these
+  // referents points to another object which is also referenced by an
+  // object discovered by the STW ref processor.
+
+  int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
+                        workers()->total_workers() : 1);
+
+  set_par_threads(n_workers);
+  G1ParPreserveCMReferentsTask keep_cm_referents(this, n_workers, _task_queues);
+
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
+    workers()->run_task(&keep_cm_referents);
+  } else {
+    keep_cm_referents.work(0);
+  }
+
+  set_par_threads(0);
+
+  // Closure to test whether a referent is alive.
+  G1STWIsAliveClosure is_alive(this);
+
+  // Even when parallel reference processing is enabled, the processing
+  // of JNI refs is serial and is performed by the current thread
+  // rather than by a worker. The following PSS will be used for processing
+  // JNI refs.
+
+  // Use only a single queue for this PSS.
+  G1ParScanThreadState pss(this, 0);
+
+  // We do not embed a reference processor in the copying/scanning
+  // closures while we're actually processing the discovered
+  // reference objects.
+  G1ParScanHeapEvacClosure        scan_evac_cl(this, &pss, NULL);
+  G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
+  G1ParScanPartialArrayClosure    partial_scan_cl(this, &pss, NULL);
+
+  pss.set_evac_closure(&scan_evac_cl);
+  pss.set_evac_failure_closure(&evac_failure_cl);
+  pss.set_partial_scan_closure(&partial_scan_cl);
+
+  assert(pss.refs()->is_empty(), "pre-condition");
+
+  G1ParScanExtRootClosure        only_copy_non_heap_cl(this, &pss, NULL);
+  G1ParScanPermClosure           only_copy_perm_cl(this, &pss, NULL);
+
+  G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
+  G1ParScanAndMarkPermClosure    copy_mark_perm_cl(this, &pss, NULL);
+
+  OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
+  OopsInHeapRegionClosure*       copy_perm_cl = &only_copy_perm_cl;
+
+  if (_g1h->g1_policy()->during_initial_mark_pause()) {
+    // We also need to mark copied objects.
+    copy_non_heap_cl = &copy_mark_non_heap_cl;
+    copy_perm_cl = &copy_mark_perm_cl;
+  }
+
+  // Keep alive closure.
+  G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, copy_perm_cl, &pss);
+
+  // Serial Complete GC closure
+  G1STWDrainQueueClosure drain_queue(this, &pss);
+
+  // Setup the soft refs policy...
+  rp->setup_policy(false);
+
+  if (!rp->processing_is_mt()) {
+    // Serial reference processing...
+    rp->process_discovered_references(&is_alive,
+                                      &keep_alive,
+                                      &drain_queue,
+                                      NULL);
+  } else {
+    // Parallel reference processing
+    int active_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
+    assert(rp->num_q() == active_workers, "sanity");
+    assert(active_workers <= rp->max_num_q(), "sanity");
+
+    G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, active_workers);
+    rp->process_discovered_references(&is_alive, &keep_alive, &drain_queue, &par_task_executor);
+  }
+
+  // We have completed copying any necessary live referent objects
+  // (that were not copied during the actual pause) so we can
+  // retire any active alloc buffers
+  pss.retire_alloc_buffers();
+  assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
+
+  double ref_proc_time = os::elapsedTime() - ref_proc_start;
+  g1_policy()->record_ref_proc_time(ref_proc_time * 1000.0);
+}
+
+// Weak Reference processing during an evacuation pause (part 2).
+void G1CollectedHeap::enqueue_discovered_references() {
+  double ref_enq_start = os::elapsedTime();
+
+  ReferenceProcessor* rp = _ref_processor_stw;
+  assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
+
+  // Now enqueue any references remaining on the discovered lists
+  // onto the pending list.
+  if (!rp->processing_is_mt()) {
+    // Serial reference processing...
+    rp->enqueue_discovered_references();
+  } else {
+    // Parallel reference enqueuing
+
+    int active_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
+    assert(rp->num_q() == active_workers, "sanity");
+    assert(active_workers <= rp->max_num_q(), "sanity");
+
+    G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, active_workers);
+    rp->enqueue_discovered_references(&par_task_executor);
+  }
+
+  rp->verify_no_references_recorded();
+  assert(!rp->discovery_enabled(), "should have been disabled");
+
+  // FIXME
+  // CM's reference processing also cleans up the string and symbol tables.
+  // Should we do that here also? We could, but it is a serial operation
+  // and could significantly increase the pause time.
+
+  double ref_enq_time = os::elapsedTime() - ref_enq_start;
+  g1_policy()->record_ref_enq_time(ref_enq_time * 1000.0);
+}
+
 void G1CollectedHeap::evacuate_collection_set() {
   set_evacuation_failed(false);
 
@@ -4658,6 +5268,7 @@
 
   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
   double start_par = os::elapsedTime();
+
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     // The individual threads will set their evac-failure closures.
     StrongRootsScope srs(this);
@@ -4672,15 +5283,23 @@
   g1_policy()->record_par_time(par_time);
   set_par_threads(0);
 
+  // Process any discovered reference objects - we have
+  // to do this _before_ we retire the GC alloc regions
+  // as we may have to copy some 'reachable' referent
+  // objects (and their reachable sub-graphs) that were
+  // not copied during the pause.
+  process_discovered_references();
+
   // Weak root processing.
   // Note: when JSR 292 is enabled and code blobs can contain
   // non-perm oops then we will need to process the code blobs
   // here too.
   {
-    G1IsAliveClosure is_alive(this);
+    G1STWIsAliveClosure is_alive(this);
     G1KeepAliveClosure keep_alive(this);
     JNIHandles::weak_oops_do(&is_alive, &keep_alive);
   }
+
   release_gc_alloc_regions();
   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
 
@@ -4702,6 +5321,15 @@
     }
   }
 
+  // Enqueue any references remaining on the STW
+  // reference processor's discovered lists. We need to do
+  // this after the card table is cleaned (and verified) as
+  // the act of enqueuing entries on to the pending list
+  // will log these updates (and dirty their associated
+  // cards). We need these updates logged to update any
+  // RSets.
+  enqueue_discovered_references();
+
   if (G1DeferredRSUpdate) {
     RedirtyLoggedCardTableEntryFastClosure redirty;
     dirty_card_queue_set().set_closure(&redirty);
@@ -4902,7 +5530,7 @@
   }
 
   double elapsed = os::elapsedTime() - start;
-  g1_policy()->record_clear_ct_time( elapsed * 1000.0);
+  g1_policy()->record_clear_ct_time(elapsed * 1000.0);
 #ifndef PRODUCT
   if (G1VerifyCTCleanup || VerifyAfterGC) {
     G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
@@ -5193,7 +5821,6 @@
       g1_policy()->update_region_num(true /* next_is_young */);
       set_region_short_lived_locked(new_alloc_region);
       _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full);
-      g1mm()->update_eden_counters();
       return new_alloc_region;
     }
   }
@@ -5208,6 +5835,10 @@
   g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
   _summary_bytes_used += allocated_bytes;
   _hr_printer.retire(alloc_region);
+  // We update the eden sizes here, when the region is retired,
+  // instead of when it's allocated, since this is the point at which its
+  // used space has been recorded in _summary_bytes_used.
+  g1mm()->update_eden_size();
 }
 
 HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Mon Sep 26 12:06:58 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Oct 18 17:18:37 2011 -0700
@@ -155,6 +155,19 @@
     : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
 };
 
+// The G1 STW is-alive closure.
+// An instance is embedded into the G1CH and used as the
+// (optional) _is_alive_non_header closure in the STW
+// reference processor. It is also used extensively during
+// reference processing in STW evacuation pauses.
+class G1STWIsAliveClosure: public BoolObjectClosure {
+  G1CollectedHeap* _g1;
+public:
+  G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
+  void do_object(oop p) { assert(false, "Do not call."); }
+  bool do_object_b(oop p);
+};
+
 class SurvivorGCAllocRegion : public G1AllocRegion {
 protected:
   virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
@@ -174,6 +187,7 @@
 };
 
 class RefineCardTableEntryClosure;
+
 class G1CollectedHeap : public SharedHeap {
   friend class VM_G1CollectForAllocation;
   friend class VM_GenCollectForPermanentAllocation;
@@ -573,9 +587,20 @@
   // allocated block, or else "NULL".
   HeapWord* expand_and_allocate(size_t word_size);
 
+  // Process any reference objects discovered during
+  // an incremental evacuation pause.
+  void process_discovered_references();
+
+  // Enqueue any remaining discovered references
+  // after processing.
+  void enqueue_discovered_references();
+
 public:
 
-  G1MonitoringSupport* g1mm() { return _g1mm; }
+  G1MonitoringSupport* g1mm() {
+    assert(_g1mm != NULL, "should have been initialized");
+    return _g1mm;
+  }
 
   // Expand the garbage-first heap by at least the given size (in bytes!).
   // Returns true if the heap was expanded by the requested amount;
@@ -822,17 +847,87 @@
   void finalize_for_evac_failure();
 
   // An attempt to evacuate "obj" has failed; take necessary steps.
-  oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
+  oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj,
+                                    bool should_mark_root);
   void handle_evacuation_failure_common(oop obj, markOop m);
 
+  // ("Weak") Reference processing support.
+  //
+  // G1 has two instances of the reference processor class. One
+  // (_ref_processor_cm) handles reference object discovery
+  // and subsequent processing during concurrent marking cycles.
+  //
+  // The other (_ref_processor_stw) handles reference object
+  // discovery and processing during full GCs and incremental
+  // evacuation pauses.
+  //
+  // During an incremental pause, reference discovery will be
+  // temporarily disabled for _ref_processor_cm and will be
+  // enabled for _ref_processor_stw. At the end of the evacuation
+  // pause references discovered by _ref_processor_stw will be
+  // processed and discovery will be disabled. The previous
+  // setting for reference object discovery for _ref_processor_cm
+  // will be re-instated.
+  //
+  // At the start of marking:
+  //  * Discovery by the CM ref processor is verified to be inactive
+  //    and its discovered lists are empty.
+  //  * Discovery by the CM ref processor is then enabled.
+  //
+  // At the end of marking:
+  //  * Any references on the CM ref processor's discovered
+  //    lists are processed (possibly MT).
+  //
+  // At the start of full GC we:
+  //  * Disable discovery by the CM ref processor and
+  //    empty the CM ref processor's discovered lists
+  //    (without processing any entries).
+  //  * Verify that the STW ref processor is inactive and its
+  //    discovered lists are empty.
+  //  * Temporarily set STW ref processor discovery as single threaded.
+  //  * Temporarily clear the STW ref processor's _is_alive_non_header
+  //    field.
+  //  * Finally enable discovery by the STW ref processor.
+  //
+  // The STW ref processor is used to record any discovered
+  // references during the full GC.
+  //
+  // At the end of a full GC we:
+  //  * Enqueue any reference objects discovered by the STW ref processor
+  //    that have non-live referents. This has the side-effect of
+  //    making the STW ref processor inactive by disabling discovery.
+  //  * Verify that the CM ref processor is still inactive
+  //    and no references have been placed on its discovered
+  //    lists (also checked as a precondition during initial marking).
+
+  // The (stw) reference processor...
+  ReferenceProcessor* _ref_processor_stw;
+
+  // During reference object discovery, the _is_alive_non_header
+  // closure (if non-null) is applied to the referent object to
+  // determine whether the referent is live. If so then the
+  // reference object does not need to be 'discovered' and can
+  // be treated as a regular oop. This has the benefit of reducing
+  // the number of 'discovered' reference objects that need to
+  // be processed.
+  //
+  // Instance of the is_alive closure for embedding into the
+  // STW reference processor as the _is_alive_non_header field.
+  // Supplying a value for the _is_alive_non_header field is
+  // optional but doing so prevents unnecessary additions to
+  // the discovered lists during reference discovery.
+  G1STWIsAliveClosure _is_alive_closure_stw;
+
+  // The (concurrent marking) reference processor...
+  ReferenceProcessor* _ref_processor_cm;
+
   // Instance of the concurrent mark is_alive closure for embedding
-  // into the reference processor as the is_alive_non_header. This
-  // prevents unnecessary additions to the discovered lists during
-  // concurrent discovery.
-  G1CMIsAliveClosure _is_alive_closure;
-
-  // ("Weak") Reference processing support
-  ReferenceProcessor* _ref_processor;
+  // into the Concurrent Marking reference processor as the
+  // _is_alive_non_header field. Supplying a value for the
+  // _is_alive_non_header field is optional but doing so prevents
+  // unnecessary additions to the discovered lists during reference
+  // discovery.
+  G1CMIsAliveClosure _is_alive_closure_cm;
 
   enum G1H_process_strong_roots_tasks {
     G1H_PS_mark_stack_oops_do,
@@ -873,6 +968,7 @@
   // specified by the policy object.
   jint initialize();
 
+  // Initialize weak reference processing.
   virtual void ref_processing_init();
 
   void set_par_threads(int t) {
@@ -924,8 +1020,13 @@
   // The shared block offset table array.
   G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }
 
-  // Reference Processing accessor
-  ReferenceProcessor* ref_processor() { return _ref_processor; }
+  // Reference Processing accessors
+
+  // The STW reference processor....
+  ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
+
+  // The Concurrent Marking reference processor...
+  ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
 
   virtual size_t capacity() const;
   virtual size_t used() const;
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Mon Sep 26 12:06:58 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Tue Oct 18 17:18:37 2011 -0700
@@ -152,8 +152,12 @@
 
   _summary(new Summary()),
 
+  _cur_clear_ct_time_ms(0.0),
+
+  _cur_ref_proc_time_ms(0.0),
+  _cur_ref_enq_time_ms(0.0),
+
 #ifndef PRODUCT
-  _cur_clear_ct_time_ms(0.0),
   _min_clear_cc_time_ms(-1.0),
   _max_clear_cc_time_ms(-1.0),
   _cur_clear_cc_time_ms(0.0),
@@ -459,15 +463,16 @@
 // ParallelScavengeHeap::initialize()). We might change this in the
 // future, but it's a good start.
 class G1YoungGenSizer : public TwoGenerationCollectorPolicy {
+private:
+  size_t size_to_region_num(size_t byte_size) {
+    return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
+  }
 
 public:
   G1YoungGenSizer() {
     initialize_flags();
     initialize_size_info();
   }
-  size_t size_to_region_num(size_t byte_size) {
-    return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
-  }
   size_t min_young_region_num() {
     return size_to_region_num(_min_gen0_size);
   }
@@ -501,11 +506,10 @@
 
   if (FLAG_IS_CMDLINE(NewRatio)) {
     if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
-      gclog_or_tty->print_cr("-XX:NewSize and -XX:MaxNewSize overrides -XX:NewRatio");
+      warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
     } else {
       // Treat NewRatio as a fixed size that is only recalculated when the heap size changes
-      size_t heap_regions = sizer.size_to_region_num(_g1->n_regions());
-      update_young_list_size_using_newratio(heap_regions);
+      update_young_list_size_using_newratio(_g1->n_regions());
       _using_new_ratio_calculations = true;
     }
   }
@@ -1479,6 +1483,8 @@
 #endif
     print_stats(1, "Other", other_time_ms);
     print_stats(2, "Choose CSet", _recorded_young_cset_choice_time_ms);
+    print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
+    print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
 
     for (int i = 0; i < _aux_num; ++i) {
       if (_cur_aux_times_set[i]) {
@@ -1519,11 +1525,17 @@
   }
 
   if (_last_full_young_gc) {
-    ergo_verbose2(ErgoPartiallyYoungGCs,
-                  "start partially-young GCs",
-                  ergo_format_byte_perc("known garbage"),
-                  _known_garbage_bytes, _known_garbage_ratio * 100.0);
-    set_full_young_gcs(false);
+    if (!last_pause_included_initial_mark) {
+      ergo_verbose2(ErgoPartiallyYoungGCs,
+                    "start partially-young GCs",
+                    ergo_format_byte_perc("known garbage"),
+                    _known_garbage_bytes, _known_garbage_ratio * 100.0);
+      set_full_young_gcs(false);
+    } else {
+      ergo_verbose0(ErgoPartiallyYoungGCs,
+                    "do not start partially-young GCs",
+                    ergo_format_reason("concurrent cycle is about to start"));
+    }
     _last_full_young_gc = false;
   }
 
@@ -2485,6 +2497,13 @@
       // initiate a new cycle.
 
       set_during_initial_mark_pause();
+      // We do not allow non-full young GCs during marking.
+      if (!full_young_gcs()) {
+        set_full_young_gcs(true);
+        ergo_verbose0(ErgoPartiallyYoungGCs,
+                      "end partially-young GCs",
+                      ergo_format_reason("concurrent cycle is about to start"));
+      }
 
       // And we can now clear initiate_conc_mark_if_possible() as
       // we've already acted on it.
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Mon Sep 26 12:06:58 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Tue Oct 18 17:18:37 2011 -0700
@@ -119,6 +119,8 @@
   double _cur_satb_drain_time_ms;
   double _cur_clear_ct_time_ms;
   bool   _satb_drain_time_set;
+  double _cur_ref_proc_time_ms;
+  double _cur_ref_enq_time_ms;
 
 #ifndef PRODUCT
   // Card Table Count Cache stats
@@ -986,6 +988,14 @@
     _cur_aux_times_ms[i] += ms;
   }
 
+  void record_ref_proc_time(double ms) {
+    _cur_ref_proc_time_ms = ms;
+  }
+
+  void record_ref_enq_time(double ms) {
+    _cur_ref_enq_time_ms = ms;
+  }
+
 #ifndef PRODUCT
   void record_cc_clear_time(double ms) {
     if (_min_clear_cc_time_ms < 0.0 || ms <= _min_clear_cc_time_ms)
@@ -1139,6 +1149,10 @@
     return young_list_length < young_list_max_length;
   }
 
+  size_t young_list_max_length() {
+    return _young_list_max_length;
+  }
+
   void update_region_num(bool young);
 
   bool full_young_gcs() {
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Mon Sep 26 12:06:58 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Tue Oct 18 17:18:37 2011 -0700
@@ -62,6 +62,8 @@
   // hook up weak ref data so it can be used during Mark-Sweep
   assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
   assert(rp != NULL, "should be non-NULL");
+  assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Precondition");
+
   GenMarkSweep::_ref_processor = rp;
   rp->setup_policy(clear_all_softrefs);
 
@@ -139,6 +141,8 @@
 
   // Process reference objects found during marking
   ReferenceProcessor* rp = GenMarkSweep::ref_processor();
+  assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Sanity");
+
   rp->setup_policy(clear_all_softrefs);
   rp->process_discovered_references(&GenMarkSweep::is_alive,
                                     &GenMarkSweep::keep_alive,
@@ -166,7 +170,6 @@
   GenMarkSweep::follow_mdo_weak_refs();
   assert(GenMarkSweep::_marking_stack.is_empty(), "just drained");
 
-
   // Visit interned string tables and delete unmarked oops
   StringTable::unlink(&GenMarkSweep::is_alive);
   // Clean up unreferenced symbols in symbol table.
@@ -346,7 +349,8 @@
                            NULL,  // do not touch code cache here
                            &GenMarkSweep::adjust_pointer_closure);
 
-  g1h->ref_processor()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);
+  assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
+  g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);
 
   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
--- a/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp	Mon Sep 26 12:06:58 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp	Tue Oct 18 17:18:37 2011 -0700
@@ -27,19 +27,69 @@
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
 
-G1MonitoringSupport::G1MonitoringSupport(G1CollectedHeap* g1h,
-                                         VirtualSpace* g1_storage_addr) :
+G1GenerationCounters::G1GenerationCounters(G1MonitoringSupport* g1mm,
+                                           const char* name,
+                                           int ordinal, int spaces,
+                                           size_t min_capacity,
+                                           size_t max_capacity,
+                                           size_t curr_capacity)
+  : GenerationCounters(name, ordinal, spaces, min_capacity,
+                       max_capacity, curr_capacity), _g1mm(g1mm) { }
+
+// We pad the capacity three times given that the young generation
+// contains three spaces (eden and two survivors).
+G1YoungGenerationCounters::G1YoungGenerationCounters(G1MonitoringSupport* g1mm,
+                                                     const char* name)
+  : G1GenerationCounters(g1mm, name, 0 /* ordinal */, 3 /* spaces */,
+               G1MonitoringSupport::pad_capacity(0, 3) /* min_capacity */,
+               G1MonitoringSupport::pad_capacity(g1mm->young_gen_max(), 3),
+               G1MonitoringSupport::pad_capacity(0, 3) /* curr_capacity */) {
+  update_all();
+}
+
+G1OldGenerationCounters::G1OldGenerationCounters(G1MonitoringSupport* g1mm,
+                                                 const char* name)
+  : G1GenerationCounters(g1mm, name, 1 /* ordinal */, 1 /* spaces */,
+               G1MonitoringSupport::pad_capacity(0) /* min_capacity */,
+               G1MonitoringSupport::pad_capacity(g1mm->old_gen_max()),
+               G1MonitoringSupport::pad_capacity(0) /* curr_capacity */) {
+  update_all();
+}
+
+void G1YoungGenerationCounters::update_all() {
+  size_t committed =
+            G1MonitoringSupport::pad_capacity(_g1mm->young_gen_committed(), 3);
+  _current_size->set_value(committed);
+}
+
+void G1OldGenerationCounters::update_all() {
+  size_t committed =
+            G1MonitoringSupport::pad_capacity(_g1mm->old_gen_committed());
+  _current_size->set_value(committed);
+}
+
+G1MonitoringSupport::G1MonitoringSupport(G1CollectedHeap* g1h) :
   _g1h(g1h),
   _incremental_collection_counters(NULL),
   _full_collection_counters(NULL),
-  _non_young_collection_counters(NULL),
+  _old_collection_counters(NULL),
   _old_space_counters(NULL),
   _young_collection_counters(NULL),
   _eden_counters(NULL),
   _from_counters(NULL),
   _to_counters(NULL),
-  _g1_storage_addr(g1_storage_addr)
-{
+
+  _overall_reserved(0),
+  _overall_committed(0),    _overall_used(0),
+  _young_region_num(0),
+  _young_gen_committed(0),
+  _eden_committed(0),       _eden_used(0),
+  _survivor_committed(0),   _survivor_used(0),
+  _old_committed(0),        _old_used(0) {
+
+  _overall_reserved = g1h->max_capacity();
+  recalculate_sizes();
+
   // Counters for GC collections
   //
   //  name "collector.0".  In a generational collector this would be the
@@ -69,110 +119,147 @@
   // generational GC terms.  The "1, 1" parameters are for
   // the n-th generation (=1) with 1 space.
   // Counters are created from minCapacity, maxCapacity, and capacity
-  _non_young_collection_counters =
-    new GenerationCounters("whole heap", 1, 1, _g1_storage_addr);
+  _old_collection_counters = new G1OldGenerationCounters(this, "old");
 
   //  name  "generation.1.space.0"
   // Counters are created from maxCapacity, capacity, initCapacity,
   // and used.
-  _old_space_counters = new HSpaceCounters("space", 0,
-    _g1h->max_capacity(), _g1h->capacity(), _non_young_collection_counters);
+  _old_space_counters = new HSpaceCounters("space", 0 /* ordinal */,
+    pad_capacity(overall_reserved()) /* max_capacity */,
+    pad_capacity(old_space_committed()) /* init_capacity */,
+   _old_collection_counters);
 
   //   Young collection set
   //  name "generation.0".  This is logically the young generation.
   //  The "0, 3" are parameters for the n-th generation (=0) with 3 spaces.
-  // See  _non_young_collection_counters for additional counters
-  _young_collection_counters = new GenerationCounters("young", 0, 3, NULL);
+  // See  _old_collection_counters for additional counters
+  _young_collection_counters = new G1YoungGenerationCounters(this, "young");
 
-  // Replace "max_heap_byte_size() with maximum young gen size for
-  // g1Collectedheap
   //  name "generation.0.space.0"
   // See _old_space_counters for additional counters
-  _eden_counters = new HSpaceCounters("eden", 0,
-    _g1h->max_capacity(), eden_space_committed(),
+  _eden_counters = new HSpaceCounters("eden", 0 /* ordinal */,
+    pad_capacity(overall_reserved()) /* max_capacity */,
+    pad_capacity(eden_space_committed()) /* init_capacity */,
     _young_collection_counters);
 
   //  name "generation.0.space.1"
   // See _old_space_counters for additional counters
   // Set the arguments to indicate that this survivor space is not used.
-  _from_counters = new HSpaceCounters("s0", 1, (long) 0, (long) 0,
+  _from_counters = new HSpaceCounters("s0", 1 /* ordinal */,
+    pad_capacity(0) /* max_capacity */,
+    pad_capacity(0) /* init_capacity */,
     _young_collection_counters);
+  // Given that this survivor space is not used, we update it here
+  // once to reflect that its used space is 0 so that we don't have to
+  // worry about updating it again later.
+  _from_counters->update_used(0);
 
   //  name "generation.0.space.2"
   // See _old_space_counters for additional counters
-  _to_counters = new HSpaceCounters("s1", 2,
-    _g1h->max_capacity(),
-    survivor_space_committed(),
+  _to_counters = new HSpaceCounters("s1", 2 /* ordinal */,
+    pad_capacity(overall_reserved()) /* max_capacity */,
+    pad_capacity(survivor_space_committed()) /* init_capacity */,
     _young_collection_counters);
 }
 
-size_t G1MonitoringSupport::overall_committed() {
-  return g1h()->capacity();
-}
+void G1MonitoringSupport::recalculate_sizes() {
+  G1CollectedHeap* g1 = g1h();
+
+  // Recalculate all the sizes from scratch. We assume that this is
+  // called at a point where no concurrent updates to the various
+  // values we read here are possible (i.e., at a STW phase at the end
+  // of a GC).
 
-size_t G1MonitoringSupport::overall_used() {
-  return g1h()->used_unlocked();
-}
+  size_t young_list_length = g1->young_list()->length();
+  size_t survivor_list_length = g1->g1_policy()->recorded_survivor_regions();
+  assert(young_list_length >= survivor_list_length, "invariant");
+  size_t eden_list_length = young_list_length - survivor_list_length;
+  // Max length includes any potential extensions to the young gen
+  // we'll do when the GC locker is active.
+  size_t young_list_max_length = g1->g1_policy()->young_list_max_length();
+  assert(young_list_max_length >= survivor_list_length, "invariant");
+  size_t eden_list_max_length = young_list_max_length - survivor_list_length;
 
-size_t G1MonitoringSupport::eden_space_committed() {
-  return MAX2(eden_space_used(), (size_t) HeapRegion::GrainBytes);
-}
+  _overall_used = g1->used_unlocked();
+  _eden_used = eden_list_length * HeapRegion::GrainBytes;
+  _survivor_used = survivor_list_length * HeapRegion::GrainBytes;
+  _young_region_num = young_list_length;
+  _old_used = subtract_up_to_zero(_overall_used, _eden_used + _survivor_used);
+
+  // First calculate the committed sizes that can be calculated independently.
+  _survivor_committed = _survivor_used;
+  _old_committed = HeapRegion::align_up_to_region_byte_size(_old_used);
 
-size_t G1MonitoringSupport::eden_space_used() {
-  size_t young_list_length = g1h()->young_list()->length();
-  size_t eden_used = young_list_length * HeapRegion::GrainBytes;
-  size_t survivor_used = survivor_space_used();
-  eden_used = subtract_up_to_zero(eden_used, survivor_used);
-  return eden_used;
-}
+  // Next, start with the overall committed size.
+  _overall_committed = g1->capacity();
+  size_t committed = _overall_committed;
+
+  // Remove the committed size we have calculated so far (for the
+  // survivor and old space).
+  assert(committed >= (_survivor_committed + _old_committed), "sanity");
+  committed -= _survivor_committed + _old_committed;
+
+  // Next, calculate and remove the committed size for the eden.
+  _eden_committed = eden_list_max_length * HeapRegion::GrainBytes;
+  // Somewhat defensive: be robust in case there are inaccuracies in
+  // the calculations
+  _eden_committed = MIN2(_eden_committed, committed);
+  committed -= _eden_committed;
 
-size_t G1MonitoringSupport::survivor_space_committed() {
-  return MAX2(survivor_space_used(),
-              (size_t) HeapRegion::GrainBytes);
-}
+  // Finally, give the rest to the old space...
+  _old_committed += committed;
+  // ..and calculate the young gen committed.
+  _young_gen_committed = _eden_committed + _survivor_committed;
 
-size_t G1MonitoringSupport::survivor_space_used() {
-  size_t survivor_num = g1h()->g1_policy()->recorded_survivor_regions();
-  size_t survivor_used = survivor_num * HeapRegion::GrainBytes;
-  return survivor_used;
+  assert(_overall_committed ==
+         (_eden_committed + _survivor_committed + _old_committed),
+         "the committed sizes should add up");
+  // Somewhat defensive: cap the eden used size to make sure it
+  // never exceeds the committed size.
+  _eden_used = MIN2(_eden_used, _eden_committed);
+  // _survivor_committed and _old_committed are calculated in terms of
+  // the corresponding _*_used value, so the next two conditions
+  // should hold.
+  assert(_survivor_used <= _survivor_committed, "post-condition");
+  assert(_old_used <= _old_committed, "post-condition");
 }
 
-size_t G1MonitoringSupport::old_space_committed() {
-  size_t committed = overall_committed();
-  size_t eden_committed = eden_space_committed();
-  size_t survivor_committed = survivor_space_committed();
-  committed = subtract_up_to_zero(committed, eden_committed);
-  committed = subtract_up_to_zero(committed, survivor_committed);
-  committed = MAX2(committed, (size_t) HeapRegion::GrainBytes);
-  return committed;
-}
+void G1MonitoringSupport::recalculate_eden_size() {
+  G1CollectedHeap* g1 = g1h();
+
+  // When a new eden region is allocated, only the eden_used size is
+  // affected (since we have recalculated everything else at the last GC).
 
-// See the comment near the top of g1MonitoringSupport.hpp for
-// an explanation of these calculations for "used" and "capacity".
-size_t G1MonitoringSupport::old_space_used() {
-  size_t used = overall_used();
-  size_t eden_used = eden_space_used();
-  size_t survivor_used = survivor_space_used();
-  used = subtract_up_to_zero(used, eden_used);
-  used = subtract_up_to_zero(used, survivor_used);
-  return used;
-}
-
-void G1MonitoringSupport::update_counters() {
-  if (UsePerfData) {
-    eden_counters()->update_capacity(eden_space_committed());
-    eden_counters()->update_used(eden_space_used());
-    to_counters()->update_capacity(survivor_space_committed());
-    to_counters()->update_used(survivor_space_used());
-    old_space_counters()->update_capacity(old_space_committed());
-    old_space_counters()->update_used(old_space_used());
-    non_young_collection_counters()->update_all();
+  size_t young_region_num = g1h()->young_list()->length();
+  if (young_region_num > _young_region_num) {
+    size_t diff = young_region_num - _young_region_num;
+    _eden_used += diff * HeapRegion::GrainBytes;
+    // Somewhat defensive: cap the eden used size to make sure it
+    // never exceeds the committed size.
+    _eden_used = MIN2(_eden_used, _eden_committed);
+    _young_region_num = young_region_num;
   }
 }
 
-void G1MonitoringSupport::update_eden_counters() {
+void G1MonitoringSupport::update_sizes() {
+  recalculate_sizes();
   if (UsePerfData) {
-    eden_counters()->update_capacity(eden_space_committed());
+    eden_counters()->update_capacity(pad_capacity(eden_space_committed()));
+    eden_counters()->update_used(eden_space_used());
+    // only the to-survivor space (s1) is active, so we don't need to
+    // update the counters for the from-survivor space (s0)
+    to_counters()->update_capacity(pad_capacity(survivor_space_committed()));
+    to_counters()->update_used(survivor_space_used());
+    old_space_counters()->update_capacity(pad_capacity(old_space_committed()));
+    old_space_counters()->update_used(old_space_used());
+    old_collection_counters()->update_all();
+    young_collection_counters()->update_all();
+  }
+}
+
+void G1MonitoringSupport::update_eden_size() {
+  recalculate_eden_size();
+  if (UsePerfData) {
     eden_counters()->update_used(eden_space_used());
   }
 }
--- a/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Mon Sep 26 12:06:58 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Tue Oct 18 17:18:37 2011 -0700
@@ -28,101 +28,95 @@
 #include "gc_implementation/shared/hSpaceCounters.hpp"
 
 class G1CollectedHeap;
-class G1SpaceMonitoringSupport;
 
-// Class for monitoring logical spaces in G1.
-// G1 defines a set of regions as a young
-// collection (analogous to a young generation).
-// The young collection is a logical generation
-// with no fixed chunk (see space.hpp) reflecting
-// the address space for the generation.  In addition
-// to the young collection there is its complement
-// the non-young collection that is simply the regions
-// not in the young collection.  The non-young collection
-// is treated here as a logical old generation only
-// because the monitoring tools expect a generational
-// heap.  The monitoring tools expect that a Space
-// (see space.hpp) exists that describe the
-// address space of young collection and non-young
-// collection and such a view is provided here.
+// Class for monitoring logical spaces in G1. It provides data for
+// both G1's jstat counters and G1's memory pools.
+//
+// G1 splits the heap into heap regions and each heap region belongs
+// to one of the following categories:
+//
+// * eden      : regions that have been allocated since the last GC
+// * survivors : regions with objects that survived the last few GCs
+// * old       : long-lived non-humongous regions
+// * humongous : humongous regions
+// * free      : free regions
+//
+// The combination of eden and survivor regions forms the equivalent of
+// the young generation in the other GCs. The combination of old and
+// humongous regions forms the equivalent of the old generation in the
+// other GCs. Free regions do not have a good equivalent in the other
+// GCs given that they can be allocated as any of the other region types.
 //
-// This class provides interfaces to access
-// the value of variables for the young collection
-// that include the "capacity" and "used" of the
-// young collection along with constant values
-// for the minimum and maximum capacities for
-// the logical spaces.  Similarly for the non-young
-// collection.
-//
-// Also provided are counters for G1 concurrent collections
-// and stop-the-world full heap collecitons.
+// The monitoring tools expect the heap to contain a number of
+// generations (young, old, perm) and each generation to contain a
+// number of spaces (young: eden, survivors, old). Given that G1 does
+// not maintain those spaces physically (e.g., the set of
+// non-contiguous eden regions can be considered as a "logical"
+// space), we'll provide the illusion that those generations and
+// spaces exist. In reality, each generation and space refers to a set
+// of heap regions that are potentially non-contiguous.
 //
-// Below is a description of how "used" and "capactiy"
-// (or committed) is calculated for the logical spaces.
+// This class provides interfaces to access the min, current, and max
+// capacity and current occupancy for each of G1's logical spaces and
+// generations we expose to the monitoring tools. Also provided are
+// counters for G1 concurrent collections and stop-the-world full heap
+// collections.
 //
-// 1) The used space calculation for a pool is not necessarily
-// independent of the others. We can easily get from G1 the overall
-// used space in the entire heap, the number of regions in the young
-// generation (includes both eden and survivors), and the number of
-// survivor regions. So, from that we calculate:
+// Below is a description of how the various sizes are calculated.
 //
-//  survivor_used = survivor_num * region_size
-//  eden_used     = young_region_num * region_size - survivor_used
-//  old_gen_used  = overall_used - eden_used - survivor_used
+// * Current Capacity
 //
-// Note that survivor_used and eden_used are upper bounds. To get the
-// actual value we would have to iterate over the regions and add up
-// ->used(). But that'd be expensive. So, we'll accept some lack of
-// accuracy for those two. But, we have to be careful when calculating
-// old_gen_used, in case we subtract from overall_used more then the
-// actual number and our result goes negative.
+//    - heap_capacity = current heap capacity (e.g., current committed size)
+//    - young_gen_capacity = current max young gen target capacity
+//          (i.e., young gen target capacity + max allowed expansion capacity)
+//    - survivor_capacity = current survivor region capacity
+//    - eden_capacity = young_gen_capacity - survivor_capacity
+//    - old_capacity = heap_capacity - young_gen_capacity
+//
+//    What we do in the above is to distribute the free regions among
+//    eden_capacity and old_capacity.
 //
-// 2) Calculating the used space is straightforward, as described
-// above. However, how do we calculate the committed space, given that
-// we allocate space for the eden, survivor, and old gen out of the
-// same pool of regions? One way to do this is to use the used value
-// as also the committed value for the eden and survivor spaces and
-// then calculate the old gen committed space as follows:
+// * Occupancy
 //
-//  old_gen_committed = overall_committed - eden_committed - survivor_committed
+//    - young_gen_used = current young region capacity
+//    - survivor_used = survivor_capacity
+//    - eden_used = young_gen_used - survivor_used
+//    - old_used = overall_used - young_gen_used
 //
-// Maybe a better way to do that would be to calculate used for eden
-// and survivor as a sum of ->used() over their regions and then
-// calculate committed as region_num * region_size (i.e., what we use
-// to calculate the used space now). This is something to consider
-// in the future.
+//    Unfortunately, we currently only keep track of the number of
+//    currently allocated young and survivor regions + the overall used
+//    bytes in the heap, so the above can be a little inaccurate.
+//
+// * Min Capacity
 //
-// 3) Another decision that is again not straightforward is what is
-// the max size that each memory pool can grow to. One way to do this
-// would be to use the committed size for the max for the eden and
-// survivors and calculate the old gen max as follows (basically, it's
-// a similar pattern to what we use for the committed space, as
-// described above):
+//    We set this to 0 for all spaces. We could consider setting the old
+//    min capacity to the min capacity of the heap (see 7078465).
+//
+// * Max Capacity
 //
-//  old_gen_max = overall_max - eden_max - survivor_max
+//    For jstat, we set the max capacity of all spaces to heap_capacity,
+//    given that we don't always have a reasonable upper bound on how big
+//    each space can grow. For the memory pools, we actually make the max
+//    capacity undefined. We could consider setting the old max capacity
+//    to the max capacity of the heap (see 7078465).
 //
-// Unfortunately, the above makes the max of each pool fluctuate over
-// time and, even though this is allowed according to the spec, it
-// broke several assumptions in the M&M framework (there were cases
-// where used would reach a value greater than max). So, for max we
-// use -1, which means "undefined" according to the spec.
+// If we had more accurate occupancy / capacity information per
+// region set, the above calculations would be greatly simplified and
+// be made more accurate.
 //
-// 4) Now, there is a very subtle issue with all the above. The
-// framework will call get_memory_usage() on the three pools
-// asynchronously. As a result, each call might get a different value
-// for, say, survivor_num which will yield inconsistent values for
-// eden_used, survivor_used, and old_gen_used (as survivor_num is used
-// in the calculation of all three). This would normally be
-// ok. However, it's possible that this might cause the sum of
-// eden_used, survivor_used, and old_gen_used to go over the max heap
-// size and this seems to sometimes cause JConsole (and maybe other
-// clients) to get confused. There's not a really an easy / clean
-// solution to this problem, due to the asynchrounous nature of the
-// framework.
+// We update all the above synchronously and we store the results in
+// fields so that we just read said fields when needed. A subtle point
+// is that all the above sizes need to be recalculated when the old
+// gen changes capacity (after a GC or after a humongous allocation)
+// but only the eden occupancy changes when a new eden region is
+// allocated. So, in the latter case we have minimal recalculation to
+// do which is important as we want to keep the eden region allocation
+// path as low-overhead as possible.
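
A minimal worked example of the capacity and occupancy split described in the comment above. All numbers are hypothetical; the real inputs come from the heap and policy objects at the end of a GC, not from constants like these.

// Hypothetical illustration of the size calculations sketched above.
#include <cstddef>

static const std::size_t M = 1024 * 1024;

static const std::size_t heap_capacity      = 64 * M;  // committed heap size
static const std::size_t young_gen_capacity = 10 * M;  // young target + allowed expansion
static const std::size_t survivor_capacity  =  2 * M;  // recorded survivor regions
static const std::size_t eden_capacity      = young_gen_capacity - survivor_capacity; //  8 MB
static const std::size_t old_capacity       = heap_capacity - young_gen_capacity;     // 54 MB

static const std::size_t overall_used       = 30 * M;  // overall used bytes in the heap
static const std::size_t young_gen_used     =  6 * M;  // young region count * region size
static const std::size_t survivor_used      = survivor_capacity;                      //  2 MB
static const std::size_t eden_used          = young_gen_used - survivor_used;         //  4 MB
static const std::size_t old_used           = overall_used - young_gen_used;          // 24 MB
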
 
 class G1MonitoringSupport : public CHeapObj {
+  friend class VMStructs;
+
   G1CollectedHeap* _g1h;
-  VirtualSpace* _g1_storage_addr;
 
   // jstat performance counters
   //  incremental collections both fully and partially young
@@ -133,9 +127,9 @@
   // _from_counters, and _to_counters are associated with
   // this "generational" counter.
   GenerationCounters*  _young_collection_counters;
-  //  non-young collection set counters. The _old_space_counters
+  //  old collection set counters. The _old_space_counters
   // below are associated with this "generational" counter.
-  GenerationCounters*  _non_young_collection_counters;
+  GenerationCounters*  _old_collection_counters;
   // Counters for the capacity and used for
   //   the whole heap
   HSpaceCounters*      _old_space_counters;
@@ -145,6 +139,27 @@
   HSpaceCounters*      _from_counters;
   HSpaceCounters*      _to_counters;
 
+  // When it's appropriate to recalculate the various sizes (at the
+  // end of a GC, when a new eden region is allocated, etc.) we store
+  // them here so that we can easily report them when needed and not
+  // have to recalculate them every time.
+
+  size_t _overall_reserved;
+  size_t _overall_committed;
+  size_t _overall_used;
+
+  size_t _young_region_num;
+  size_t _young_gen_committed;
+  size_t _eden_committed;
+  size_t _eden_used;
+  size_t _survivor_committed;
+  size_t _survivor_used;
+
+  size_t _old_committed;
+  size_t _old_used;
+
+  G1CollectedHeap* g1h() { return _g1h; }
+
   // It returns x - y if x > y, 0 otherwise.
   // As described in the comment above, some of the inputs to the
   // calculations we have to do are obtained concurrently and hence
@@ -160,15 +175,35 @@
     }
   }
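
A standalone sketch of the clamped subtraction described above ("x - y if x > y, 0 otherwise"); illustration only, the real helper is the class member shown in this hunk.

#include <cstddef>

// Clamped subtraction: concurrently-read inputs can never drive a
// "used" value negative, which is exactly what the comment above wants.
static std::size_t subtract_up_to_zero_sketch(std::size_t x, std::size_t y) {
  return (x > y) ? (x - y) : 0;
}

// subtract_up_to_zero_sketch(10, 3) == 7
// subtract_up_to_zero_sketch(3, 10) == 0
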
 
+  // Recalculate all the sizes.
+  void recalculate_sizes();
+  // Recalculate only what's necessary when a new eden region is allocated.
+  void recalculate_eden_size();
+
  public:
-  G1MonitoringSupport(G1CollectedHeap* g1h, VirtualSpace* g1_storage_addr);
+  G1MonitoringSupport(G1CollectedHeap* g1h);
 
-  G1CollectedHeap* g1h() { return _g1h; }
-  VirtualSpace* g1_storage_addr() { return _g1_storage_addr; }
+  // Unfortunately, the jstat tool assumes that no space has 0
+  // capacity. In our case, given that each space is logical, it's
+  // possible that no regions will be allocated to it, hence to have 0
+  // capacity (e.g., if there are no survivor regions, the survivor
+  // space has 0 capacity). The way we deal with this is to always pad
+  // each capacity value we report to jstat by a very small amount to
+  // make sure that it's never zero. Given that we sometimes have to
+  // report a capacity of a generation that contains several spaces
+  // (e.g., young gen includes one eden, two survivor spaces), the
+  // mult parameter is provided in order to add the appropriate
+  // padding multiple times so that the capacities add up correctly.
+  static size_t pad_capacity(size_t size_bytes, size_t mult = 1) {
+    return size_bytes + MinObjAlignmentInBytes * mult;
+  }
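
For instance, assuming an 8-byte MinObjAlignmentInBytes (an assumed typical value, not something this patch states), an entirely empty young gen would still report a small non-zero capacity to jstat:

// Sketch of the padding scheme with a hypothetical 8-byte minimum object alignment.
#include <cstddef>

static const std::size_t kMinObjAlignmentInBytes = 8;  // assumed value

static std::size_t pad_capacity_sketch(std::size_t size_bytes, std::size_t mult = 1) {
  return size_bytes + kMinObjAlignmentInBytes * mult;
}

// pad_capacity_sketch(0)    ==  8  -- a single empty space stays non-zero
// pad_capacity_sketch(0, 3) == 24  -- an empty young gen reporting eden + two survivors
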
 
-  // Performance Counter accessors
-  void update_counters();
-  void update_eden_counters();
+  // Recalculate all the sizes from scratch and update all the jstat
+  // counters accordingly.
+  void update_sizes();
+  // Recalculate only what's necessary when a new eden region is
+  // allocated and update any jstat counters that need to be updated.
+  void update_eden_size();
 
   CollectorCounters* incremental_collection_counters() {
     return _incremental_collection_counters;
@@ -176,8 +211,11 @@
   CollectorCounters* full_collection_counters() {
     return _full_collection_counters;
   }
-  GenerationCounters* non_young_collection_counters() {
-    return _non_young_collection_counters;
+  GenerationCounters* young_collection_counters() {
+    return _young_collection_counters;
+  }
+  GenerationCounters* old_collection_counters() {
+    return _old_collection_counters;
   }
   HSpaceCounters*      old_space_counters() { return _old_space_counters; }
   HSpaceCounters*      eden_counters() { return _eden_counters; }
@@ -187,17 +225,45 @@
   // Monitoring support used by
   //   MemoryService
   //   jstat counters
-  size_t overall_committed();
-  size_t overall_used();
+
+  size_t overall_reserved()           { return _overall_reserved;     }
+  size_t overall_committed()          { return _overall_committed;    }
+  size_t overall_used()               { return _overall_used;         }
 
-  size_t eden_space_committed();
-  size_t eden_space_used();
+  size_t young_gen_committed()        { return _young_gen_committed;  }
+  size_t young_gen_max()              { return overall_reserved();    }
+  size_t eden_space_committed()       { return _eden_committed;       }
+  size_t eden_space_used()            { return _eden_used;            }
+  size_t survivor_space_committed()   { return _survivor_committed;   }
+  size_t survivor_space_used()        { return _survivor_used;        }
+
+  size_t old_gen_committed()          { return old_space_committed(); }
+  size_t old_gen_max()                { return overall_reserved();    }
+  size_t old_space_committed()        { return _old_committed;        }
+  size_t old_space_used()             { return _old_used;             }
+};
 
-  size_t survivor_space_committed();
-  size_t survivor_space_used();
+class G1GenerationCounters: public GenerationCounters {
+protected:
+  G1MonitoringSupport* _g1mm;
+
+public:
+  G1GenerationCounters(G1MonitoringSupport* g1mm,
+                       const char* name, int ordinal, int spaces,
+                       size_t min_capacity, size_t max_capacity,
+                       size_t curr_capacity);
+};
 
-  size_t old_space_committed();
-  size_t old_space_used();
+class G1YoungGenerationCounters: public G1GenerationCounters {
+public:
+  G1YoungGenerationCounters(G1MonitoringSupport* g1mm, const char* name);
+  virtual void update_all();
+};
+
+class G1OldGenerationCounters: public G1GenerationCounters {
+public:
+  G1OldGenerationCounters(G1MonitoringSupport* g1mm, const char* name);
+  virtual void update_all();
 };
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1MONITORINGSUPPORT_HPP
--- a/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Mon Sep 26 12:06:58 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Tue Oct 18 17:18:37 2011 -0700
@@ -34,6 +34,7 @@
 class CMMarkStack;
 class G1ParScanThreadState;
 class CMTask;
+class ReferenceProcessor;
 
 // A class that scans oops in a given heap region (much as OopsInGenClosure
 // scans oops in a generation.)
@@ -59,8 +60,10 @@
 
 class G1ParPushHeapRSClosure : public G1ParClosureSuper {
 public:
-  G1ParPushHeapRSClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
+  G1ParPushHeapRSClosure(G1CollectedHeap* g1,
+                         G1ParScanThreadState* par_scan_state):
     G1ParClosureSuper(g1, par_scan_state) { }
+
   template <class T> void do_oop_nv(T* p);
   virtual void do_oop(oop* p)          { do_oop_nv(p); }
   virtual void do_oop(narrowOop* p)    { do_oop_nv(p); }
@@ -68,8 +71,13 @@
 
 class G1ParScanClosure : public G1ParClosureSuper {
 public:
-  G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
-    G1ParClosureSuper(g1, par_scan_state) { }
+  G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) :
+    G1ParClosureSuper(g1, par_scan_state)
+  {
+    assert(_ref_processor == NULL, "sanity");
+    _ref_processor = rp;
+  }
+
   template <class T> void do_oop_nv(T* p);
   virtual void do_oop(oop* p)          { do_oop_nv(p); }
   virtual void do_oop(narrowOop* p)    { do_oop_nv(p); }
@@ -92,9 +100,18 @@
 
 class G1ParScanPartialArrayClosure : public G1ParClosureSuper {
   G1ParScanClosure _scanner;
+
 public:
-  G1ParScanPartialArrayClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
-    G1ParClosureSuper(g1, par_scan_state), _scanner(g1, par_scan_state) { }
+  G1ParScanPartialArrayClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) :
+    G1ParClosureSuper(g1, par_scan_state), _scanner(g1, par_scan_state, rp)
+  {
+    assert(_ref_processor == NULL, "sanity");
+  }
+
+  G1ParScanClosure* scanner() {
+    return &_scanner;
+  }
+
   template <class T> void do_oop_nv(T* p);
   virtual void do_oop(oop* p)       { do_oop_nv(p); }
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
@@ -105,7 +122,8 @@
   G1ParScanClosure *_scanner;
 protected:
   template <class T> void mark_object(T* p);
-  oop copy_to_survivor_space(oop obj, bool should_mark_copy);
+  oop copy_to_survivor_space(oop obj, bool should_mark_root,
+                                      bool should_mark_copy);
 public:
   G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
                   G1ParScanClosure *scanner) :
@@ -116,10 +134,20 @@
          bool do_mark_object>
 class G1ParCopyClosure : public G1ParCopyHelper {
   G1ParScanClosure _scanner;
+
   template <class T> void do_oop_work(T* p);
+
 public:
-  G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
-    _scanner(g1, par_scan_state), G1ParCopyHelper(g1, par_scan_state, &_scanner) { }
+  G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
+                   ReferenceProcessor* rp) :
+    _scanner(g1, par_scan_state, rp),
+    G1ParCopyHelper(g1, par_scan_state, &_scanner)
+  {
+    assert(_ref_processor == NULL, "sanity");
+  }
+
+  G1ParScanClosure* scanner() { return &_scanner; }
+
   template <class T> void do_oop_nv(T* p) {
     do_oop_work(p);
   }
@@ -129,21 +157,25 @@
 
 typedef G1ParCopyClosure<false, G1BarrierNone, false> G1ParScanExtRootClosure;
 typedef G1ParCopyClosure<true,  G1BarrierNone, false> G1ParScanPermClosure;
-typedef G1ParCopyClosure<false, G1BarrierRS,   false> G1ParScanHeapRSClosure;
+
 typedef G1ParCopyClosure<false, G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
 typedef G1ParCopyClosure<true,  G1BarrierNone, true> G1ParScanAndMarkPermClosure;
-typedef G1ParCopyClosure<false, G1BarrierRS,   true> G1ParScanAndMarkHeapRSClosure;
+
+// The following closure types are no longer used but are retained
+// for historical reasons:
+// typedef G1ParCopyClosure<false, G1BarrierRS,   false> G1ParScanHeapRSClosure;
+// typedef G1ParCopyClosure<false, G1BarrierRS,   true> G1ParScanAndMarkHeapRSClosure;
 
-// This is the only case when we set skip_cset_test. Basically, this
-// closure is (should?) only be called directly while we're draining
-// the overflow and task queues. In that case we know that the
-// reference in question points into the collection set, otherwise we
-// would not have pushed it on the queue. The following is defined in
-// g1_specialized_oop_closures.hpp.
-// typedef G1ParCopyClosure<false, G1BarrierEvac, false, true> G1ParScanHeapEvacClosure;
-// We need a separate closure to handle references during evacuation
-// failure processing, as we cannot asume that the reference already
-// points into the collection set (like G1ParScanHeapEvacClosure does).
+// The following closure type is defined in g1_specialized_oop_closures.hpp:
+//
+// typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacClosure;
+
+// We use a separate closure to handle references during evacuation
+// failure processing.
+// We could have used another instance of G1ParScanHeapEvacClosure
+// (since that closure no longer assumes that the references it
+// handles point into the collection set).
+
 typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;
 
 class FilterIntoCSClosure: public OopClosure {
@@ -152,9 +184,10 @@
   DirtyCardToOopClosure* _dcto_cl;
 public:
   FilterIntoCSClosure(  DirtyCardToOopClosure* dcto_cl,
-                        G1CollectedHeap* g1, OopClosure* oc) :
-    _dcto_cl(dcto_cl), _g1(g1), _oc(oc)
-  {}
+                        G1CollectedHeap* g1,
+                        OopClosure* oc) :
+    _dcto_cl(dcto_cl), _g1(g1), _oc(oc) { }
+
   template <class T> void do_oop_nv(T* p);
   virtual void do_oop(oop* p)        { do_oop_nv(p); }
   virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Mon Sep 26 12:06:58 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Tue Oct 18 17:18:37 2011 -0700
@@ -234,6 +234,7 @@
   HeapRegion *startRegion = calculateStartRegion(worker_i);
 
   ScanRSClosure scanRScl(oc, worker_i);
+
   _g1->collection_set_iterate_from(startRegion, &scanRScl);
   scanRScl.set_try_claimed();
   _g1->collection_set_iterate_from(startRegion, &scanRScl);
@@ -283,6 +284,7 @@
   double start = os::elapsedTime();
   // Apply the given closure to all remaining log entries.
   RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);
+
   _g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, into_cset_dcq, false, worker_i);
 
   // Now there should be no dirty cards.
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Mon Sep 26 12:06:58 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Tue Oct 18 17:18:37 2011 -0700
@@ -45,7 +45,7 @@
                                  FilterKind fk) :
   ContiguousSpaceDCTOC(hr, cl, precision, NULL),
   _hr(hr), _fk(fk), _g1(g1)
-{}
+{ }
 
 FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                    OopClosure* oc) :
@@ -210,15 +210,17 @@
                                               HeapWord* top,
                                               OopClosure* cl) {
   G1CollectedHeap* g1h = _g1;
+  int oop_size;
+  OopClosure* cl2 = NULL;
 
-  int oop_size;
-
-  OopClosure* cl2 = cl;
   FilterIntoCSClosure intoCSFilt(this, g1h, cl);
   FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);
+
   switch (_fk) {
+  case NoFilterKind:          cl2 = cl; break;
   case IntoCSFilterKind:      cl2 = &intoCSFilt; break;
   case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
+  default:                    ShouldNotReachHere();
   }
 
   // Start filtering what we add to the remembered set. If the object is
@@ -239,16 +241,19 @@
     case NoFilterKind:
       bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top);
       break;
+
     case IntoCSFilterKind: {
       FilterIntoCSClosure filt(this, g1h, cl);
       bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
       break;
     }
+
     case OutOfRegionFilterKind: {
       FilterOutOfRegionClosure filt(_hr, cl);
       bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
       break;
     }
+
     default:
       ShouldNotReachHere();
     }
@@ -483,12 +488,13 @@
 HeapRegion(size_t hrs_index, G1BlockOffsetSharedArray* sharedOffsetArray,
            MemRegion mr, bool is_zeroed)
   : G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
-    _next_fk(HeapRegionDCTOC::NoFilterKind), _hrs_index(hrs_index),
+    _hrs_index(hrs_index),
     _humongous_type(NotHumongous), _humongous_start_region(NULL),
     _in_collection_set(false),
     _next_in_special_set(NULL), _orig_end(NULL),
     _claimed(InitialClaimValue), _evacuation_failed(false),
     _prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
+    _gc_efficiency(0.0),
     _young_type(NotYoung), _next_young_region(NULL),
     _next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false),
 #ifdef ASSERT
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Mon Sep 26 12:06:58 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Tue Oct 18 17:18:37 2011 -0700
@@ -118,7 +118,6 @@
                   FilterKind fk);
 };
 
-
 // The complicating factor is that BlockOffsetTable diverged
 // significantly, and we need functionality that is only in the G1 version.
 // So I copied that code, which led to an alternate G1 version of
@@ -223,10 +222,6 @@
     ContinuesHumongous
   };
 
-  // The next filter kind that should be used for a "new_dcto_cl" call with
-  // the "traditional" signature.
-  HeapRegionDCTOC::FilterKind _next_fk;
-
   // Requires that the region "mr" be dense with objects, and begin and end
   // with an object.
   void oops_in_mr_iterate(MemRegion mr, OopClosure* cl);
@@ -362,6 +357,11 @@
   static int GrainWords;
   static int CardsPerRegion;
 
+  static size_t align_up_to_region_byte_size(size_t sz) {
+    return (sz + (size_t) GrainBytes - 1) &
+                                      ~((1 << (size_t) LogOfHRGrainBytes) - 1);
+  }
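
A self-contained sketch of the same round-up arithmetic, assuming a hypothetical 1 MB region size (so that grain_bytes == 1 << log_grain, mirroring the GrainBytes / LogOfHRGrainBytes relationship used above):

#include <cstddef>

static const std::size_t log_grain   = 20;                        // hypothetical: 1 MB regions
static const std::size_t grain_bytes = (std::size_t)1 << log_grain;

// Rounds sz up to the next multiple of the region size.
static std::size_t align_up_to_region_sketch(std::size_t sz) {
  return (sz + grain_bytes - 1) & ~(grain_bytes - 1);
}

// align_up_to_region_sketch(1)               == 1 MB
// align_up_to_region_sketch(grain_bytes)     == 1 MB
// align_up_to_region_sketch(grain_bytes + 1) == 2 MB
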
+
   // It sets up the heap region size (GrainBytes / GrainWords), as
   // well as other related fields that are based on the heap region
   // size (LogOfHRGrainBytes / LogOfHRGrainWords /
@@ -573,40 +573,14 @@
   // allocated in the current region before the last call to "save_mark".
   void oop_before_save_marks_iterate(OopClosure* cl);
 
-  // This call determines the "filter kind" argument that will be used for
-  // the next call to "new_dcto_cl" on this region with the "traditional"
-  // signature (i.e., the call below.)  The default, in the absence of a
-  // preceding call to this method, is "NoFilterKind", and a call to this
-  // method is necessary for each such call, or else it reverts to the
-  // default.
-  // (This is really ugly, but all other methods I could think of changed a
-  // lot of main-line code for G1.)
-  void set_next_filter_kind(HeapRegionDCTOC::FilterKind nfk) {
-    _next_fk = nfk;
-  }
-
   DirtyCardToOopClosure*
   new_dcto_closure(OopClosure* cl,
                    CardTableModRefBS::PrecisionStyle precision,
                    HeapRegionDCTOC::FilterKind fk);
 
-#if WHASSUP
-  DirtyCardToOopClosure*
-  new_dcto_closure(OopClosure* cl,
-                   CardTableModRefBS::PrecisionStyle precision,
-                   HeapWord* boundary) {
-    assert(boundary == NULL, "This arg doesn't make sense here.");
-    DirtyCardToOopClosure* res = new_dcto_closure(cl, precision, _next_fk);
-    _next_fk = HeapRegionDCTOC::NoFilterKind;
-    return res;
-  }
-#endif
-
-  //
   // Note the start or end of marking. This tells the heap region
   // that the collector is about to start or has finished (concurrently)
   // marking the heap.
-  //
 
   // Note the start of a marking phase. Record the
   // start of the unmarked area of the region here.
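
The align_up_to_region_byte_size() helper added in the heapRegion.hpp hunk above rounds a byte size up to the next heap-region boundary. Since GrainBytes == 1 << LogOfHRGrainBytes, the add-then-mask expression is the standard power-of-two align-up. A minimal standalone C++ sketch of the same arithmetic (the 1 MB grain used in the comments is only an illustrative assumption, not a claim about the G1 default):

    #include <cassert>
    #include <cstddef>

    // Round sz up to the next multiple of grain_bytes, which must be a power of
    // two (in the hunk above, GrainBytes == 1 << LogOfHRGrainBytes, so adding
    // GrainBytes - 1 and masking with ~(grain - 1) are consistent with each other).
    static size_t align_up_to_grain(size_t sz, size_t grain_bytes) {
      assert((grain_bytes & (grain_bytes - 1)) == 0 && "grain must be a power of two");
      return (sz + grain_bytes - 1) & ~(grain_bytes - 1);
    }

    // With an illustrative 1 MB grain:
    //   align_up_to_grain(1,              1u << 20) == 1u << 20
    //   align_up_to_grain(1u << 20,       1u << 20) == 1u << 20
    //   align_up_to_grain((1u << 20) + 1, 1u << 20) == 2u << 20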
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Mon Sep 26 12:06:58 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Tue Oct 18 17:18:37 2011 -0700
@@ -56,6 +56,7 @@
 // and maintain that: _length <= _allocated_length <= _max_length
 
 class HeapRegionSeq: public CHeapObj {
+  friend class VMStructs;
 
   // The array that holds the HeapRegions.
   HeapRegion** _regions;
--- a/src/share/vm/gc_implementation/g1/satbQueue.cpp	Mon Sep 26 12:06:58 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/satbQueue.cpp	Tue Oct 18 17:18:37 2011 -0700
@@ -29,6 +29,7 @@
 #include "memory/sharedHeap.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/thread.hpp"
+#include "runtime/vmThread.hpp"
 
 // This method removes entries from an SATB buffer that will not be
 // useful to the concurrent marking threads. An entry is removed if it
@@ -252,9 +253,18 @@
       t->satb_mark_queue().apply_closure(_par_closures[worker]);
     }
   }
-  // We'll have worker 0 do this one.
-  if (worker == 0) {
-    shared_satb_queue()->apply_closure(_par_closures[0]);
+
+  // We also need to claim the VMThread so that its parity is updated;
+  // otherwise the next call to Thread::possibly_parallel_oops_do inside
+  // a StrongRootsScope might skip the VMThread because it has a stale
+  // parity that matches the parity set by the StrongRootsScope
+  //
+  // Whichever worker succeeds in claiming the VMThread gets to do
+  // the shared queue.
+
+  VMThread* vmt = VMThread::vm_thread();
+  if (vmt->claim_oops_do(true, parity)) {
+    shared_satb_queue()->apply_closure(_par_closures[worker]);
   }
 }
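
The comment added in this hunk relies on the per-GC claiming protocol in Thread::claim_oops_do: each thread remembers the parity of the last strong-roots scope under which it was claimed, and a compare-and-swap lets exactly one worker claim it per scope. Whichever SATB worker wins that claim on the VMThread processes the shared queue, and the claim also refreshes the VMThread's parity so a later Thread::possibly_parallel_oops_do in the same StrongRootsScope will not skip it. A simplified, self-contained sketch of the claiming idea (not the HotSpot implementation; std::atomic stands in for Atomic::cmpxchg):

    #include <atomic>

    struct ClaimableThread {
      std::atomic<int> _oops_do_parity{0};

      // Returns true for exactly one caller per distinct strong_roots_parity
      // value: the caller whose CAS installs the new parity. Every other caller
      // either sees the parity already updated or loses the CAS race.
      bool claim_oops_do(int strong_roots_parity) {
        int observed = _oops_do_parity.load();
        if (observed == strong_roots_parity) {
          return false;                       // already claimed in this scope
        }
        return _oops_do_parity.compare_exchange_strong(observed, strong_roots_parity);
      }
    };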
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Tue Oct 18 17:18:37 2011 -0700
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP
+
+#include "gc_implementation/g1/heapRegion.hpp"
+#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+
+#define VM_STRUCTS_G1(nonstatic_field, static_field)                          \
+                                                                              \
+  static_field(HeapRegion, GrainBytes, int)                                   \
+                                                                              \
+  nonstatic_field(HeapRegionSeq,   _regions, HeapRegion**)                    \
+  nonstatic_field(HeapRegionSeq,   _length,  size_t)                          \
+                                                                              \
+  nonstatic_field(G1CollectedHeap, _hrs,                HeapRegionSeq)        \
+  nonstatic_field(G1CollectedHeap, _g1_committed,       MemRegion)            \
+  nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t)               \
+  nonstatic_field(G1CollectedHeap, _g1mm,               G1MonitoringSupport*) \
+                                                                              \
+  nonstatic_field(G1MonitoringSupport, _eden_committed,     size_t)           \
+  nonstatic_field(G1MonitoringSupport, _eden_used,          size_t)           \
+  nonstatic_field(G1MonitoringSupport, _survivor_committed, size_t)           \
+  nonstatic_field(G1MonitoringSupport, _survivor_used,      size_t)           \
+  nonstatic_field(G1MonitoringSupport, _old_committed,      size_t)           \
+  nonstatic_field(G1MonitoringSupport, _old_used,           size_t)           \
+
+
+#define VM_TYPES_G1(declare_type, declare_toplevel_type)                      \
+                                                                              \
+  declare_type(G1CollectedHeap, SharedHeap)                                   \
+                                                                              \
+  declare_type(HeapRegion, ContiguousSpace)                                   \
+  declare_toplevel_type(HeapRegionSeq)                                        \
+  declare_toplevel_type(G1MonitoringSupport)                                  \
+                                                                              \
+  declare_toplevel_type(G1CollectedHeap*)                                     \
+  declare_toplevel_type(HeapRegion*)                                          \
+  declare_toplevel_type(G1MonitoringSupport*)                                 \
+
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP
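
VM_STRUCTS_G1 and VM_TYPES_G1 follow the X-macro pattern used throughout vmStructs: the table is written once as a macro parameterized over entry-generating macros, and vmStructs.cpp (in the hunks further below) expands it several times, to generate the entries, to type-check them, and in debug builds to verify field types. A minimal standalone sketch of that pattern with made-up struct and field names:

    #include <cstddef>
    #include <cstdio>

    struct Point { int x; int y; };

    // The table, written once, parameterized over what to do with each entry.
    #define MY_STRUCTS(nonstatic_field)  \
      nonstatic_field(Point, x, int)     \
      nonstatic_field(Point, y, int)

    // One possible expansion: print the declaring type, field name, field type
    // and offset for every entry in the table.
    #define PRINT_ENTRY(type, field, ftype) \
      std::printf(#type "::" #field " : " #ftype " at offset %zu\n", offsetof(type, field));

    int main() {
      MY_STRUCTS(PRINT_ENTRY)
      return 0;
    }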
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Mon Sep 26 12:06:58 2011 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Tue Oct 18 17:18:37 2011 -0700
@@ -198,10 +198,9 @@
 
     allocate_stacks();
 
-    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
     COMPILER2_PRESENT(DerivedPointerTable::clear());
 
-    ref_processor()->enable_discovery();
+    ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
     ref_processor()->setup_policy(clear_all_softrefs);
 
     mark_sweep_phase1(clear_all_softrefs);
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Mon Sep 26 12:06:58 2011 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Tue Oct 18 17:18:37 2011 -0700
@@ -2069,10 +2069,9 @@
     CodeCache::gc_prologue();
     Threads::gc_prologue();
 
-    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
     COMPILER2_PRESENT(DerivedPointerTable::clear());
 
-    ref_processor()->enable_discovery();
+    ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
     ref_processor()->setup_policy(maximum_heap_compaction);
 
     bool marked_for_unloading = false;
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Mon Sep 26 12:06:58 2011 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Tue Oct 18 17:18:37 2011 -0700
@@ -350,10 +350,9 @@
     }
     save_to_space_top_before_gc();
 
-    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
     COMPILER2_PRESENT(DerivedPointerTable::clear());
 
-    reference_processor()->enable_discovery();
+    reference_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
     reference_processor()->setup_policy(false);
 
     // We track how much was promoted to the next generation for
--- a/src/share/vm/gc_implementation/shared/generationCounters.cpp	Mon Sep 26 12:06:58 2011 -0700
+++ b/src/share/vm/gc_implementation/shared/generationCounters.cpp	Tue Oct 18 17:18:37 2011 -0700
@@ -26,14 +26,10 @@
 #include "gc_implementation/shared/generationCounters.hpp"
 #include "memory/resourceArea.hpp"
 
-
-GenerationCounters::GenerationCounters(const char* name,
-                                       int ordinal, int spaces,
-                                       VirtualSpace* v):
-                    _virtual_space(v) {
-
+void GenerationCounters::initialize(const char* name, int ordinal, int spaces,
+                                    size_t min_capacity, size_t max_capacity,
+                                    size_t curr_capacity) {
   if (UsePerfData) {
-
     EXCEPTION_MARK;
     ResourceMark rm;
 
@@ -51,18 +47,37 @@
 
     cname = PerfDataManager::counter_name(_name_space, "minCapacity");
     PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
-                                     _virtual_space == NULL ? 0 :
-                                     _virtual_space->committed_size(), CHECK);
+                                     min_capacity, CHECK);
 
     cname = PerfDataManager::counter_name(_name_space, "maxCapacity");
     PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
-                                     _virtual_space == NULL ? 0 :
-                                     _virtual_space->reserved_size(), CHECK);
+                                     max_capacity, CHECK);
 
     cname = PerfDataManager::counter_name(_name_space, "capacity");
-    _current_size = PerfDataManager::create_variable(SUN_GC, cname,
-                                     PerfData::U_Bytes,
-                                     _virtual_space == NULL ? 0 :
-                                     _virtual_space->committed_size(), CHECK);
+    _current_size =
+      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
+                                       curr_capacity, CHECK);
   }
 }
+
+GenerationCounters::GenerationCounters(const char* name,
+                                       int ordinal, int spaces,
+                                       VirtualSpace* v)
+  : _virtual_space(v) {
+  assert(v != NULL, "don't call this constructor if v == NULL");
+  initialize(name, ordinal, spaces,
+             v->committed_size(), v->reserved_size(), v->committed_size());
+}
+
+GenerationCounters::GenerationCounters(const char* name,
+                                       int ordinal, int spaces,
+                                       size_t min_capacity, size_t max_capacity,
+                                       size_t curr_capacity)
+  : _virtual_space(NULL) {
+  initialize(name, ordinal, spaces, min_capacity, max_capacity, curr_capacity);
+}
+
+void GenerationCounters::update_all() {
+  assert(_virtual_space != NULL, "otherwise, override this method");
+  _current_size->set_value(_virtual_space->committed_size());
+}
--- a/src/share/vm/gc_implementation/shared/generationCounters.hpp	Mon Sep 26 12:06:58 2011 -0700
+++ b/src/share/vm/gc_implementation/shared/generationCounters.hpp	Tue Oct 18 17:18:37 2011 -0700
@@ -34,6 +34,11 @@
 class GenerationCounters: public CHeapObj {
   friend class VMStructs;
 
+private:
+  void initialize(const char* name, int ordinal, int spaces,
+                  size_t min_capacity, size_t max_capacity,
+                  size_t curr_capacity);
+
  protected:
   PerfVariable*      _current_size;
   VirtualSpace*      _virtual_space;
@@ -48,11 +53,18 @@
   char*              _name_space;
 
   // This constructor is only meant for use with the PSGenerationCounters
-  // constructor.  The need for such an constructor should be eliminated
+  // constructor. The need for such a constructor should be eliminated
   // when VirtualSpace and PSVirtualSpace are unified.
-  GenerationCounters() : _name_space(NULL), _current_size(NULL), _virtual_space(NULL) {}
+  GenerationCounters()
+             : _name_space(NULL), _current_size(NULL), _virtual_space(NULL) {}
+
+  // This constructor is used for subclasses that do not have a space
+  // associated with them (e.g., in G1).
+  GenerationCounters(const char* name, int ordinal, int spaces,
+                     size_t min_capacity, size_t max_capacity,
+                     size_t curr_capacity);
+
  public:
-
   GenerationCounters(const char* name, int ordinal, int spaces,
                      VirtualSpace* v);
 
@@ -60,10 +72,7 @@
     if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
   }
 
-  virtual void update_all() {
-    _current_size->set_value(_virtual_space == NULL ? 0 :
-                             _virtual_space->committed_size());
-  }
+  virtual void update_all();
 
   const char* name_space() const        { return _name_space; }
 
--- a/src/share/vm/memory/genCollectedHeap.cpp	Mon Sep 26 12:06:58 2011 -0700
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Tue Oct 18 17:18:37 2011 -0700
@@ -599,8 +599,7 @@
           // atomic wrt other collectors in this configuration, we
           // are guaranteed to have empty discovered ref lists.
           if (rp->discovery_is_atomic()) {
-            rp->verify_no_references_recorded();
-            rp->enable_discovery();
+            rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
             rp->setup_policy(do_clear_all_soft_refs);
           } else {
             // collect() below will enable discovery as appropriate
--- a/src/share/vm/memory/referenceProcessor.cpp	Mon Sep 26 12:06:58 2011 -0700
+++ b/src/share/vm/memory/referenceProcessor.cpp	Tue Oct 18 17:18:37 2011 -0700
@@ -35,42 +35,8 @@
 
 ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
 ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
-const int        subclasses_of_ref                = REF_PHANTOM - REF_OTHER;
 bool             ReferenceProcessor::_pending_list_uses_discovered_field = false;
 
-// List of discovered references.
-class DiscoveredList {
-public:
-  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
-  oop head() const     {
-     return UseCompressedOops ?  oopDesc::decode_heap_oop(_compressed_head) :
-                                _oop_head;
-  }
-  HeapWord* adr_head() {
-    return UseCompressedOops ? (HeapWord*)&_compressed_head :
-                               (HeapWord*)&_oop_head;
-  }
-  void   set_head(oop o) {
-    if (UseCompressedOops) {
-      // Must compress the head ptr.
-      _compressed_head = oopDesc::encode_heap_oop(o);
-    } else {
-      _oop_head = o;
-    }
-  }
-  bool   empty() const          { return head() == NULL; }
-  size_t length()               { return _len; }
-  void   set_length(size_t len) { _len = len;  }
-  void   inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
-  void   dec_length(size_t dec) { _len -= dec; }
-private:
-  // Set value depending on UseCompressedOops. This could be a template class
-  // but then we have to fix all the instantiations and declarations that use this class.
-  oop       _oop_head;
-  narrowOop _compressed_head;
-  size_t _len;
-};
-
 void referenceProcessor_init() {
   ReferenceProcessor::init_statics();
 }
@@ -112,7 +78,8 @@
   _discovery_is_mt     = mt_discovery;
   _num_q               = MAX2(1, mt_processing_degree);
   _max_num_q           = MAX2(_num_q, mt_discovery_degree);
-  _discoveredSoftRefs  = NEW_C_HEAP_ARRAY(DiscoveredList, _max_num_q * subclasses_of_ref);
+  _discoveredSoftRefs  = NEW_C_HEAP_ARRAY(DiscoveredList,
+                                          _max_num_q * number_of_subclasses_of_ref());
   if (_discoveredSoftRefs == NULL) {
     vm_exit_during_initialization("Could not allocated RefProc Array");
   }
@@ -120,7 +87,7 @@
   _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_q];
   _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];
   // Initialized all entries to NULL
-  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
+  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
     _discoveredSoftRefs[i].set_head(NULL);
     _discoveredSoftRefs[i].set_length(0);
   }
@@ -134,19 +101,15 @@
 #ifndef PRODUCT
 void ReferenceProcessor::verify_no_references_recorded() {
   guarantee(!_discovering_refs, "Discovering refs?");
-  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
-    guarantee(_discoveredSoftRefs[i].empty(),
+  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
+    guarantee(_discoveredSoftRefs[i].is_empty(),
               "Found non-empty discovered list");
   }
 }
 #endif
 
 void ReferenceProcessor::weak_oops_do(OopClosure* f) {
-  // Should this instead be
-  // for (int i = 0; i < subclasses_of_ref; i++_ {
-  //   for (int j = 0; j < _num_q; j++) {
-  //     int index = i * _max_num_q + j;
-  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
+  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
     if (UseCompressedOops) {
       f->do_oop((narrowOop*)_discoveredSoftRefs[i].adr_head());
     } else {
@@ -404,7 +367,7 @@
     // allocated and are indexed into.
     assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected");
     for (int j = 0;
-         j < subclasses_of_ref;
+         j < ReferenceProcessor::number_of_subclasses_of_ref();
          j++, index += _n_queues) {
       _ref_processor.enqueue_discovered_reflist(
         _refs_lists[index], _pending_list_addr);
@@ -424,7 +387,7 @@
     task_executor->execute(tsk);
   } else {
     // Serial code: call the parent class's implementation
-    for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
+    for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
       enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr);
       _discoveredSoftRefs[i].set_head(NULL);
       _discoveredSoftRefs[i].set_length(0);
@@ -432,119 +395,7 @@
   }
 }
 
-// Iterator for the list of discovered references.
-class DiscoveredListIterator {
-public:
-  inline DiscoveredListIterator(DiscoveredList&    refs_list,
-                                OopClosure*        keep_alive,
-                                BoolObjectClosure* is_alive);
-
-  // End Of List.
-  inline bool has_next() const { return _ref != NULL; }
-
-  // Get oop to the Reference object.
-  inline oop obj() const { return _ref; }
-
-  // Get oop to the referent object.
-  inline oop referent() const { return _referent; }
-
-  // Returns true if referent is alive.
-  inline bool is_referent_alive() const;
-
-  // Loads data for the current reference.
-  // The "allow_null_referent" argument tells us to allow for the possibility
-  // of a NULL referent in the discovered Reference object. This typically
-  // happens in the case of concurrent collectors that may have done the
-  // discovery concurrently, or interleaved, with mutator execution.
-  inline void load_ptrs(DEBUG_ONLY(bool allow_null_referent));
-
-  // Move to the next discovered reference.
-  inline void next();
-
-  // Remove the current reference from the list
-  inline void remove();
-
-  // Make the Reference object active again.
-  inline void make_active() { java_lang_ref_Reference::set_next(_ref, NULL); }
-
-  // Make the referent alive.
-  inline void make_referent_alive() {
-    if (UseCompressedOops) {
-      _keep_alive->do_oop((narrowOop*)_referent_addr);
-    } else {
-      _keep_alive->do_oop((oop*)_referent_addr);
-    }
-  }
-
-  // Update the discovered field.
-  inline void update_discovered() {
-    // First _prev_next ref actually points into DiscoveredList (gross).
-    if (UseCompressedOops) {
-      if (!oopDesc::is_null(*(narrowOop*)_prev_next)) {
-        _keep_alive->do_oop((narrowOop*)_prev_next);
-      }
-    } else {
-      if (!oopDesc::is_null(*(oop*)_prev_next)) {
-        _keep_alive->do_oop((oop*)_prev_next);
-      }
-    }
-  }
-
-  // NULL out referent pointer.
-  inline void clear_referent() { oop_store_raw(_referent_addr, NULL); }
-
-  // Statistics
-  NOT_PRODUCT(
-  inline size_t processed() const { return _processed; }
-  inline size_t removed() const   { return _removed; }
-  )
-
-  inline void move_to_next();
-
-private:
-  DiscoveredList&    _refs_list;
-  HeapWord*          _prev_next;
-  oop                _prev;
-  oop                _ref;
-  HeapWord*          _discovered_addr;
-  oop                _next;
-  HeapWord*          _referent_addr;
-  oop                _referent;
-  OopClosure*        _keep_alive;
-  BoolObjectClosure* _is_alive;
-  DEBUG_ONLY(
-  oop                _first_seen; // cyclic linked list check
-  )
-  NOT_PRODUCT(
-  size_t             _processed;
-  size_t             _removed;
-  )
-};
-
-inline DiscoveredListIterator::DiscoveredListIterator(DiscoveredList&    refs_list,
-                                                      OopClosure*        keep_alive,
-                                                      BoolObjectClosure* is_alive)
-  : _refs_list(refs_list),
-    _prev_next(refs_list.adr_head()),
-    _prev(NULL),
-    _ref(refs_list.head()),
-#ifdef ASSERT
-    _first_seen(refs_list.head()),
-#endif
-#ifndef PRODUCT
-    _processed(0),
-    _removed(0),
-#endif
-    _next(NULL),
-    _keep_alive(keep_alive),
-    _is_alive(is_alive)
-{ }
-
-inline bool DiscoveredListIterator::is_referent_alive() const {
-  return _is_alive->do_object_b(_referent);
-}
-
-inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
+void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
   _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
   oop discovered = java_lang_ref_Reference::discovered(_ref);
   assert(_discovered_addr && discovered->is_oop_or_null(),
@@ -560,13 +411,7 @@
          "bad referent");
 }
 
-inline void DiscoveredListIterator::next() {
-  _prev_next = _discovered_addr;
-  _prev = _ref;
-  move_to_next();
-}
-
-inline void DiscoveredListIterator::remove() {
+void DiscoveredListIterator::remove() {
   assert(_ref->is_oop(), "Dropping a bad reference");
   oop_store_raw(_discovered_addr, NULL);
 
@@ -592,15 +437,29 @@
   _refs_list.dec_length(1);
 }
 
-inline void DiscoveredListIterator::move_to_next() {
-  if (_ref == _next) {
-    // End of the list.
-    _ref = NULL;
+// Make the Reference object active again.
+void DiscoveredListIterator::make_active() {
+  // For G1 we don't want to use set_next - it
+  // will dirty the card for the next field of
+  // the reference object and will fail
+  // CT verification.
+  if (UseG1GC) {
+    BarrierSet* bs = oopDesc::bs();
+    HeapWord* next_addr = java_lang_ref_Reference::next_addr(_ref);
+
+    if (UseCompressedOops) {
+      bs->write_ref_field_pre((narrowOop*)next_addr, NULL);
+    } else {
+      bs->write_ref_field_pre((oop*)next_addr, NULL);
+    }
+    java_lang_ref_Reference::set_next_raw(_ref, NULL);
   } else {
-    _ref = _next;
+    java_lang_ref_Reference::set_next(_ref, NULL);
   }
-  assert(_ref != _first_seen, "cyclic ref_list found");
-  NOT_PRODUCT(_processed++);
+}
+
+void DiscoveredListIterator::clear_referent() {
+  oop_store_raw(_referent_addr, NULL);
 }
 
 // NOTE: process_phase*() are largely similar, and at a high level
@@ -786,10 +645,9 @@
 
 void ReferenceProcessor::abandon_partial_discovery() {
   // loop over the lists
-  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
+  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
     if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
-      gclog_or_tty->print_cr("\nAbandoning %s discovered list",
-                             list_name(i));
+      gclog_or_tty->print_cr("\nAbandoning %s discovered list", list_name(i));
     }
     abandon_partial_discovered_list(_discoveredSoftRefs[i]);
   }
@@ -858,6 +716,14 @@
   bool _clear_referent;
 };
 
+void ReferenceProcessor::set_discovered(oop ref, oop value) {
+  if (_discovered_list_needs_barrier) {
+    java_lang_ref_Reference::set_discovered(ref, value);
+  } else {
+    java_lang_ref_Reference::set_discovered_raw(ref, value);
+  }
+}
+
 // Balances reference queues.
 // Move entries from all queues[0, 1, ..., _max_num_q-1] to
 // queues[0, 1, ..., _num_q-1] because only the first _num_q
@@ -915,9 +781,9 @@
         // Add the chain to the to list.
         if (ref_lists[to_idx].head() == NULL) {
           // to list is empty. Make a loop at the end.
-          java_lang_ref_Reference::set_discovered(move_tail, move_tail);
+          set_discovered(move_tail, move_tail);
         } else {
-          java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head());
+          set_discovered(move_tail, ref_lists[to_idx].head());
         }
         ref_lists[to_idx].set_head(move_head);
         ref_lists[to_idx].inc_length(refs_to_move);
@@ -1038,11 +904,7 @@
 
 void ReferenceProcessor::clean_up_discovered_references() {
   // loop over the lists
-  // Should this instead be
-  // for (int i = 0; i < subclasses_of_ref; i++_ {
-  //   for (int j = 0; j < _num_q; j++) {
-  //     int index = i * _max_num_q + j;
-  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
+  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
     if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
       gclog_or_tty->print_cr(
         "\nScrubbing %s discovered list of Null referents",
@@ -1260,6 +1122,8 @@
     }
   }
 
+  ResourceMark rm;      // Needed for tracing.
+
   HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
   const oop  discovered = java_lang_ref_Reference::discovered(obj);
   assert(discovered->is_oop_or_null(), "bad discovered field");
@@ -1472,7 +1336,9 @@
 }
 
 const char* ReferenceProcessor::list_name(int i) {
-   assert(i >= 0 && i <= _max_num_q * subclasses_of_ref, "Out of bounds index");
+   assert(i >= 0 && i <= _max_num_q * number_of_subclasses_of_ref(),
+          "Out of bounds index");
+
    int j = i / _max_num_q;
    switch (j) {
      case 0: return "SoftRef";
@@ -1493,7 +1359,7 @@
 #ifndef PRODUCT
 void ReferenceProcessor::clear_discovered_references() {
   guarantee(!_discovering_refs, "Discovering refs?");
-  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
+  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
     clear_discovered_references(_discoveredSoftRefs[i]);
   }
 }
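
The hunks above replace the file-scope subclasses_of_ref constant with ReferenceProcessor::number_of_subclasses_of_ref(), i.e. REF_PHANTOM - REF_OTHER, the four java.lang.ref subclasses (Soft, Weak, Final, Phantom). The discovered lists live in one flat allocation of number_of_subclasses_of_ref() * _max_num_q entries, with _discoveredWeakRefs, _discoveredFinalRefs and _discoveredPhantomRefs pointing _max_num_q entries apart, so queue j of subclass i sits at flat index i * _max_num_q + j, which is why the loops run to _max_num_q * number_of_subclasses_of_ref(). A small standalone sketch of that indexing (the queue count of 3 is an arbitrary example):

    #include <cstdio>

    int main() {
      const int num_subclasses = 4;          // Soft, Weak, Final, Phantom
      const int max_num_q      = 3;          // illustrative MT degree
      const char* names[] = { "SoftRef", "WeakRef", "FinalRef", "PhantomRef" };

      for (int i = 0; i < num_subclasses; i++) {
        for (int j = 0; j < max_num_q; j++) {
          // Same layout as _discoveredSoftRefs: each subclass's queues are a
          // contiguous block of max_num_q entries in the one flat array.
          std::printf("%-10s queue %d -> flat index %d\n", names[i], j, i * max_num_q + j);
        }
      }
      return 0;
    }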
--- a/src/share/vm/memory/referenceProcessor.hpp	Mon Sep 26 12:06:58 2011 -0700
+++ b/src/share/vm/memory/referenceProcessor.hpp	Tue Oct 18 17:18:37 2011 -0700
@@ -48,18 +48,175 @@
 // forward references
 class ReferencePolicy;
 class AbstractRefProcTaskExecutor;
-class DiscoveredList;
+
+// List of discovered references.
+class DiscoveredList {
+public:
+  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
+  oop head() const     {
+     return UseCompressedOops ?  oopDesc::decode_heap_oop(_compressed_head) :
+                                _oop_head;
+  }
+  HeapWord* adr_head() {
+    return UseCompressedOops ? (HeapWord*)&_compressed_head :
+                               (HeapWord*)&_oop_head;
+  }
+  void set_head(oop o) {
+    if (UseCompressedOops) {
+      // Must compress the head ptr.
+      _compressed_head = oopDesc::encode_heap_oop(o);
+    } else {
+      _oop_head = o;
+    }
+  }
+  bool   is_empty() const       { return head() == NULL; }
+  size_t length()               { return _len; }
+  void   set_length(size_t len) { _len = len;  }
+  void   inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
+  void   dec_length(size_t dec) { _len -= dec; }
+private:
+  // Set value depending on UseCompressedOops. This could be a template class
+  // but then we have to fix all the instantiations and declarations that use this class.
+  oop       _oop_head;
+  narrowOop _compressed_head;
+  size_t _len;
+};
+
+// Iterator for the list of discovered references.
+class DiscoveredListIterator {
+private:
+  DiscoveredList&    _refs_list;
+  HeapWord*          _prev_next;
+  oop                _prev;
+  oop                _ref;
+  HeapWord*          _discovered_addr;
+  oop                _next;
+  HeapWord*          _referent_addr;
+  oop                _referent;
+  OopClosure*        _keep_alive;
+  BoolObjectClosure* _is_alive;
+
+  DEBUG_ONLY(
+  oop                _first_seen; // cyclic linked list check
+  )
+
+  NOT_PRODUCT(
+  size_t             _processed;
+  size_t             _removed;
+  )
+
+public:
+  inline DiscoveredListIterator(DiscoveredList&    refs_list,
+                                OopClosure*        keep_alive,
+                                BoolObjectClosure* is_alive):
+    _refs_list(refs_list),
+    _prev_next(refs_list.adr_head()),
+    _prev(NULL),
+    _ref(refs_list.head()),
+#ifdef ASSERT
+    _first_seen(refs_list.head()),
+#endif
+#ifndef PRODUCT
+    _processed(0),
+    _removed(0),
+#endif
+    _next(NULL),
+    _keep_alive(keep_alive),
+    _is_alive(is_alive)
+{ }
+
+  // End Of List.
+  inline bool has_next() const { return _ref != NULL; }
+
+  // Get oop to the Reference object.
+  inline oop obj() const { return _ref; }
+
+  // Get oop to the referent object.
+  inline oop referent() const { return _referent; }
+
+  // Returns true if referent is alive.
+  inline bool is_referent_alive() const {
+    return _is_alive->do_object_b(_referent);
+  }
+
+  // Loads data for the current reference.
+  // The "allow_null_referent" argument tells us to allow for the possibility
+  // of a NULL referent in the discovered Reference object. This typically
+  // happens in the case of concurrent collectors that may have done the
+  // discovery concurrently, or interleaved, with mutator execution.
+  void load_ptrs(DEBUG_ONLY(bool allow_null_referent));
+
+  // Move to the next discovered reference.
+  inline void next() {
+    _prev_next = _discovered_addr;
+    _prev = _ref;
+    move_to_next();
+  }
+
+  // Remove the current reference from the list
+  void remove();
+
+  // Make the Reference object active again.
+  void make_active();
+
+  // Make the referent alive.
+  inline void make_referent_alive() {
+    if (UseCompressedOops) {
+      _keep_alive->do_oop((narrowOop*)_referent_addr);
+    } else {
+      _keep_alive->do_oop((oop*)_referent_addr);
+    }
+  }
+
+  // Update the discovered field.
+  inline void update_discovered() {
+    // First _prev_next ref actually points into DiscoveredList (gross).
+    if (UseCompressedOops) {
+      if (!oopDesc::is_null(*(narrowOop*)_prev_next)) {
+        _keep_alive->do_oop((narrowOop*)_prev_next);
+      }
+    } else {
+      if (!oopDesc::is_null(*(oop*)_prev_next)) {
+        _keep_alive->do_oop((oop*)_prev_next);
+      }
+    }
+  }
+
+  // NULL out referent pointer.
+  void clear_referent();
+
+  // Statistics
+  NOT_PRODUCT(
+  inline size_t processed() const { return _processed; }
+  inline size_t removed() const   { return _removed; }
+  )
+
+  inline void move_to_next() {
+    if (_ref == _next) {
+      // End of the list.
+      _ref = NULL;
+    } else {
+      _ref = _next;
+    }
+    assert(_ref != _first_seen, "cyclic ref_list found");
+    NOT_PRODUCT(_processed++);
+  }
+
+};
 
 class ReferenceProcessor : public CHeapObj {
  protected:
   // Compatibility with pre-4965777 JDK's
   static bool _pending_list_uses_discovered_field;
-  MemRegion   _span; // (right-open) interval of heap
-                     // subject to wkref discovery
-  bool        _discovering_refs;      // true when discovery enabled
-  bool        _discovery_is_atomic;   // if discovery is atomic wrt
-                                      // other collectors in configuration
-  bool        _discovery_is_mt;       // true if reference discovery is MT.
+
+  MemRegion   _span;                    // (right-open) interval of heap
+                                        // subject to wkref discovery
+
+  bool        _discovering_refs;        // true when discovery enabled
+  bool        _discovery_is_atomic;     // if discovery is atomic wrt
+                                        // other collectors in configuration
+  bool        _discovery_is_mt;         // true if reference discovery is MT.
+
   // If true, setting "next" field of a discovered refs list requires
   // write barrier(s).  (Must be true if used in a collector in which
   // elements of a discovered list may be moved during discovery: for
@@ -67,18 +224,19 @@
   // long-term concurrent marking phase that does weak reference
   // discovery.)
   bool        _discovered_list_needs_barrier;
-  BarrierSet* _bs;                    // Cached copy of BarrierSet.
-  bool        _enqueuing_is_done;     // true if all weak references enqueued
-  bool        _processing_is_mt;      // true during phases when
-                                      // reference processing is MT.
-  int         _next_id;               // round-robin mod _num_q counter in
-                                      // support of work distribution
 
-  // For collectors that do not keep GC marking information
+  BarrierSet* _bs;                      // Cached copy of BarrierSet.
+  bool        _enqueuing_is_done;       // true if all weak references enqueued
+  bool        _processing_is_mt;        // true during phases when
+                                        // reference processing is MT.
+  int         _next_id;                 // round-robin mod _num_q counter in
+                                        // support of work distribution
+
+  // For collectors that do not keep GC liveness information
   // in the object header, this field holds a closure that
   // helps the reference processor determine the reachability
-  // of an oop (the field is currently initialized to NULL for
-  // all collectors but the CMS collector).
+  // of an oop. It is currently initialized to NULL for all
+  // collectors except for CMS and G1.
   BoolObjectClosure* _is_alive_non_header;
 
   // Soft ref clearing policies
@@ -102,10 +260,13 @@
   DiscoveredList* _discoveredPhantomRefs;
 
  public:
-  int num_q()                            { return _num_q; }
-  int max_num_q()                        { return _max_num_q; }
-  void set_active_mt_degree(int v)       { _num_q = v; }
-  DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
+  static int number_of_subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); }
+
+  int num_q()                              { return _num_q; }
+  int max_num_q()                          { return _max_num_q; }
+  void set_active_mt_degree(int v)         { _num_q = v; }
+  DiscoveredList* discovered_soft_refs()   { return _discoveredSoftRefs; }
+
   ReferencePolicy* setup_policy(bool always_clear) {
     _current_soft_ref_policy = always_clear ?
       _always_clear_soft_ref_policy : _default_soft_ref_policy;
@@ -205,6 +366,11 @@
   void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);
 
  protected:
+  // Set the 'discovered' field of the given reference to
+  // the given value - emitting barriers depending upon
+  // the value of _discovered_list_needs_barrier.
+  void set_discovered(oop ref, oop value);
+
   // "Preclean" the given discovered reference list
   // by removing references with strongly reachable referents.
   // Currently used in support of CMS only.
@@ -290,7 +456,19 @@
   void      set_span(MemRegion span) { _span = span; }
 
   // start and stop weak ref discovery
-  void enable_discovery()   { _discovering_refs = true;  }
+  void enable_discovery(bool verify_disabled, bool check_no_refs) {
+#ifdef ASSERT
+    // Verify that we're not currently discovering refs
+    assert(!verify_disabled || !_discovering_refs, "nested call?");
+
+    if (check_no_refs) {
+      // Verify that the discovered lists are empty
+      verify_no_references_recorded();
+    }
+#endif // ASSERT
+    _discovering_refs = true;
+  }
+
   void disable_discovery()  { _discovering_refs = false; }
   bool discovery_enabled()  { return _discovering_refs;  }
 
@@ -365,7 +543,7 @@
 
   ~NoRefDiscovery() {
     if (_was_discovering_refs) {
-      _rp->enable_discovery();
+      _rp->enable_discovery(true /*verify_disabled*/, false /*check_no_refs*/);
     }
   }
 };
--- a/src/share/vm/prims/methodHandleWalk.cpp	Mon Sep 26 12:06:58 2011 -0700
+++ b/src/share/vm/prims/methodHandleWalk.cpp	Tue Oct 18 17:18:37 2011 -0700
@@ -1387,10 +1387,8 @@
   int total = count1 + count2;
   if (count1 != -1 && count2 != -1 && total != 0) {
     // Normalize the collect counts to the invoke_count
-    tty->print("counts %d %d scaled by %d = ", count2, count1, _invoke_count);
     if (count1 != 0) _not_taken_count = (int)(_invoke_count * count1 / (double)total);
     if (count2 != 0) _taken_count = (int)(_invoke_count * count2 / (double)total);
-    tty->print_cr("%d %d", _taken_count, _not_taken_count);
     return true;
   }
   return false;
--- a/src/share/vm/runtime/thread.cpp	Mon Sep 26 12:06:58 2011 -0700
+++ b/src/share/vm/runtime/thread.cpp	Tue Oct 18 17:18:37 2011 -0700
@@ -749,8 +749,9 @@
   jint thread_parity = _oops_do_parity;
   if (thread_parity != strong_roots_parity) {
     jint res = Atomic::cmpxchg(strong_roots_parity, &_oops_do_parity, thread_parity);
-    if (res == thread_parity) return true;
-    else {
+    if (res == thread_parity) {
+      return true;
+    } else {
       guarantee(res == strong_roots_parity, "Or else what?");
       assert(SharedHeap::heap()->n_par_threads() > 0,
              "Should only fail when parallel.");
@@ -3905,8 +3906,9 @@
     }
   }
   VMThread* vmt = VMThread::vm_thread();
-  if (vmt->claim_oops_do(is_par, cp))
+  if (vmt->claim_oops_do(is_par, cp)) {
     vmt->oops_do(f, cf);
+  }
 }
 
 #ifndef SERIALGC
--- a/src/share/vm/runtime/vmStructs.cpp	Mon Sep 26 12:06:58 2011 -0700
+++ b/src/share/vm/runtime/vmStructs.cpp	Tue Oct 18 17:18:37 2011 -0700
@@ -173,6 +173,7 @@
 #include "gc_implementation/parallelScavenge/psVirtualspace.hpp"
 #include "gc_implementation/parallelScavenge/psYoungGen.hpp"
 #include "gc_implementation/parallelScavenge/vmStructs_parallelgc.hpp"
+#include "gc_implementation/g1/vmStructs_g1.hpp"
 #endif
 #ifdef COMPILER2
 #include "opto/addnode.hpp"
@@ -694,6 +695,12 @@
       static_field(SystemDictionary,            _box_klasses[0],                               klassOop)                             \
       static_field(SystemDictionary,            _java_system_loader,                           oop)                                  \
                                                                                                                                      \
+  /*************/                                                                                                                    \
+  /* vmSymbols */                                                                                                                    \
+  /*************/                                                                                                                    \
+                                                                                                                                     \
+      static_field(vmSymbols,                   _symbols[0],                                  Symbol*)                               \
+                                                                                                                                     \
   /*******************/                                                                                                              \
   /* HashtableBucket */                                                                                                              \
   /*******************/                                                                                                              \
@@ -1539,6 +1546,7 @@
     declare_type(LoaderConstraintEntry, HashtableEntry<klassOop>)         \
   declare_toplevel_type(HashtableBucket)                                  \
   declare_toplevel_type(SystemDictionary)                                 \
+  declare_toplevel_type(vmSymbols)                                        \
   declare_toplevel_type(ProtectionDomainEntry)                            \
                                                                           \
   declare_toplevel_type(GenericGrowableArray)                             \
@@ -2521,6 +2529,13 @@
   X86_ONLY(declare_constant(frame::entry_frame_call_wrapper_offset))      \
   declare_constant(frame::pc_return_offset)                               \
                                                                           \
+  /*************/                                                         \
+  /* vmSymbols */                                                         \
+  /*************/                                                         \
+                                                                          \
+  declare_constant(vmSymbols::FIRST_SID)                                  \
+  declare_constant(vmSymbols::SID_LIMIT)                                  \
+                                                                          \
   /********************************/                                      \
   /* Calling convention constants */                                      \
   /********************************/                                      \
@@ -2855,6 +2870,9 @@
   VM_STRUCTS_CMS(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, \
                  GENERATE_NONSTATIC_VM_STRUCT_ENTRY, \
                  GENERATE_STATIC_VM_STRUCT_ENTRY)
+
+  VM_STRUCTS_G1(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, \
+                GENERATE_STATIC_VM_STRUCT_ENTRY)
 #endif // SERIALGC
 
   VM_STRUCTS_CPU(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, \
@@ -2898,6 +2916,9 @@
                GENERATE_TOPLEVEL_VM_TYPE_ENTRY)
 
   VM_TYPES_PARNEW(GENERATE_VM_TYPE_ENTRY)
+
+  VM_TYPES_G1(GENERATE_VM_TYPE_ENTRY,
+              GENERATE_TOPLEVEL_VM_TYPE_ENTRY)
 #endif // SERIALGC
 
   VM_TYPES_CPU(GENERATE_VM_TYPE_ENTRY,
@@ -2997,6 +3018,9 @@
   VM_STRUCTS_CMS(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
              CHECK_VOLATILE_NONSTATIC_VM_STRUCT_ENTRY,
              CHECK_STATIC_VM_STRUCT_ENTRY);
+
+  VM_STRUCTS_G1(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
+                CHECK_STATIC_VM_STRUCT_ENTRY);
 #endif // SERIALGC
 
   VM_STRUCTS_CPU(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
@@ -3037,6 +3061,9 @@
                CHECK_SINGLE_ARG_VM_TYPE_NO_OP);
 
   VM_TYPES_PARNEW(CHECK_VM_TYPE_ENTRY)
+
+  VM_TYPES_G1(CHECK_VM_TYPE_ENTRY,
+              CHECK_SINGLE_ARG_VM_TYPE_NO_OP);
 #endif // SERIALGC
 
   VM_TYPES_CPU(CHECK_VM_TYPE_ENTRY,
@@ -3102,6 +3129,8 @@
   debug_only(VM_STRUCTS_CMS(ENSURE_FIELD_TYPE_PRESENT, \
                             ENSURE_FIELD_TYPE_PRESENT, \
                             ENSURE_FIELD_TYPE_PRESENT));
+  debug_only(VM_STRUCTS_G1(ENSURE_FIELD_TYPE_PRESENT, \
+                           ENSURE_FIELD_TYPE_PRESENT));
 #endif // SERIALGC
   debug_only(VM_STRUCTS_CPU(ENSURE_FIELD_TYPE_PRESENT, \
                             ENSURE_FIELD_TYPE_PRESENT, \
--- a/src/share/vm/services/g1MemoryPool.cpp	Mon Sep 26 12:06:58 2011 -0700
+++ b/src/share/vm/services/g1MemoryPool.cpp	Tue Oct 18 17:18:37 2011 -0700
@@ -32,71 +32,44 @@
 G1MemoryPoolSuper::G1MemoryPoolSuper(G1CollectedHeap* g1h,
                                      const char* name,
                                      size_t init_size,
+                                     size_t max_size,
                                      bool support_usage_threshold) :
-  _g1h(g1h), CollectedMemoryPool(name,
-                                   MemoryPool::Heap,
-                                   init_size,
-                                   undefined_max(),
-                                   support_usage_threshold) {
+  _g1mm(g1h->g1mm()), CollectedMemoryPool(name,
+                                          MemoryPool::Heap,
+                                          init_size,
+                                          max_size,
+                                          support_usage_threshold) {
   assert(UseG1GC, "sanity");
 }
 
-// See the comment at the top of g1MemoryPool.hpp
-size_t G1MemoryPoolSuper::eden_space_committed(G1CollectedHeap* g1h) {
-  return MAX2(eden_space_used(g1h), (size_t) HeapRegion::GrainBytes);
-}
-
-// See the comment at the top of g1MemoryPool.hpp
-size_t G1MemoryPoolSuper::eden_space_used(G1CollectedHeap* g1h) {
-  return g1h->g1mm()->eden_space_used();
-}
-
-// See the comment at the top of g1MemoryPool.hpp
-size_t G1MemoryPoolSuper::survivor_space_committed(G1CollectedHeap* g1h) {
-  return g1h->g1mm()->survivor_space_committed();
-}
-
-// See the comment at the top of g1MemoryPool.hpp
-size_t G1MemoryPoolSuper::survivor_space_used(G1CollectedHeap* g1h) {
-  return g1h->g1mm()->survivor_space_used();
-}
-
-// See the comment at the top of g1MemoryPool.hpp
-size_t G1MemoryPoolSuper::old_space_committed(G1CollectedHeap* g1h) {
-  return g1h->g1mm()->old_space_committed();
-}
-
-// See the comment at the top of g1MemoryPool.hpp
-size_t G1MemoryPoolSuper::old_space_used(G1CollectedHeap* g1h) {
-  return g1h->g1mm()->old_space_used();
-}
-
 G1EdenPool::G1EdenPool(G1CollectedHeap* g1h) :
   G1MemoryPoolSuper(g1h,
-                    "G1 Eden",
-                    eden_space_committed(g1h), /* init_size */
+                    "G1 Eden Space",
+                    g1h->g1mm()->eden_space_committed(), /* init_size */
+                    _undefined_max,
                     false /* support_usage_threshold */) { }
 
 MemoryUsage G1EdenPool::get_memory_usage() {
   size_t initial_sz = initial_size();
   size_t max_sz     = max_size();
   size_t used       = used_in_bytes();
-  size_t committed  = eden_space_committed(_g1h);
+  size_t committed  = _g1mm->eden_space_committed();
 
   return MemoryUsage(initial_sz, used, committed, max_sz);
 }
 
 G1SurvivorPool::G1SurvivorPool(G1CollectedHeap* g1h) :
   G1MemoryPoolSuper(g1h,
-                    "G1 Survivor",
-                    survivor_space_committed(g1h), /* init_size */
+                    "G1 Survivor Space",
+                    g1h->g1mm()->survivor_space_committed(), /* init_size */
+                    _undefined_max,
                     false /* support_usage_threshold */) { }
 
 MemoryUsage G1SurvivorPool::get_memory_usage() {
   size_t initial_sz = initial_size();
   size_t max_sz     = max_size();
   size_t used       = used_in_bytes();
-  size_t committed  = survivor_space_committed(_g1h);
+  size_t committed  = _g1mm->survivor_space_committed();
 
   return MemoryUsage(initial_sz, used, committed, max_sz);
 }
@@ -104,14 +77,15 @@
 G1OldGenPool::G1OldGenPool(G1CollectedHeap* g1h) :
   G1MemoryPoolSuper(g1h,
                     "G1 Old Gen",
-                    old_space_committed(g1h), /* init_size */
+                    g1h->g1mm()->old_space_committed(), /* init_size */
+                    _undefined_max,
                     true /* support_usage_threshold */) { }
 
 MemoryUsage G1OldGenPool::get_memory_usage() {
   size_t initial_sz = initial_size();
   size_t max_sz     = max_size();
   size_t used       = used_in_bytes();
-  size_t committed  = old_space_committed(_g1h);
+  size_t committed  = _g1mm->old_space_committed();
 
   return MemoryUsage(initial_sz, used, committed, max_sz);
 }
--- a/src/share/vm/services/g1MemoryPool.hpp	Mon Sep 26 12:06:58 2011 -0700
+++ b/src/share/vm/services/g1MemoryPool.hpp	Tue Oct 18 17:18:37 2011 -0700
@@ -26,12 +26,11 @@
 #define SHARE_VM_SERVICES_G1MEMORYPOOL_HPP
 
 #ifndef SERIALGC
+#include "gc_implementation/g1/g1MonitoringSupport.hpp"
 #include "services/memoryPool.hpp"
 #include "services/memoryUsage.hpp"
 #endif
 
-class G1CollectedHeap;
-
 // This file contains the three classes that represent the memory
 // pools of the G1 spaces: G1EdenPool, G1SurvivorPool, and
 // G1OldGenPool. In G1, unlike our other GCs, we do not have a
@@ -50,37 +49,19 @@
 // on this model.
 //
 
-
 // This class is shared by the three G1 memory pool classes
-// (G1EdenPool, G1SurvivorPool, G1OldGenPool). Given that the way we
-// calculate used / committed bytes for these three pools is related
-// (see comment above), we put the calculations in this class so that
-// we can easily share them among the subclasses.
+// (G1EdenPool, G1SurvivorPool, G1OldGenPool).
 class G1MemoryPoolSuper : public CollectedMemoryPool {
 protected:
-  G1CollectedHeap* _g1h;
+  const static size_t _undefined_max = (size_t) -1;
+  G1MonitoringSupport* _g1mm;
 
   // Would only be called from subclasses.
   G1MemoryPoolSuper(G1CollectedHeap* g1h,
                     const char* name,
                     size_t init_size,
+                    size_t max_size,
                     bool support_usage_threshold);
-
-  // The reason why all the code is in static methods is so that it
-  // can be safely called from the constructors of the subclasses.
-
-  static size_t undefined_max() {
-    return (size_t) -1;
-  }
-
-  static size_t eden_space_committed(G1CollectedHeap* g1h);
-  static size_t eden_space_used(G1CollectedHeap* g1h);
-
-  static size_t survivor_space_committed(G1CollectedHeap* g1h);
-  static size_t survivor_space_used(G1CollectedHeap* g1h);
-
-  static size_t old_space_committed(G1CollectedHeap* g1h);
-  static size_t old_space_used(G1CollectedHeap* g1h);
 };
 
 // Memory pool that represents the G1 eden.
@@ -89,10 +70,10 @@
   G1EdenPool(G1CollectedHeap* g1h);
 
   size_t used_in_bytes() {
-    return eden_space_used(_g1h);
+    return _g1mm->eden_space_used();
   }
   size_t max_size() const {
-    return undefined_max();
+    return _undefined_max;
   }
   MemoryUsage get_memory_usage();
 };
@@ -103,10 +84,10 @@
   G1SurvivorPool(G1CollectedHeap* g1h);
 
   size_t used_in_bytes() {
-    return survivor_space_used(_g1h);
+    return _g1mm->survivor_space_used();
   }
   size_t max_size() const {
-    return undefined_max();
+    return _undefined_max;
   }
   MemoryUsage get_memory_usage();
 };
@@ -117,10 +98,10 @@
   G1OldGenPool(G1CollectedHeap* g1h);
 
   size_t used_in_bytes() {
-    return old_space_used(_g1h);
+    return _g1mm->old_space_used();
   }
   size_t max_size() const {
-    return undefined_max();
+    return _undefined_max;
   }
   MemoryUsage get_memory_usage();
 };