changeset 4185:1e5b6a49c06d hs24-b25

Merge
author amurillo
date Fri, 09 Nov 2012 07:31:56 -0800
parents 373fcf2269e6 (current diff) cea242198338 (diff)
children ce5983a3e0b2
files
diffstat 34 files changed, 985 insertions(+), 445 deletions(-)
--- a/make/hotspot_version	Thu Nov 08 18:46:17 2012 -0800
+++ b/make/hotspot_version	Fri Nov 09 07:31:56 2012 -0800
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=24
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=24
+HS_BUILD_NUMBER=25
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7
--- a/make/linux/makefiles/defs.make	Thu Nov 08 18:46:17 2012 -0800
+++ b/make/linux/makefiles/defs.make	Fri Nov 09 07:31:56 2012 -0800
@@ -164,68 +164,70 @@
   # overridden in some situations, e.g., a BUILD_FLAVOR != product
   # build.
 
-  ifeq ($(BUILD_FLAVOR), product)
-    FULL_DEBUG_SYMBOLS ?= 1
-    ENABLE_FULL_DEBUG_SYMBOLS = $(FULL_DEBUG_SYMBOLS)
-  else
-    # debug variants always get Full Debug Symbols (if available)
-    ENABLE_FULL_DEBUG_SYMBOLS = 1
-  endif
-  _JUNK_ := $(shell \
-    echo >&2 "INFO: ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS)")
-  # since objcopy is optional, we set ZIP_DEBUGINFO_FILES later
+  # Due to the multiple sub-make processes that occur, this logic gets
+  # executed multiple times. We reduce the noise by at least checking that
+  # BUILD_FLAVOR has been set.
+  ifneq ($(BUILD_FLAVOR),)
+    ifeq ($(BUILD_FLAVOR), product)
+      FULL_DEBUG_SYMBOLS ?= 1
+      ENABLE_FULL_DEBUG_SYMBOLS = $(FULL_DEBUG_SYMBOLS)
+    else
+      # debug variants always get Full Debug Symbols (if available)
+      ENABLE_FULL_DEBUG_SYMBOLS = 1
+    endif
+    _JUNK_ := $(shell \
+      echo >&2 "INFO: ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS)")
+    # since objcopy is optional, we set ZIP_DEBUGINFO_FILES later
 
-  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
-    # Default OBJCOPY comes from GNU Binutils on Linux:
-    DEF_OBJCOPY=/usr/bin/objcopy
-    ifdef CROSS_COMPILE_ARCH
-      # don't try to generate .debuginfo files when cross compiling
-      _JUNK_ := $(shell \
-        echo >&2 "INFO: cross compiling for ARCH $(CROSS_COMPILE_ARCH)," \
-          "skipping .debuginfo generation.")
-      OBJCOPY=
-    else
+    ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+      # Default OBJCOPY comes from GNU Binutils on Linux
+      ifeq ($(CROSS_COMPILE_ARCH),)
+        DEF_OBJCOPY=/usr/bin/objcopy
+      else
+        # Assume objcopy is part of the cross-compilation toolset
+        ifneq ($(ALT_COMPILER_PATH),)
+          DEF_OBJCOPY=$(ALT_COMPILER_PATH)/objcopy
+        endif
+      endif
       OBJCOPY=$(shell test -x $(DEF_OBJCOPY) && echo $(DEF_OBJCOPY))
       ifneq ($(ALT_OBJCOPY),)
         _JUNK_ := $(shell echo >&2 "INFO: ALT_OBJCOPY=$(ALT_OBJCOPY)")
         OBJCOPY=$(shell test -x $(ALT_OBJCOPY) && echo $(ALT_OBJCOPY))
       endif
-    endif
-  else
-    OBJCOPY=
-  endif
 
-  ifeq ($(OBJCOPY),)
-    _JUNK_ := $(shell \
-      echo >&2 "INFO: no objcopy cmd found so cannot create .debuginfo files.")
-    ENABLE_FULL_DEBUG_SYMBOLS=0
-    _JUNK_ := $(shell \
-      echo >&2 "INFO: ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS)")
-  else
-    _JUNK_ := $(shell \
-      echo >&2 "INFO: $(OBJCOPY) cmd found so will create .debuginfo files.")
+      ifeq ($(OBJCOPY),)
+        _JUNK_ := $(shell \
+          echo >&2 "INFO: no objcopy cmd found so cannot create .debuginfo files. You may need to set ALT_OBJCOPY.")
+        ENABLE_FULL_DEBUG_SYMBOLS=0
+        _JUNK_ := $(shell \
+          echo >&2 "INFO: ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS)")
+      else
+        _JUNK_ := $(shell \
+          echo >&2 "INFO: $(OBJCOPY) cmd found so will create .debuginfo files.")
 
-    # Library stripping policies for .debuginfo configs:
-    #   all_strip - strips everything from the library
-    #   min_strip - strips most stuff from the library; leaves minimum symbols
-    #   no_strip  - does not strip the library at all
-    #
-    # Oracle security policy requires "all_strip". A waiver was granted on
-    # 2011.09.01 that permits using "min_strip" in the Java JDK and Java JRE.
-    #
-    # Currently, STRIP_POLICY is only used when Full Debug Symbols is enabled.
-    #
-    STRIP_POLICY ?= min_strip
+        # Library stripping policies for .debuginfo configs:
+        #   all_strip - strips everything from the library
+        #   min_strip - strips most stuff from the library; leaves minimum symbols
+        #   no_strip  - does not strip the library at all
+        #
+        # Oracle security policy requires "all_strip". A waiver was granted on
+        # 2011.09.01 that permits using "min_strip" in the Java JDK and Java JRE.
+        #
+        # Currently, STRIP_POLICY is only used when Full Debug Symbols is enabled.
+        #
+        STRIP_POLICY ?= min_strip
 
-    _JUNK_ := $(shell \
-      echo >&2 "INFO: STRIP_POLICY=$(STRIP_POLICY)")
+        _JUNK_ := $(shell \
+          echo >&2 "INFO: STRIP_POLICY=$(STRIP_POLICY)")
 
-    ZIP_DEBUGINFO_FILES ?= 1
+        ZIP_DEBUGINFO_FILES ?= 1
 
-    _JUNK_ := $(shell \
-      echo >&2 "INFO: ZIP_DEBUGINFO_FILES=$(ZIP_DEBUGINFO_FILES)")
-  endif
-endif
+        _JUNK_ := $(shell \
+          echo >&2 "INFO: ZIP_DEBUGINFO_FILES=$(ZIP_DEBUGINFO_FILES)")
+      endif
+    endif # ENABLE_FULL_DEBUG_SYMBOLS=1
+  endif # BUILD_FLAVOR
+endif # JDK_6_OR_EARLIER
 
 JDK_INCLUDE_SUBDIR=linux
 
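With the rework above, a Linux cross-compile no longer skips .debuginfo generation outright; objcopy is instead looked up in the cross toolchain via ALT_COMPILER_PATH and can still be overridden with ALT_OBJCOPY. A minimal sketch of how the new knobs might be supplied on a developer invocation; the target name, architecture and toolchain path are illustrative, not taken from this changeset:

  make product FULL_DEBUG_SYMBOLS=1 \
       CROSS_COMPILE_ARCH=arm ALT_COMPILER_PATH=/opt/arm-gnu/bin
  # If no executable objcopy is found, the build prints
  # "INFO: no objcopy cmd found ..." and falls back to ENABLE_FULL_DEBUG_SYMBOLS=0.
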
--- a/make/linux/makefiles/vm.make	Thu Nov 08 18:46:17 2012 -0800
+++ b/make/linux/makefiles/vm.make	Fri Nov 09 07:31:56 2012 -0800
@@ -336,24 +336,23 @@
 	      fi                                                        \
             fi 								\
 	}
-ifeq ($(CROSS_COMPILE_ARCH),)
-  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+
+ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
 	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DEBUGINFO)
 	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DEBUGINFO) $@
-    ifeq ($(STRIP_POLICY),all_strip)
+  ifeq ($(STRIP_POLICY),all_strip)
 	$(QUIETLY) $(STRIP) $@
-    else
-      ifeq ($(STRIP_POLICY),min_strip)
+  else
+    ifeq ($(STRIP_POLICY),min_strip)
 	$(QUIETLY) $(STRIP) -g $@
-      # implied else here is no stripping at all
-      endif
+    # implied else here is no stripping at all
     endif
+  endif
 	$(QUIETLY) [ -f $(LIBJVM_G_DEBUGINFO) ] || ln -s $(LIBJVM_DEBUGINFO) $(LIBJVM_G_DEBUGINFO)
-    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+  ifeq ($(ZIP_DEBUGINFO_FILES),1)
 	$(ZIPEXE) -q -y $(LIBJVM_DIZ) $(LIBJVM_DEBUGINFO) $(LIBJVM_G_DEBUGINFO)
 	$(RM) $(LIBJVM_DEBUGINFO) $(LIBJVM_G_DEBUGINFO)
 	[ -f $(LIBJVM_G_DIZ) ] || { ln -s $(LIBJVM_DIZ) $(LIBJVM_G_DIZ); }
-    endif
   endif
 endif
 
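For reference, with ENABLE_FULL_DEBUG_SYMBOLS=1 the post-link steps in this rule amount to roughly the following sequence. This is a sketch, assuming LIBJVM_DEBUGINFO, LIBJVM_G_DEBUGINFO, LIBJVM_DIZ and LIBJVM_G_DIZ expand to libjvm.debuginfo, libjvm_g.debuginfo, libjvm.diz and libjvm_g.diz, and STRIP_POLICY=min_strip:

  objcopy --only-keep-debug libjvm.so libjvm.debuginfo
  objcopy --add-gnu-debuglink=libjvm.debuginfo libjvm.so
  strip -g libjvm.so                  # min_strip; all_strip would run plain strip
  ln -s libjvm.debuginfo libjvm_g.debuginfo
  # with ZIP_DEBUGINFO_FILES=1:
  zip -q -y libjvm.diz libjvm.debuginfo libjvm_g.debuginfo
  rm libjvm.debuginfo libjvm_g.debuginfo
  ln -s libjvm.diz libjvm_g.diz
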
--- a/make/solaris/makefiles/defs.make	Thu Nov 08 18:46:17 2012 -0800
+++ b/make/solaris/makefiles/defs.make	Fri Nov 09 07:31:56 2012 -0800
@@ -109,60 +109,63 @@
   # overridden in some situations, e.g., a BUILD_FLAVOR != product
   # build.
 
-  ifeq ($(BUILD_FLAVOR), product)
-    FULL_DEBUG_SYMBOLS ?= 1
-    ENABLE_FULL_DEBUG_SYMBOLS = $(FULL_DEBUG_SYMBOLS)
-  else
-    # debug variants always get Full Debug Symbols (if available)
-    ENABLE_FULL_DEBUG_SYMBOLS = 1
-  endif
-  _JUNK_ := $(shell \
-    echo >&2 "INFO: ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS)")
-  # since objcopy is optional, we set ZIP_DEBUGINFO_FILES later
-
-  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
-    # Default OBJCOPY comes from the SUNWbinutils package:
-    DEF_OBJCOPY=/usr/sfw/bin/gobjcopy
-    OBJCOPY=$(shell test -x $(DEF_OBJCOPY) && echo $(DEF_OBJCOPY))
-    ifneq ($(ALT_OBJCOPY),)
-      _JUNK_ := $(shell echo >&2 "INFO: ALT_OBJCOPY=$(ALT_OBJCOPY)")
-      OBJCOPY=$(shell test -x $(ALT_OBJCOPY) && echo $(ALT_OBJCOPY))
+  # Due to the multiple sub-make processes that occur, this logic gets
+  # executed multiple times. We reduce the noise by at least checking that
+  # BUILD_FLAVOR has been set.
+  ifneq ($(BUILD_FLAVOR),)
+    ifeq ($(BUILD_FLAVOR), product)
+      FULL_DEBUG_SYMBOLS ?= 1
+      ENABLE_FULL_DEBUG_SYMBOLS = $(FULL_DEBUG_SYMBOLS)
+    else
+      # debug variants always get Full Debug Symbols (if available)
+      ENABLE_FULL_DEBUG_SYMBOLS = 1
     endif
-  else
-    OBJCOPY=
-  endif
-
-  ifeq ($(OBJCOPY),)
-    _JUNK_ := $(shell \
-      echo >&2 "INFO: no objcopy cmd found so cannot create .debuginfo files.")
-    ENABLE_FULL_DEBUG_SYMBOLS=0
     _JUNK_ := $(shell \
       echo >&2 "INFO: ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS)")
-  else
-    _JUNK_ := $(shell \
-      echo >&2 "INFO: $(OBJCOPY) cmd found so will create .debuginfo files.")
+    # since objcopy is optional, we set ZIP_DEBUGINFO_FILES later
+
+    ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+      # Default OBJCOPY comes from the SUNWbinutils package:
+      DEF_OBJCOPY=/usr/sfw/bin/gobjcopy
+      OBJCOPY=$(shell test -x $(DEF_OBJCOPY) && echo $(DEF_OBJCOPY))
+      ifneq ($(ALT_OBJCOPY),)
+        _JUNK_ := $(shell echo >&2 "INFO: ALT_OBJCOPY=$(ALT_OBJCOPY)")
+        OBJCOPY=$(shell test -x $(ALT_OBJCOPY) && echo $(ALT_OBJCOPY))
+      endif
+
+      ifeq ($(OBJCOPY),)
+        _JUNK_ := $(shell \
+          echo >&2 "INFO: no objcopy cmd found so cannot create .debuginfo files.")
+        ENABLE_FULL_DEBUG_SYMBOLS=0
+        _JUNK_ := $(shell \
+          echo >&2 "INFO: ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS)")
+      else
+        _JUNK_ := $(shell \
+          echo >&2 "INFO: $(OBJCOPY) cmd found so will create .debuginfo files.")
 
-    # Library stripping policies for .debuginfo configs:
-    #   all_strip - strips everything from the library
-    #   min_strip - strips most stuff from the library; leaves minimum symbols
-    #   no_strip  - does not strip the library at all
-    #
-    # Oracle security policy requires "all_strip". A waiver was granted on
-    # 2011.09.01 that permits using "min_strip" in the Java JDK and Java JRE.
-    #
-    # Currently, STRIP_POLICY is only used when Full Debug Symbols is enabled.
-    #
-    STRIP_POLICY ?= min_strip
+        # Library stripping policies for .debuginfo configs:
+        #   all_strip - strips everything from the library
+        #   min_strip - strips most stuff from the library; leaves minimum symbols
+        #   no_strip  - does not strip the library at all
+        #
+        # Oracle security policy requires "all_strip". A waiver was granted on
+        # 2011.09.01 that permits using "min_strip" in the Java JDK and Java JRE.
+        #
+        # Currently, STRIP_POLICY is only used when Full Debug Symbols is enabled.
+        #
+        STRIP_POLICY ?= min_strip
 
-    _JUNK_ := $(shell \
-      echo >&2 "INFO: STRIP_POLICY=$(STRIP_POLICY)")
+        _JUNK_ := $(shell \
+          echo >&2 "INFO: STRIP_POLICY=$(STRIP_POLICY)")
 
-    ZIP_DEBUGINFO_FILES ?= 1
+        ZIP_DEBUGINFO_FILES ?= 1
 
-    _JUNK_ := $(shell \
-      echo >&2 "INFO: ZIP_DEBUGINFO_FILES=$(ZIP_DEBUGINFO_FILES)")
-  endif
-endif
+        _JUNK_ := $(shell \
+          echo >&2 "INFO: ZIP_DEBUGINFO_FILES=$(ZIP_DEBUGINFO_FILES)")
+      endif
+    endif # ENABLE_FULL_DEBUG_SYMBOLS=1
+  endif # BUILD_FLAVOR
+endif # JDK_6_OR_EARLIER
 
 JDK_INCLUDE_SUBDIR=solaris
 
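The stripping policy comments above translate directly into command-line overrides; a hedged sketch of a Solaris product build that keeps full symbols in the library and skips zipping (the target name and gobjcopy path are illustrative, not part of this changeset):

  make product FULL_DEBUG_SYMBOLS=1 STRIP_POLICY=no_strip \
       ZIP_DEBUGINFO_FILES=0 ALT_OBJCOPY=/opt/gnu/bin/gobjcopy
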
--- a/make/windows/makefiles/defs.make	Thu Nov 08 18:46:17 2012 -0800
+++ b/make/windows/makefiles/defs.make	Fri Nov 09 07:31:56 2012 -0800
@@ -131,23 +131,29 @@
 # overridden in some situations, e.g., a BUILD_FLAVOR != product
 # build.
 
-ifeq ($(BUILD_FLAVOR), product)
-  FULL_DEBUG_SYMBOLS ?= 1
-  ENABLE_FULL_DEBUG_SYMBOLS = $(FULL_DEBUG_SYMBOLS)
-else
-  # debug variants always get Full Debug Symbols (if available)
-  ENABLE_FULL_DEBUG_SYMBOLS = 1
+# Due to the multiple sub-make processes that occur, this logic gets
+# executed multiple times. We reduce the noise by at least checking that
+# BUILD_FLAVOR has been set.
+ifneq ($(BUILD_FLAVOR),)
+  ifeq ($(BUILD_FLAVOR), product)
+    FULL_DEBUG_SYMBOLS ?= 1
+    ENABLE_FULL_DEBUG_SYMBOLS = $(FULL_DEBUG_SYMBOLS)
+  else
+    # debug variants always get Full Debug Symbols (if available)
+    ENABLE_FULL_DEBUG_SYMBOLS = 1
+  endif
+  _JUNK_ := $(shell \
+    echo >&2 "INFO: ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS)")
+  MAKE_ARGS += ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS)
+
+  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+    ZIP_DEBUGINFO_FILES ?= 1
+  else
+    ZIP_DEBUGINFO_FILES=0
+  endif
+  MAKE_ARGS += ZIP_DEBUGINFO_FILES=$(ZIP_DEBUGINFO_FILES)
 endif
-_JUNK_ := $(shell \
-  echo >&2 "INFO: ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS)")
-MAKE_ARGS += ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS)
 
-ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
-  ZIP_DEBUGINFO_FILES ?= 1
-else
-  ZIP_DEBUGINFO_FILES=0
-endif
-MAKE_ARGS += ZIP_DEBUGINFO_FILES=$(ZIP_DEBUGINFO_FILES)
 MAKE_ARGS += RM="$(RM)"
 MAKE_ARGS += ZIPEXE=$(ZIPEXE)
 
--- a/src/share/vm/ci/ciClassList.hpp	Thu Nov 08 18:46:17 2012 -0800
+++ b/src/share/vm/ci/ciClassList.hpp	Fri Nov 09 07:31:56 2012 -0800
@@ -49,6 +49,7 @@
 class     ciCallSite;
 class     ciMemberName;
 class     ciMethodHandle;
+class     ciMethodType;
 class   ciMethod;
 class   ciMethodData;
 class     ciReceiverTypeData;  // part of ciMethodData
@@ -105,6 +106,7 @@
 friend class ciMethod;                 \
 friend class ciMethodData;             \
 friend class ciMethodHandle;           \
+friend class ciMethodType;             \
 friend class ciReceiverTypeData;       \
 friend class ciSymbol;                 \
 friend class ciArray;                  \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/ci/ciMethodType.hpp	Fri Nov 09 07:31:56 2012 -0800
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_CI_CIMETHODTYPE_HPP
+#define SHARE_VM_CI_CIMETHODTYPE_HPP
+
+#include "ci/ciInstance.hpp"
+#include "ci/ciUtilities.hpp"
+#include "classfile/javaClasses.hpp"
+
+// ciMethodType
+//
+// The class represents a java.lang.invoke.MethodType object.
+class ciMethodType : public ciInstance {
+private:
+  ciType* class_to_citype(oop klass_oop) const {
+    if (java_lang_Class::is_primitive(klass_oop)) {
+      BasicType bt = java_lang_Class::primitive_type(klass_oop);
+      return ciType::make(bt);
+    } else {
+      klassOop k = java_lang_Class::as_klassOop(klass_oop);
+      return CURRENT_ENV->get_object(k)->as_klass();
+    }
+  }
+
+public:
+  ciMethodType(instanceHandle h_i) : ciInstance(h_i) {}
+
+  // What kind of ciObject is this?
+  bool is_method_type() const { return true; }
+
+  ciType* rtype() const {
+    GUARDED_VM_ENTRY(
+      oop rtype = java_lang_invoke_MethodType::rtype(get_oop());
+      return class_to_citype(rtype);
+    )
+  }
+
+  int ptype_count() const {
+    GUARDED_VM_ENTRY(return java_lang_invoke_MethodType::ptype_count(get_oop());)
+  }
+
+  int ptype_slot_count() const {
+    GUARDED_VM_ENTRY(return java_lang_invoke_MethodType::ptype_slot_count(get_oop());)
+  }
+
+  ciType* ptype_at(int index) const {
+    GUARDED_VM_ENTRY(
+      oop ptype = java_lang_invoke_MethodType::ptype(get_oop(), index);
+      return class_to_citype(ptype);
+    )
+  }
+};
+
+#endif // SHARE_VM_CI_CIMETHODTYPE_HPP
--- a/src/share/vm/ci/ciObject.hpp	Thu Nov 08 18:46:17 2012 -0800
+++ b/src/share/vm/ci/ciObject.hpp	Fri Nov 09 07:31:56 2012 -0800
@@ -146,6 +146,7 @@
   virtual bool is_method()                  { return false; }
   virtual bool is_method_data()             { return false; }
   virtual bool is_method_handle()     const { return false; }
+  virtual bool is_method_type()       const { return false; }
   virtual bool is_array()                   { return false; }
   virtual bool is_obj_array()               { return false; }
   virtual bool is_type_array()              { return false; }
@@ -193,103 +194,107 @@
   }
 
   // Subclass casting with assertions.
-  ciNullObject*            as_null_object() {
+  ciNullObject* as_null_object() {
     assert(is_null_object(), "bad cast");
     return (ciNullObject*)this;
   }
-  ciCallSite*              as_call_site() {
+  ciCallSite* as_call_site() {
     assert(is_call_site(), "bad cast");
-    return (ciCallSite*) this;
+    return (ciCallSite*)this;
   }
-  ciCPCache*               as_cpcache() {
+  ciCPCache* as_cpcache() {
     assert(is_cpcache(), "bad cast");
-    return (ciCPCache*) this;
+    return (ciCPCache*)this;
   }
-  ciInstance*              as_instance() {
+  ciInstance* as_instance() {
     assert(is_instance(), "bad cast");
     return (ciInstance*)this;
   }
-  ciMemberName*            as_member_name() {
+  ciMemberName* as_member_name() {
     assert(is_member_name(), "bad cast");
     return (ciMemberName*)this;
   }
-  ciMethod*                as_method() {
+  ciMethod* as_method() {
     assert(is_method(), "bad cast");
     return (ciMethod*)this;
   }
-  ciMethodData*            as_method_data() {
+  ciMethodData* as_method_data() {
     assert(is_method_data(), "bad cast");
     return (ciMethodData*)this;
   }
-  ciMethodHandle*          as_method_handle() {
+  ciMethodHandle* as_method_handle() {
     assert(is_method_handle(), "bad cast");
-    return (ciMethodHandle*) this;
+    return (ciMethodHandle*)this;
   }
-  ciArray*                 as_array() {
+  ciMethodType* as_method_type() {
+    assert(is_method_type(), "bad cast");
+    return (ciMethodType*)this;
+  }
+  ciArray* as_array() {
     assert(is_array(), "bad cast");
     return (ciArray*)this;
   }
-  ciObjArray*              as_obj_array() {
+  ciObjArray* as_obj_array() {
     assert(is_obj_array(), "bad cast");
     return (ciObjArray*)this;
   }
-  ciTypeArray*             as_type_array() {
+  ciTypeArray* as_type_array() {
     assert(is_type_array(), "bad cast");
     return (ciTypeArray*)this;
   }
-  ciSymbol*                as_symbol() {
+  ciSymbol* as_symbol() {
     assert(is_symbol(), "bad cast");
     return (ciSymbol*)this;
   }
-  ciType*                  as_type() {
+  ciType* as_type() {
     assert(is_type(), "bad cast");
     return (ciType*)this;
   }
-  ciReturnAddress*         as_return_address() {
+  ciReturnAddress* as_return_address() {
     assert(is_return_address(), "bad cast");
     return (ciReturnAddress*)this;
   }
-  ciKlass*                 as_klass() {
+  ciKlass* as_klass() {
     assert(is_klass(), "bad cast");
     return (ciKlass*)this;
   }
-  ciInstanceKlass*         as_instance_klass() {
+  ciInstanceKlass* as_instance_klass() {
     assert(is_instance_klass(), "bad cast");
     return (ciInstanceKlass*)this;
   }
-  ciMethodKlass*           as_method_klass() {
+  ciMethodKlass* as_method_klass() {
     assert(is_method_klass(), "bad cast");
     return (ciMethodKlass*)this;
   }
-  ciArrayKlass*            as_array_klass() {
+  ciArrayKlass* as_array_klass() {
     assert(is_array_klass(), "bad cast");
     return (ciArrayKlass*)this;
   }
-  ciObjArrayKlass*         as_obj_array_klass() {
+  ciObjArrayKlass* as_obj_array_klass() {
     assert(is_obj_array_klass(), "bad cast");
     return (ciObjArrayKlass*)this;
   }
-  ciTypeArrayKlass*        as_type_array_klass() {
+  ciTypeArrayKlass* as_type_array_klass() {
     assert(is_type_array_klass(), "bad cast");
     return (ciTypeArrayKlass*)this;
   }
-  ciKlassKlass*            as_klass_klass() {
+  ciKlassKlass* as_klass_klass() {
     assert(is_klass_klass(), "bad cast");
     return (ciKlassKlass*)this;
   }
-  ciInstanceKlassKlass*    as_instance_klass_klass() {
+  ciInstanceKlassKlass* as_instance_klass_klass() {
     assert(is_instance_klass_klass(), "bad cast");
     return (ciInstanceKlassKlass*)this;
   }
-  ciArrayKlassKlass*       as_array_klass_klass() {
+  ciArrayKlassKlass* as_array_klass_klass() {
     assert(is_array_klass_klass(), "bad cast");
     return (ciArrayKlassKlass*)this;
   }
-  ciObjArrayKlassKlass*    as_obj_array_klass_klass() {
+  ciObjArrayKlassKlass* as_obj_array_klass_klass() {
     assert(is_obj_array_klass_klass(), "bad cast");
     return (ciObjArrayKlassKlass*)this;
   }
-  ciTypeArrayKlassKlass*   as_type_array_klass_klass() {
+  ciTypeArrayKlassKlass* as_type_array_klass_klass() {
     assert(is_type_array_klass_klass(), "bad cast");
     return (ciTypeArrayKlassKlass*)this;
   }
--- a/src/share/vm/ci/ciObjectFactory.cpp	Thu Nov 08 18:46:17 2012 -0800
+++ b/src/share/vm/ci/ciObjectFactory.cpp	Fri Nov 09 07:31:56 2012 -0800
@@ -32,6 +32,7 @@
 #include "ci/ciMethod.hpp"
 #include "ci/ciMethodData.hpp"
 #include "ci/ciMethodHandle.hpp"
+#include "ci/ciMethodType.hpp"
 #include "ci/ciMethodKlass.hpp"
 #include "ci/ciNullObject.hpp"
 #include "ci/ciObjArray.hpp"
@@ -349,6 +350,8 @@
       return new (arena()) ciMemberName(h_i);
     else if (java_lang_invoke_MethodHandle::is_instance(o))
       return new (arena()) ciMethodHandle(h_i);
+    else if (java_lang_invoke_MethodType::is_instance(o))
+      return new (arena()) ciMethodType(h_i);
     else
       return new (arena()) ciInstance(h_i);
   } else if (o->is_objArray()) {
--- a/src/share/vm/ci/ciSignature.cpp	Thu Nov 08 18:46:17 2012 -0800
+++ b/src/share/vm/ci/ciSignature.cpp	Fri Nov 09 07:31:56 2012 -0800
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "ci/ciMethodType.hpp"
 #include "ci/ciSignature.hpp"
 #include "ci/ciUtilities.hpp"
 #include "memory/allocation.inline.hpp"
@@ -80,6 +81,24 @@
 }
 
 // ------------------------------------------------------------------
+// ciSignature::ciSignature
+ciSignature::ciSignature(ciKlass* accessing_klass, ciSymbol* symbol, ciMethodType* method_type) :
+  _symbol(symbol),
+  _accessing_klass(accessing_klass),
+  _size( method_type->ptype_slot_count()),
+  _count(method_type->ptype_count())
+{
+  ASSERT_IN_VM;
+  EXCEPTION_CONTEXT;
+  Arena* arena = CURRENT_ENV->arena();
+  _types = new (arena) GrowableArray<ciType*>(arena, _count + 1, 0, NULL);
+  for (int i = 0; i < _count; i++) {
+    _types->append(method_type->ptype_at(i));
+  }
+  _types->append(method_type->rtype());
+}
+
+// ------------------------------------------------------------------
 // ciSignature::return_type
 //
 // What is the return type of this signature?
--- a/src/share/vm/ci/ciSignature.hpp	Thu Nov 08 18:46:17 2012 -0800
+++ b/src/share/vm/ci/ciSignature.hpp	Fri Nov 09 07:31:56 2012 -0800
@@ -47,6 +47,7 @@
   friend class ciObjectFactory;
 
   ciSignature(ciKlass* accessing_klass, constantPoolHandle cpool, ciSymbol* signature);
+  ciSignature(ciKlass* accessing_klass,                           ciSymbol* signature, ciMethodType* method_type);
 
   void get_all_klasses();
 
--- a/src/share/vm/ci/ciStreams.cpp	Thu Nov 08 18:46:17 2012 -0800
+++ b/src/share/vm/ci/ciStreams.cpp	Fri Nov 09 07:31:56 2012 -0800
@@ -363,12 +363,15 @@
   constantPoolHandle cpool(_method->get_methodOop()->constants());
   ciMethod* m = env->get_method_by_index(cpool, get_method_index(), cur_bc(), _holder);
   will_link = m->is_loaded();
-  // Get declared method signature and return it.
-  if (has_optional_appendix()) {
-    const int sig_index = get_method_signature_index();
-    Symbol* sig_sym = cpool->symbol_at(sig_index);
-    ciKlass* pool_holder = env->get_object(cpool->pool_holder())->as_klass();
-    (*declared_signature_result) = new (env->arena()) ciSignature(pool_holder, cpool, env->get_symbol(sig_sym));
+
+  // Use the MethodType stored in the CP cache to create a signature
+  // with correct types (with respect to class loaders).
+  if (has_method_type()) {
+    ciSymbol*     sig_sym     = env->get_symbol(cpool->symbol_at(get_method_signature_index()));
+    ciKlass*      pool_holder = env->get_object(cpool->pool_holder())->as_klass();
+    ciMethodType* method_type = get_method_type();
+    ciSignature* declared_signature = new (env->arena()) ciSignature(pool_holder, sig_sym, method_type);
+    (*declared_signature_result) = declared_signature;
   } else {
     (*declared_signature_result) = m->signature();
   }
@@ -399,6 +402,31 @@
 }
 
 // ------------------------------------------------------------------
+// ciBytecodeStream::has_method_type
+//
+// Returns true if there is a MethodType argument stored in the
+// constant pool cache at the current bci.
+bool ciBytecodeStream::has_method_type() {
+  GUARDED_VM_ENTRY(
+    constantPoolHandle cpool(_method->get_methodOop()->constants());
+    return constantPoolOopDesc::has_method_type_at_if_loaded(cpool, get_method_index());
+  )
+}
+
+// ------------------------------------------------------------------
+// ciBytecodeStream::get_method_type
+//
+// Return the MethodType stored in the constant pool cache at
+// the current bci.
+ciMethodType* ciBytecodeStream::get_method_type() {
+  GUARDED_VM_ENTRY(
+    constantPoolHandle cpool(_method->get_methodOop()->constants());
+    oop method_type_oop = constantPoolOopDesc::method_type_at_if_loaded(cpool, get_method_index());
+    return CURRENT_ENV->get_object(method_type_oop)->as_method_type();
+  )
+}
+
+// ------------------------------------------------------------------
 // ciBytecodeStream::get_declared_method_holder
 //
 // Get the declared holder of the currently referenced method.
--- a/src/share/vm/ci/ciStreams.hpp	Thu Nov 08 18:46:17 2012 -0800
+++ b/src/share/vm/ci/ciStreams.hpp	Fri Nov 09 07:31:56 2012 -0800
@@ -259,12 +259,14 @@
   int      get_field_holder_index();
   int      get_field_signature_index();
 
-  ciMethod* get_method(bool& will_link, ciSignature* *declared_signature_result);
-  bool      has_appendix();
-  ciObject* get_appendix();
-  ciKlass*  get_declared_method_holder();
-  int       get_method_holder_index();
-  int       get_method_signature_index();
+  ciMethod*     get_method(bool& will_link, ciSignature* *declared_signature_result);
+  bool          has_appendix();
+  ciObject*     get_appendix();
+  bool          has_method_type();
+  ciMethodType* get_method_type();
+  ciKlass*      get_declared_method_holder();
+  int           get_method_holder_index();
+  int           get_method_signature_index();
 
   ciCPCache*  get_cpcache() const;
   ciCallSite* get_call_site();
--- a/src/share/vm/classfile/systemDictionary.cpp	Thu Nov 08 18:46:17 2012 -0800
+++ b/src/share/vm/classfile/systemDictionary.cpp	Fri Nov 09 07:31:56 2012 -0800
@@ -2371,7 +2371,8 @@
 methodHandle SystemDictionary::find_method_handle_invoker(Symbol* name,
                                                           Symbol* signature,
                                                           KlassHandle accessing_klass,
-                                                          Handle* appendix_result,
+                                                          Handle *appendix_result,
+                                                          Handle *method_type_result,
                                                           TRAPS) {
   methodHandle empty;
   assert(EnableInvokeDynamic, "");
@@ -2403,6 +2404,7 @@
                          vmSymbols::linkMethod_signature(),
                          &args, CHECK_(empty));
   Handle mname(THREAD, (oop) result.get_jobject());
+  (*method_type_result) = method_type;
   return unpack_method_and_appendix(mname, appendix_box, appendix_result, THREAD);
 }
 
@@ -2549,7 +2551,8 @@
                                                               Handle bootstrap_specifier,
                                                               Symbol* name,
                                                               Symbol* type,
-                                                              Handle* appendix_result,
+                                                              Handle *appendix_result,
+                                                              Handle *method_type_result,
                                                               TRAPS) {
   methodHandle empty;
   Handle bsm, info;
@@ -2592,6 +2595,7 @@
                          vmSymbols::linkCallSite_signature(),
                          &args, CHECK_(empty));
   Handle mname(THREAD, (oop) result.get_jobject());
+  (*method_type_result) = method_type;
   return unpack_method_and_appendix(mname, appendix_box, appendix_result, THREAD);
 }
 
--- a/src/share/vm/classfile/systemDictionary.hpp	Thu Nov 08 18:46:17 2012 -0800
+++ b/src/share/vm/classfile/systemDictionary.hpp	Fri Nov 09 07:31:56 2012 -0800
@@ -483,6 +483,7 @@
                                                  Symbol* signature,
                                                  KlassHandle accessing_klass,
                                                  Handle *appendix_result,
+                                                 Handle *method_type_result,
                                                  TRAPS);
   // for a given signature, find the internal MethodHandle method (linkTo* or invokeBasic)
   // (does not ask Java, since this is a low-level intrinsic defined by the JVM)
@@ -509,6 +510,7 @@
                                                      Symbol* name,
                                                      Symbol* type,
                                                      Handle *appendix_result,
+                                                     Handle *method_type_result,
                                                      TRAPS);
 
   // Utility for printing loader "name" as part of tracing constraints
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Nov 08 18:46:17 2012 -0800
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Fri Nov 09 07:31:56 2012 -0800
@@ -1190,28 +1190,14 @@
 // liveness counting data.
 class CMCountDataClosureBase: public HeapRegionClosure {
 protected:
+  G1CollectedHeap* _g1h;
   ConcurrentMark* _cm;
+  CardTableModRefBS* _ct_bs;
+
   BitMap* _region_bm;
   BitMap* _card_bm;
 
-  void set_card_bitmap_range(BitMap::idx_t start_idx, BitMap::idx_t last_idx) {
-    assert(start_idx <= last_idx, "sanity");
-
-    // Set the inclusive bit range [start_idx, last_idx].
-    // For small ranges (up to 8 cards) use a simple loop; otherwise
-    // use par_at_put_range.
-    if ((last_idx - start_idx) < 8) {
-      for (BitMap::idx_t i = start_idx; i <= last_idx; i += 1) {
-        _card_bm->par_set_bit(i);
-      }
-    } else {
-      assert(last_idx < _card_bm->size(), "sanity");
-      // Note BitMap::par_at_put_range() is exclusive.
-      _card_bm->par_at_put_range(start_idx, last_idx+1, true);
-    }
-  }
-
-  // It takes a region that's not empty (i.e., it has at least one
+  // Takes a region that's not empty (i.e., it has at least one
   // live object in it and sets its corresponding bit on the region
   // bitmap to 1. If the region is "starts humongous" it will also set
   // to 1 the bits on the region bitmap that correspond to its
@@ -1232,9 +1218,11 @@
   }
 
 public:
-  CMCountDataClosureBase(ConcurrentMark *cm,
+  CMCountDataClosureBase(G1CollectedHeap* g1h,
                          BitMap* region_bm, BitMap* card_bm):
-    _cm(cm), _region_bm(region_bm), _card_bm(card_bm) { }
+    _g1h(g1h), _cm(g1h->concurrent_mark()),
+    _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
+    _region_bm(region_bm), _card_bm(card_bm) { }
 };
 
 // Closure that calculates the # live objects per region. Used
@@ -1244,9 +1232,9 @@
   size_t _region_marked_bytes;
 
 public:
-  CalcLiveObjectsClosure(CMBitMapRO *bm, ConcurrentMark *cm,
+  CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
                          BitMap* region_bm, BitMap* card_bm) :
-    CMCountDataClosureBase(cm, region_bm, card_bm),
+    CMCountDataClosureBase(g1h, region_bm, card_bm),
     _bm(bm), _region_marked_bytes(0) { }
 
   bool doHeapRegion(HeapRegion* hr) {
@@ -1262,44 +1250,63 @@
       return false;
     }
 
-    HeapWord* nextTop = hr->next_top_at_mark_start();
-    HeapWord* start   = hr->bottom();
-
-    assert(start <= hr->end() && start <= nextTop && nextTop <= hr->end(),
+    HeapWord* ntams = hr->next_top_at_mark_start();
+    HeapWord* start = hr->bottom();
+
+    assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
            err_msg("Preconditions not met - "
-                   "start: "PTR_FORMAT", nextTop: "PTR_FORMAT", end: "PTR_FORMAT,
-                   start, nextTop, hr->end()));
+                   "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
+                   start, ntams, hr->end()));
 
     // Find the first marked object at or after "start".
-    start = _bm->getNextMarkedWordAddress(start, nextTop);
+    start = _bm->getNextMarkedWordAddress(start, ntams);
 
     size_t marked_bytes = 0;
 
-    while (start < nextTop) {
+    while (start < ntams) {
       oop obj = oop(start);
       int obj_sz = obj->size();
-      HeapWord* obj_last = start + obj_sz - 1;
+      HeapWord* obj_end = start + obj_sz;
 
       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
-      BitMap::idx_t last_idx = _cm->card_bitmap_index_for(obj_last);
-
-      // Set the bits in the card BM for this object (inclusive).
-      set_card_bitmap_range(start_idx, last_idx);
+      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);
+
+      // Note: if we're looking at the last region in the heap - obj_end
+      // could be actually just beyond the end of the heap; end_idx
+      // will then correspond to a (non-existent) card that is also
+      // just beyond the heap.
+      if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
+        // end of object is not card aligned - increment to cover
+        // all the cards spanned by the object
+        end_idx += 1;
+      }
+
+      // Set the bits in the card BM for the cards spanned by this object.
+      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
 
       // Add the size of this object to the number of marked bytes.
       marked_bytes += (size_t)obj_sz * HeapWordSize;
 
       // Find the next marked object after this one.
-      start = _bm->getNextMarkedWordAddress(obj_last + 1, nextTop);
+      start = _bm->getNextMarkedWordAddress(obj_end, ntams);
     }
 
     // Mark the allocated-since-marking portion...
     HeapWord* top = hr->top();
-    if (nextTop < top) {
-      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(nextTop);
-      BitMap::idx_t last_idx = _cm->card_bitmap_index_for(top - 1);
-
-      set_card_bitmap_range(start_idx, last_idx);
+    if (ntams < top) {
+      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
+      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
+
+      // Note: if we're looking at the last region in the heap - top
+      // could be actually just beyond the end of the heap; end_idx
+      // will then correspond to a (non-existent) card that is also
+      // just beyond the heap.
+      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
+        // end of object is not card aligned - increment to cover
+        // all the cards spanned by the object
+        end_idx += 1;
+      }
+      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
 
       // This definitely means the region has live objects.
       set_bit_for_region(hr);
@@ -1326,6 +1333,7 @@
 // regions during the STW cleanup pause.
 
 class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
+  G1CollectedHeap* _g1h;
   ConcurrentMark* _cm;
   CalcLiveObjectsClosure _calc_cl;
   BitMap* _region_bm;   // Region BM to be verified
@@ -1338,14 +1346,14 @@
   int _failures;
 
 public:
-  VerifyLiveObjectDataHRClosure(ConcurrentMark* cm,
+  VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
                                 BitMap* region_bm,
                                 BitMap* card_bm,
                                 BitMap* exp_region_bm,
                                 BitMap* exp_card_bm,
                                 bool verbose) :
-    _cm(cm),
-    _calc_cl(_cm->nextMarkBitMap(), _cm, exp_region_bm, exp_card_bm),
+    _g1h(g1h), _cm(g1h->concurrent_mark()),
+    _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
     _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
     _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
     _failures(0) { }
@@ -1492,7 +1500,7 @@
   void work(uint worker_id) {
     assert(worker_id < _n_workers, "invariant");
 
-    VerifyLiveObjectDataHRClosure verify_cl(_cm,
+    VerifyLiveObjectDataHRClosure verify_cl(_g1h,
                                             _actual_region_bm, _actual_card_bm,
                                             _expected_region_bm,
                                             _expected_card_bm,
@@ -1522,10 +1530,10 @@
 
 class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
  public:
-  FinalCountDataUpdateClosure(ConcurrentMark* cm,
+  FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
                               BitMap* region_bm,
                               BitMap* card_bm) :
-    CMCountDataClosureBase(cm, region_bm, card_bm) { }
+    CMCountDataClosureBase(g1h, region_bm, card_bm) { }
 
   bool doHeapRegion(HeapRegion* hr) {
 
@@ -1549,12 +1557,30 @@
     if (ntams < top) {
       // This definitely means the region has live objects.
       set_bit_for_region(hr);
-    }
-
-    // Now set the bits for [ntams, top]
-    BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
-    BitMap::idx_t last_idx = _cm->card_bitmap_index_for(top);
-    set_card_bitmap_range(start_idx, last_idx);
+
+      // Now set the bits in the card bitmap for [ntams, top)
+      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
+      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
+
+      // Note: if we're looking at the last region in the heap - top
+      // could be actually just beyond the end of the heap; end_idx
+      // will then correspond to a (non-existent) card that is also
+      // just beyond the heap.
+      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
+        // end of object is not card aligned - increment to cover
+        // all the cards spanned by the object
+        end_idx += 1;
+      }
+
+      assert(end_idx <= _card_bm->size(),
+             err_msg("oob: end_idx=  "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
+                     end_idx, _card_bm->size()));
+      assert(start_idx < _card_bm->size(),
+             err_msg("oob: start_idx=  "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
+                     start_idx, _card_bm->size()));
+
+      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
+     }
 
     // Set the bit for the region if it contains live data
     if (hr->next_marked_bytes() > 0) {
@@ -1594,7 +1620,7 @@
   void work(uint worker_id) {
     assert(worker_id < _n_workers, "invariant");
 
-    FinalCountDataUpdateClosure final_update_cl(_cm,
+    FinalCountDataUpdateClosure final_update_cl(_g1h,
                                                 _actual_region_bm,
                                                 _actual_card_bm);
 
@@ -2834,20 +2860,19 @@
 // Aggregate the counting data that was constructed concurrently
 // with marking.
 class AggregateCountDataHRClosure: public HeapRegionClosure {
+  G1CollectedHeap* _g1h;
   ConcurrentMark* _cm;
+  CardTableModRefBS* _ct_bs;
   BitMap* _cm_card_bm;
   size_t _max_task_num;
 
  public:
-  AggregateCountDataHRClosure(ConcurrentMark *cm,
+  AggregateCountDataHRClosure(G1CollectedHeap* g1h,
                               BitMap* cm_card_bm,
                               size_t max_task_num) :
-    _cm(cm), _cm_card_bm(cm_card_bm),
-    _max_task_num(max_task_num) { }
-
-  bool is_card_aligned(HeapWord* p) {
-    return ((uintptr_t(p) & (CardTableModRefBS::card_size - 1)) == 0);
-  }
+    _g1h(g1h), _cm(g1h->concurrent_mark()),
+    _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
+    _cm_card_bm(cm_card_bm), _max_task_num(max_task_num) { }
 
   bool doHeapRegion(HeapRegion* hr) {
     if (hr->continuesHumongous()) {
@@ -2878,16 +2903,23 @@
       return false;
     }
 
-    assert(is_card_aligned(start), "sanity");
-    assert(is_card_aligned(end), "sanity");
+    // 'start' should be in the heap.
+    assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
+    // 'end' *may* be just beyond the end of the heap (if hr is the last region)
+    assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
 
     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
     BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
 
-    // If ntams is not card aligned then we bump the index for
-    // limit so that we get the card spanning ntams.
-    if (!is_card_aligned(limit)) {
+    // If ntams is not card aligned then we bump card bitmap index
+    // for limit so that we get all the cards spanned by
+    // the object ending at ntams.
+    // Note: if this is the last region in the heap then ntams
+    // could be actually just beyond the end of the heap;
+    // limit_idx will then correspond to a (non-existent) card
+    // that is also outside the heap.
+    if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
       limit_idx += 1;
     }
 
@@ -2916,7 +2948,7 @@
 
         // BitMap::get_next_one_offset() can handle the case when
         // its left_offset parameter is greater than its right_offset
-        // parameter. If does, however, have an early exit if
+        // parameter. It does, however, have an early exit if
         // left_offset == right_offset. So let's limit the value
         // passed in for left offset here.
         BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
@@ -2952,7 +2984,7 @@
     _active_workers(n_workers) { }
 
   void work(uint worker_id) {
-    AggregateCountDataHRClosure cl(_cm, _cm_card_bm, _max_task_num);
+    AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_task_num);
 
     if (G1CollectedHeap::use_parallel_gc_threads()) {
       _g1h->heap_region_par_iterate_chunked(&cl, worker_id,
--- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Thu Nov 08 18:46:17 2012 -0800
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Fri Nov 09 07:31:56 2012 -0800
@@ -806,7 +806,14 @@
     return _MARKING_VERBOSE_ && _verbose_level >= high_verbose;
   }
 
-  // Counting data structure accessors
+  // Liveness counting
+
+  // Utility routine to set an exclusive range of cards on the given
+  // card liveness bitmap
+  inline void set_card_bitmap_range(BitMap* card_bm,
+                                    BitMap::idx_t start_idx,
+                                    BitMap::idx_t end_idx,
+                                    bool is_par);
 
   // Returns the card number of the bottom of the G1 heap.
   // Used in biasing indices into accounting card bitmaps.
--- a/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Thu Nov 08 18:46:17 2012 -0800
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Fri Nov 09 07:31:56 2012 -0800
@@ -28,6 +28,42 @@
 #include "gc_implementation/g1/concurrentMark.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 
+// Utility routine to set an exclusive range of cards on the given
+// card liveness bitmap
+inline void ConcurrentMark::set_card_bitmap_range(BitMap* card_bm,
+                                                  BitMap::idx_t start_idx,
+                                                  BitMap::idx_t end_idx,
+                                                  bool is_par) {
+
+  // Set the exclusive bit range [start_idx, end_idx).
+  assert((end_idx - start_idx) > 0, "at least one card");
+  assert(end_idx <= card_bm->size(), "sanity");
+
+  // Silently clip the end index
+  end_idx = MIN2(end_idx, card_bm->size());
+
+  // For small ranges use a simple loop; otherwise use set_range or
+  // use par_at_put_range (if parallel). The range is made up of the
+  // cards that are spanned by an object/mem region so 8 cards will
+  // allow object sizes up to 4K to be handled using the loop.
+  if ((end_idx - start_idx) <= 8) {
+    for (BitMap::idx_t i = start_idx; i < end_idx; i += 1) {
+      if (is_par) {
+        card_bm->par_set_bit(i);
+      } else {
+        card_bm->set_bit(i);
+      }
+    }
+  } else {
+    // Note BitMap::par_at_put_range() and BitMap::set_range() are exclusive.
+    if (is_par) {
+      card_bm->par_at_put_range(start_idx, end_idx, true);
+    } else {
+      card_bm->set_range(start_idx, end_idx);
+    }
+  }
+}
+
 // Returns the index in the liveness accounting card bitmap
 // for the given address
 inline BitMap::idx_t ConcurrentMark::card_bitmap_index_for(HeapWord* addr) {
@@ -35,7 +71,6 @@
   // by the card shift -- address 0 corresponds to card number 0.  One
   // must subtract the card num of the bottom of the heap to obtain a
   // card table index.
-
   intptr_t card_num = intptr_t(uintptr_t(addr) >> CardTableModRefBS::card_shift);
   return card_num - heap_bottom_card_num();
 }
@@ -46,8 +81,10 @@
                                          size_t* marked_bytes_array,
                                          BitMap* task_card_bm) {
   G1CollectedHeap* g1h = _g1h;
+  CardTableModRefBS* ct_bs = (CardTableModRefBS*) (g1h->barrier_set());
+
   HeapWord* start = mr.start();
-  HeapWord* last = mr.last();
+  HeapWord* end = mr.end();
   size_t region_size_bytes = mr.byte_size();
   uint index = hr->hrs_index();
 
@@ -61,24 +98,21 @@
   marked_bytes_array[index] += region_size_bytes;
 
   BitMap::idx_t start_idx = card_bitmap_index_for(start);
-  BitMap::idx_t last_idx = card_bitmap_index_for(last);
+  BitMap::idx_t end_idx = card_bitmap_index_for(end);
 
-  // The card bitmap is task/worker specific => no need to use 'par' routines.
-  // Set bits in the inclusive bit range [start_idx, last_idx].
-  //
-  // For small ranges use a simple loop; otherwise use set_range
-  // The range are the cards that are spanned by the object/region
-  // so 8 cards will allow objects/regions up to 4K to be handled
-  // using the loop.
-  if ((last_idx - start_idx) <= 8) {
-    for (BitMap::idx_t i = start_idx; i <= last_idx; i += 1) {
-     task_card_bm->set_bit(i);
-    }
-  } else {
-    assert(last_idx < task_card_bm->size(), "sanity");
-    // Note: BitMap::set_range() is exclusive.
-    task_card_bm->set_range(start_idx, last_idx+1);
+  // Note: if we're looking at the last region in the heap - end
+  // could be actually just beyond the end of the heap; end_idx
+  // will then correspond to a (non-existent) card that is also
+  // just beyond the heap.
+  if (g1h->is_in_g1_reserved(end) && !ct_bs->is_card_aligned(end)) {
+    // end of region is not card aligned - increment to cover
+    // all the cards spanned by the region.
+    end_idx += 1;
   }
+  // The card bitmap is task/worker specific => no need to use
+  // the 'par' BitMap routines.
+  // Set bits in the exclusive bit range [start_idx, end_idx).
+  set_card_bitmap_range(task_card_bm, start_idx, end_idx, false /* is_par */);
 }
 
 // Counts the given memory region in the task/worker counting
--- a/src/share/vm/interpreter/interpreterRuntime.cpp	Thu Nov 08 18:46:17 2012 -0800
+++ b/src/share/vm/interpreter/interpreterRuntime.cpp	Fri Nov 09 07:31:56 2012 -0800
@@ -746,7 +746,8 @@
   cache_entry(thread)->set_method_handle(
       pool,
       info.resolved_method(),
-      info.resolved_appendix());
+      info.resolved_appendix(),
+      info.resolved_method_type());
 }
 IRT_END
 
@@ -773,7 +774,8 @@
   pool->cache()->secondary_entry_at(index)->set_dynamic_call(
       pool,
       info.resolved_method(),
-      info.resolved_appendix());
+      info.resolved_appendix(),
+      info.resolved_method_type());
 }
 IRT_END
 
--- a/src/share/vm/interpreter/linkResolver.cpp	Thu Nov 08 18:46:17 2012 -0800
+++ b/src/share/vm/interpreter/linkResolver.cpp	Fri Nov 09 07:31:56 2012 -0800
@@ -99,7 +99,7 @@
   assert(!resolved_method->is_compiled_lambda_form(), "these must be handled via an invokehandle call");
 }
 
-void CallInfo::set_handle(methodHandle resolved_method, Handle resolved_appendix, TRAPS) {
+void CallInfo::set_handle(methodHandle resolved_method, Handle resolved_appendix, Handle resolved_method_type, TRAPS) {
   if (resolved_method.is_null()) {
     THROW_MSG(vmSymbols::java_lang_InternalError(), "resolved method is null");
   }
@@ -110,7 +110,8 @@
   int vtable_index = methodOopDesc::nonvirtual_vtable_index;
   assert(resolved_method->vtable_index() == vtable_index, "");
   set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, vtable_index, CHECK);
-  _resolved_appendix = resolved_appendix;
+  _resolved_appendix    = resolved_appendix;
+  _resolved_method_type = resolved_method_type;
 }
 
 void CallInfo::set_common(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS) {
@@ -221,7 +222,8 @@
 void LinkResolver::lookup_polymorphic_method(methodHandle& result,
                                              KlassHandle klass, Symbol* name, Symbol* full_signature,
                                              KlassHandle current_klass,
-                                             Handle* appendix_result_or_null,
+                                             Handle *appendix_result_or_null,
+                                             Handle *method_type_result,
                                              TRAPS) {
   vmIntrinsics::ID iid = MethodHandles::signature_polymorphic_name_id(name);
   if (TraceMethodHandles) {
@@ -275,10 +277,12 @@
       }
 
       Handle appendix;
+      Handle method_type;
       result = SystemDictionary::find_method_handle_invoker(name,
                                                             full_signature,
                                                             current_klass,
                                                             &appendix,
+                                                            &method_type,
                                                             CHECK);
       if (TraceMethodHandles) {
         tty->print("lookup_polymorphic_method => (via Java) ");
@@ -307,6 +311,7 @@
 
         assert(appendix_result_or_null != NULL, "");
         (*appendix_result_or_null) = appendix;
+        (*method_type_result)      = method_type;
         return;
       }
     }
@@ -419,7 +424,7 @@
     if (resolved_method.is_null()) {
       // JSR 292:  see if this is an implicitly generated method MethodHandle.linkToVirtual(*...), etc
       lookup_polymorphic_method(resolved_method, resolved_klass, method_name, method_signature,
-                                current_klass, (Handle*)NULL, THREAD);
+                                current_klass, (Handle*)NULL, (Handle*)NULL, THREAD);
       if (HAS_PENDING_EXCEPTION) {
         nested_exception = Handle(THREAD, PENDING_EXCEPTION);
         CLEAR_PENDING_EXCEPTION;
@@ -1207,11 +1212,12 @@
   assert(resolved_klass() == SystemDictionary::MethodHandle_klass(), "");
   assert(MethodHandles::is_signature_polymorphic_name(method_name), "");
   methodHandle resolved_method;
-  Handle resolved_appendix;
+  Handle       resolved_appendix;
+  Handle       resolved_method_type;
   lookup_polymorphic_method(resolved_method, resolved_klass,
                             method_name, method_signature,
-                            current_klass, &resolved_appendix, CHECK);
-  result.set_handle(resolved_method, resolved_appendix, CHECK);
+                            current_klass, &resolved_appendix, &resolved_method_type, CHECK);
+  result.set_handle(resolved_method, resolved_appendix, resolved_method_type, CHECK);
 }
 
 
@@ -1219,7 +1225,7 @@
   assert(EnableInvokeDynamic, "");
   pool->set_invokedynamic();    // mark header to flag active call sites
 
-  //resolve_pool(<resolved_klass>, method_name,  method_signature, current_klass, pool, index, CHECK);
+  //resolve_pool(<resolved_klass>, method_name, method_signature, current_klass, pool, index, CHECK);
   Symbol* method_name       = pool->name_ref_at(index);
   Symbol* method_signature  = pool->signature_ref_at(index);
   KlassHandle current_klass = KlassHandle(THREAD, pool->pool_holder());
@@ -1236,9 +1242,10 @@
     bootstrap_specifier = Handle(THREAD, bsm_info);
   }
   if (!cpce->is_f1_null()) {
-    methodHandle method(THREAD, cpce->f2_as_vfinal_method());
-    Handle appendix(THREAD, cpce->has_appendix() ? cpce->f1_appendix() : (oop)NULL);
-    result.set_handle(method, appendix, CHECK);
+    methodHandle method(     THREAD, cpce->f2_as_vfinal_method());
+    Handle       appendix(   THREAD, cpce->appendix_if_resolved(pool));
+    Handle       method_type(THREAD, cpce->method_type_if_resolved(pool));
+    result.set_handle(method, appendix, method_type, CHECK);
     return;
   }
 
@@ -1260,11 +1267,13 @@
   // JSR 292:  this must resolve to an implicitly generated method MH.linkToCallSite(*...)
   // The appendix argument is likely to be a freshly-created CallSite.
   Handle       resolved_appendix;
+  Handle       resolved_method_type;
   methodHandle resolved_method =
     SystemDictionary::find_dynamic_call_site_invoker(current_klass,
                                                      bootstrap_specifier,
                                                      method_name, method_signature,
                                                      &resolved_appendix,
+                                                     &resolved_method_type,
                                                      THREAD);
   if (HAS_PENDING_EXCEPTION) {
     if (TraceMethodHandles) {
@@ -1284,7 +1293,7 @@
     CLEAR_PENDING_EXCEPTION;
     THROW_CAUSE(vmSymbols::java_lang_BootstrapMethodError(), nested_exception)
   }
-  result.set_handle(resolved_method, resolved_appendix, CHECK);
+  result.set_handle(resolved_method, resolved_appendix, resolved_method_type, CHECK);
 }
 
 //------------------------------------------------------------------------------------------------------------------------
--- a/src/share/vm/interpreter/linkResolver.hpp	Thu Nov 08 18:46:17 2012 -0800
+++ b/src/share/vm/interpreter/linkResolver.hpp	Fri Nov 09 07:31:56 2012 -0800
@@ -76,12 +76,13 @@
   methodHandle _selected_method;        // dynamic (actual) target method
   int          _vtable_index;           // vtable index of selected method
   Handle       _resolved_appendix;      // extra argument in constant pool (if CPCE::has_appendix)
+  Handle       _resolved_method_type;   // MethodType (for invokedynamic and invokehandle call sites)
 
-  void         set_static(   KlassHandle resolved_klass,                             methodHandle resolved_method                                                , TRAPS);
-  void         set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method                  , TRAPS);
-  void         set_virtual(  KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS);
-  void         set_handle(                                                           methodHandle resolved_method,   Handle resolved_appendix,                     TRAPS);
-  void         set_common(   KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS);
+  void         set_static(   KlassHandle resolved_klass,                             methodHandle resolved_method                                                       , TRAPS);
+  void         set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method                         , TRAPS);
+  void         set_virtual(  KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index       , TRAPS);
+  void         set_handle(                                                           methodHandle resolved_method, Handle resolved_appendix, Handle resolved_method_type, TRAPS);
+  void         set_common(   KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index       , TRAPS);
 
   friend class LinkResolver;
 
@@ -91,6 +92,7 @@
   methodHandle resolved_method() const           { return _resolved_method; }
   methodHandle selected_method() const           { return _selected_method; }
   Handle       resolved_appendix() const         { return _resolved_appendix; }
+  Handle       resolved_method_type() const      { return _resolved_method_type; }
 
   BasicType    result_type() const               { return selected_method()->result_type(); }
   bool         has_vtable_index() const          { return _vtable_index >= 0; }
@@ -113,7 +115,7 @@
   static void lookup_instance_method_in_klasses (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
   static void lookup_method_in_interfaces       (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
   static void lookup_polymorphic_method         (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature,
-                                                 KlassHandle current_klass, Handle* appendix_result_or_null, TRAPS);
+                                                 KlassHandle current_klass, Handle *appendix_result_or_null, Handle *method_type_result, TRAPS);
 
   static int vtable_index_of_miranda_method(KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
 
--- a/src/share/vm/interpreter/rewriter.cpp	Thu Nov 08 18:46:17 2012 -0800
+++ b/src/share/vm/interpreter/rewriter.cpp	Fri Nov 09 07:31:56 2012 -0800
@@ -163,10 +163,14 @@
       if (status == 0) {
         if (_pool->klass_ref_at_noresolve(cp_index) == vmSymbols::java_lang_invoke_MethodHandle() &&
             MethodHandles::is_signature_polymorphic_name(SystemDictionary::MethodHandle_klass(),
-                                                         _pool->name_ref_at(cp_index)))
+                                                         _pool->name_ref_at(cp_index))) {
+          assert(has_cp_cache(cp_index), "should already have an entry");
+          int cpc  = maybe_add_cp_cache_entry(cp_index);  // should already have an entry
+          int cpc2 = add_secondary_cp_cache_entry(cpc);
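+          // Note: this secondary entry is the slot that will later hold the
+          // resolved MethodType (see ConstantPoolCacheEntry::set_method_handle_common).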
           status = +1;
-        else
+        } else {
           status = -1;
+        }
         _method_handle_invokers[cp_index] = status;
       }
       // We use a special internal bytecode for such methods (if non-static).
@@ -195,6 +199,10 @@
     int cp_index = Bytes::get_Java_u2(p);
     int cpc  = maybe_add_cp_cache_entry(cp_index);  // add lazily
     int cpc2 = add_secondary_cp_cache_entry(cpc);
+    // The second secondary entry is required to store the MethodType and
+    // must be the next entry.
+    int cpc3 = add_secondary_cp_cache_entry(cpc);
+    assert(cpc2 + 1 == cpc3, err_msg_res("must be consecutive: %d + 1 == %d", cpc2, cpc3));
 
     // Replace the trailing four bytes with a CPC index for the dynamic
     // call site.  Unlike other CPC entries, there is one per bytecode,
--- a/src/share/vm/oops/constantPoolOop.cpp	Thu Nov 08 18:46:17 2012 -0800
+++ b/src/share/vm/oops/constantPoolOop.cpp	Fri Nov 09 07:31:56 2012 -0800
@@ -270,13 +270,7 @@
                                                    int which) {
   assert(!constantPoolCacheOopDesc::is_secondary_index(which), "no indy instruction here");
   if (cpool->cache() == NULL)  return NULL;  // nothing to load yet
-  int cache_index = get_cpcache_index(which);
-  if (!(cache_index >= 0 && cache_index < cpool->cache()->length())) {
-    if (PrintMiscellaneous && (Verbose||WizardMode)) {
-      tty->print_cr("bad operand %d in:", which); cpool->print();
-    }
-    return NULL;
-  }
+  int cache_index = decode_cpcache_index(which, true);
   ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index);
   return e->method_if_resolved(cpool);
 }
@@ -284,44 +278,33 @@
 
 bool constantPoolOopDesc::has_appendix_at_if_loaded(constantPoolHandle cpool, int which) {
   if (cpool->cache() == NULL)  return false;  // nothing to load yet
-  // XXX Is there a simpler way to get to the secondary entry?
-  ConstantPoolCacheEntry* e;
-  if (constantPoolCacheOopDesc::is_secondary_index(which)) {
-    e = cpool->cache()->secondary_entry_at(which);
-  } else {
-    int cache_index = get_cpcache_index(which);
-    if (!(cache_index >= 0 && cache_index < cpool->cache()->length())) {
-      if (PrintMiscellaneous && (Verbose||WizardMode)) {
-        tty->print_cr("bad operand %d in:", which); cpool->print();
-      }
-      return false;
-    }
-    e = cpool->cache()->entry_at(cache_index);
-  }
+  int cache_index = decode_cpcache_index(which, true);
+  ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index);
   return e->has_appendix();
 }
 
 
 oop constantPoolOopDesc::appendix_at_if_loaded(constantPoolHandle cpool, int which) {
   if (cpool->cache() == NULL)  return NULL;  // nothing to load yet
-  // XXX Is there a simpler way to get to the secondary entry?
-  ConstantPoolCacheEntry* e;
-  if (constantPoolCacheOopDesc::is_secondary_index(which)) {
-    e = cpool->cache()->secondary_entry_at(which);
-  } else {
-    int cache_index = get_cpcache_index(which);
-    if (!(cache_index >= 0 && cache_index < cpool->cache()->length())) {
-      if (PrintMiscellaneous && (Verbose||WizardMode)) {
-        tty->print_cr("bad operand %d in:", which); cpool->print();
-      }
-      return NULL;
-    }
-    e = cpool->cache()->entry_at(cache_index);
-  }
-  if (!e->has_appendix()) {
-    return NULL;
-  }
-  return e->f1_as_instance();
+  int cache_index = decode_cpcache_index(which, true);
+  ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index);
+  return e->appendix_if_resolved(cpool);
+}
+
+
+bool constantPoolOopDesc::has_method_type_at_if_loaded(constantPoolHandle cpool, int which) {
+  if (cpool->cache() == NULL)  return false;  // nothing to load yet
+  int cache_index = decode_cpcache_index(which, true);
+  ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index);
+  return e->has_method_type();
+}
+
+oop constantPoolOopDesc::method_type_at_if_loaded(constantPoolHandle cpool, int which) {
+  if (cpool->cache() == NULL)  return NULL;  // nothing to load yet
+  int cache_index = decode_cpcache_index(which, true);
+  ConstantPoolCacheEntry* e  = cpool->cache()->entry_at(cache_index);
+  ConstantPoolCacheEntry* e2 = cpool->cache()->find_secondary_entry_for(e);  // MethodType lives in the companion entry
+  return e2->method_type_if_resolved(cpool);
 }
 
 
--- a/src/share/vm/oops/constantPoolOop.hpp	Thu Nov 08 18:46:17 2012 -0800
+++ b/src/share/vm/oops/constantPoolOop.hpp	Fri Nov 09 07:31:56 2012 -0800
@@ -674,6 +674,8 @@
   static methodOop       method_at_if_loaded      (constantPoolHandle this_oop, int which);
   static bool      has_appendix_at_if_loaded      (constantPoolHandle this_oop, int which);
   static oop           appendix_at_if_loaded      (constantPoolHandle this_oop, int which);
+  static bool   has_method_type_at_if_loaded      (constantPoolHandle this_oop, int which);
+  static oop        method_type_at_if_loaded      (constantPoolHandle this_oop, int which);
   static klassOop         klass_at_if_loaded      (constantPoolHandle this_oop, int which);
   static klassOop     klass_ref_at_if_loaded      (constantPoolHandle this_oop, int which);
   // Same as above - but does LinkResolving.
@@ -704,6 +706,12 @@
 #endif //ASSERT
 
   static int get_cpcache_index(int index) { return index - CPCACHE_INDEX_TAG; }
+  static int decode_cpcache_index(int raw_index, bool invokedynamic_ok = false) {
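+    // invokedynamic call sites use encoded secondary indexes; everything else
+    // uses a main cache index biased by CPCACHE_INDEX_TAG.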
+    if (invokedynamic_ok && constantPoolCacheOopDesc::is_secondary_index(raw_index))
+      return constantPoolCacheOopDesc::decode_secondary_index(raw_index);
+    else
+      return get_cpcache_index(raw_index);
+  }
 
  private:
 
--- a/src/share/vm/oops/cpCacheOop.cpp	Thu Nov 08 18:46:17 2012 -0800
+++ b/src/share/vm/oops/cpCacheOop.cpp	Fri Nov 09 07:31:56 2012 -0800
@@ -266,21 +266,23 @@
 
 
 void ConstantPoolCacheEntry::set_method_handle(constantPoolHandle cpool,
-                                               methodHandle adapter, Handle appendix) {
+                                               methodHandle adapter,
+                                               Handle appendix, Handle method_type) {
   assert(!is_secondary_entry(), "");
-  set_method_handle_common(cpool, Bytecodes::_invokehandle, adapter, appendix);
+  set_method_handle_common(cpool, Bytecodes::_invokehandle, adapter, appendix, method_type);
 }
 
 void ConstantPoolCacheEntry::set_dynamic_call(constantPoolHandle cpool,
-                                              methodHandle adapter, Handle appendix) {
+                                              methodHandle adapter,
+                                              Handle appendix, Handle method_type) {
   assert(is_secondary_entry(), "");
-  set_method_handle_common(cpool, Bytecodes::_invokedynamic, adapter, appendix);
+  set_method_handle_common(cpool, Bytecodes::_invokedynamic, adapter, appendix, method_type);
 }
 
 void ConstantPoolCacheEntry::set_method_handle_common(constantPoolHandle cpool,
                                                       Bytecodes::Code invoke_code,
                                                       methodHandle adapter,
-                                                      Handle appendix) {
+                                                      Handle appendix, Handle method_type) {
   // NOTE: This CPCE can be the subject of data races.
   // There are three words to update: flags, f2, f1 (in that order).
   // Writers must store all other values before f1.
@@ -296,23 +298,28 @@
     return;
   }
 
-  bool has_appendix = appendix.not_null();
+  const bool has_appendix    = appendix.not_null();
+  const bool has_method_type = method_type.not_null();
+
   if (!has_appendix) {
     // The extra argument is not used, but we need a non-null value to signify linkage state.
     // Set it to something benign that will never leak memory.
     appendix = Universe::void_mirror();
   }
 
+  // Write the flags.
   set_method_flags(as_TosState(adapter->result_type()),
-                   ((has_appendix ?  1 : 0) << has_appendix_shift) |
-                   (                 1      << is_vfinal_shift)    |
-                   (                 1      << is_final_shift),
+                   ((has_appendix    ? 1 : 0) << has_appendix_shift)    |
+                   ((has_method_type ? 1 : 0) << has_method_type_shift) |
+                   (                   1      << is_vfinal_shift)       |
+                   (                   1      << is_final_shift),
                    adapter->size_of_parameters());
 
   if (TraceInvokeDynamic) {
-    tty->print_cr("set_method_handle bc=%d appendix="PTR_FORMAT"%s method="PTR_FORMAT" ",
+    tty->print_cr("set_method_handle bc=%d appendix="PTR_FORMAT"%s method_type="PTR_FORMAT"%s method="PTR_FORMAT" ",
                   invoke_code,
-                  (intptr_t)appendix(), (has_appendix ? "" : " (unused)"),
+                  (intptr_t)appendix(),    (has_appendix    ? "" : " (unused)"),
+                  (intptr_t)method_type(), (has_method_type ? "" : " (unused)"),
                   (intptr_t)adapter());
     adapter->print();
     if (has_appendix)  appendix()->print();
@@ -336,14 +343,31 @@
   // The fact that String and List are involved is encoded in the MethodType in f1.
   // This allows us to create fewer method oops, while keeping type safety.
   //
+
   set_f2_as_vfinal_method(adapter());
+
+  // Store MethodType, if any.
+  if (has_method_type) {
+    ConstantPoolCacheEntry* e2 = cpool->cache()->find_secondary_entry_for(this);
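+    // The companion secondary entry was allocated by the rewriter; its f1
+    // field publishes the MethodType.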
+
+    // Write the flags.
+    e2->set_method_flags(as_TosState(adapter->result_type()),
+                     ((has_method_type ? 1 : 0) << has_method_type_shift) |
+                     (                   1      << is_vfinal_shift)       |
+                     (                   1      << is_final_shift),
+                     adapter->size_of_parameters());
+    e2->release_set_f1(method_type());
+  }
+
   assert(appendix.not_null(), "needed for linkage state");
   release_set_f1(appendix());  // This must be the last one to set (see NOTE above)!
+
   if (!is_secondary_entry()) {
     // The interpreter assembly code does not check byte_2,
     // but it is used by is_resolved, method_if_resolved, etc.
     set_bytecode_2(invoke_code);
   }
+
   NOT_PRODUCT(verify(tty));
   if (TraceInvokeDynamic) {
     this->print(tty, 0);
@@ -401,6 +425,20 @@
 }
 
 
+oop ConstantPoolCacheEntry::appendix_if_resolved(constantPoolHandle cpool) {
+  if (is_f1_null() || !has_appendix())
+    return NULL;
+  return f1_appendix();
+}
+
+
+oop ConstantPoolCacheEntry::method_type_if_resolved(constantPoolHandle cpool) {
+  if (is_f1_null() || !has_method_type())
+    return NULL;
+  return f1_as_instance();
+}
+
+
 class LocalOopClosure: public OopClosure {
  private:
   void (*_f)(oop*);
--- a/src/share/vm/oops/cpCacheOop.hpp	Thu Nov 08 18:46:17 2012 -0800
+++ b/src/share/vm/oops/cpCacheOop.hpp	Fri Nov 09 07:31:56 2012 -0800
@@ -167,10 +167,11 @@
     tos_state_mask             = right_n_bits(tos_state_bits),
     tos_state_shift            = BitsPerInt - tos_state_bits,  // see verify_tos_state_shift below
     // misc. option bits; can be any bit position in [16..27]
-    is_vfinal_shift            = 21,
-    is_volatile_shift          = 22,
-    is_final_shift             = 23,
-    has_appendix_shift         = 24,
+    is_vfinal_shift            = 20,
+    is_volatile_shift          = 21,
+    is_final_shift             = 22,
+    has_appendix_shift         = 23,
+    has_method_type_shift      = 24,
     is_forced_virtual_shift    = 25,
     is_field_entry_shift       = 26,
     // low order bits give field index (for FieldInfo) or method parameter size:
@@ -224,13 +225,15 @@
   void set_method_handle(
     constantPoolHandle cpool,                    // holding constant pool (required for locking)
     methodHandle method,                         // adapter for invokeExact, etc.
-    Handle appendix                              // stored in f1; could be a java.lang.invoke.MethodType
+    Handle appendix,                             // stored in f1; could be a java.lang.invoke.MethodType
+    Handle method_type                           // stored in f1 (of secondary entry); is a java.lang.invoke.MethodType
   );
 
   void set_dynamic_call(
     constantPoolHandle cpool,                    // holding constant pool (required for locking)
     methodHandle method,                         // adapter for this call site
-    Handle appendix                              // stored in f1; could be a java.lang.invoke.CallSite
+    Handle appendix,                             // stored in f1; could be a java.lang.invoke.CallSite
+    Handle method_type                           // stored in f1 (of secondary entry); is a java.lang.invoke.MethodType
   );
 
   // Common code for invokedynamic and MH invocations.
@@ -252,10 +255,13 @@
     constantPoolHandle cpool,                    // holding constant pool (required for locking)
     Bytecodes::Code invoke_code,                 // _invokehandle or _invokedynamic
     methodHandle adapter,                        // invoker method (f2)
-    Handle appendix                              // appendix such as CallSite, MethodType, etc. (f1)
+    Handle appendix,                             // appendix such as CallSite, MethodType, etc. (f1)
+    Handle method_type                           // MethodType (f1 of secondary entry)
   );
 
-  methodOop method_if_resolved(constantPoolHandle cpool);
+  methodOop      method_if_resolved(constantPoolHandle cpool);
+  oop          appendix_if_resolved(constantPoolHandle cpool);
+  oop       method_type_if_resolved(constantPoolHandle cpool);
 
   void set_parameter_size(int value);
 
@@ -267,11 +273,11 @@
       case Bytecodes::_getfield        :    // fall through
       case Bytecodes::_invokespecial   :    // fall through
       case Bytecodes::_invokestatic    :    // fall through
+      case Bytecodes::_invokehandle    :    // fall through
+      case Bytecodes::_invokedynamic   :    // fall through
       case Bytecodes::_invokeinterface : return 1;
       case Bytecodes::_putstatic       :    // fall through
       case Bytecodes::_putfield        :    // fall through
-      case Bytecodes::_invokehandle    :    // fall through
-      case Bytecodes::_invokedynamic   :    // fall through
       case Bytecodes::_invokevirtual   : return 2;
       default                          : break;
     }
@@ -310,7 +316,8 @@
   int  parameter_size() const                    { assert(is_method_entry(), ""); return (_flags & parameter_size_mask); }
   bool is_volatile() const                       { return (_flags & (1 << is_volatile_shift))       != 0; }
   bool is_final() const                          { return (_flags & (1 << is_final_shift))          != 0; }
-  bool has_appendix() const                      { return (_flags & (1 << has_appendix_shift))     != 0; }
+  bool has_appendix() const                      { return (_flags & (1 << has_appendix_shift))      != 0; }
+  bool has_method_type() const                   { return (_flags & (1 << has_method_type_shift))   != 0; }
   bool is_forced_virtual() const                 { return (_flags & (1 << is_forced_virtual_shift)) != 0; }
   bool is_vfinal() const                         { return (_flags & (1 << is_vfinal_shift))         != 0; }
   bool is_method_entry() const                   { return (_flags & (1 << is_field_entry_shift))    == 0; }
@@ -445,6 +452,29 @@
     return entry_at(primary_index);
   }
 
+  int index_of(ConstantPoolCacheEntry* e) {
+    assert(base() <= e && e < base() + length(), "oob");
+    int cpc_index = (e - base());
+    assert(entry_at(cpc_index) == e, "sanity");
+    return cpc_index;
+  }
+  ConstantPoolCacheEntry* find_secondary_entry_for(ConstantPoolCacheEntry* e) {
+    const int cpc_index = index_of(e);
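+    // For a secondary (invokedynamic) entry the MethodType companion is the
+    // entry allocated right after it; for a primary (invokehandle) entry scan
+    // for the secondary entry whose main_entry_index points back at it.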
+    if (e->is_secondary_entry()) {
+      ConstantPoolCacheEntry* e2 = entry_at(cpc_index + 1);
+      assert(e->main_entry_index() == e2->main_entry_index(), "");
+      return e2;
+    } else {
+      for (int i = length() - 1; i >= 0; i--) {
+        ConstantPoolCacheEntry* e2 = entry_at(i);
+        if (cpc_index == e2->main_entry_index())
+          return e2;
+      }
+    }
+    fatal("no secondary entry found");
+    return NULL;
+  }
+
   // Code generation
   static ByteSize base_offset()                  { return in_ByteSize(sizeof(constantPoolCacheOopDesc)); }
   static ByteSize entry_offset(int raw_index) {
--- a/src/share/vm/opto/escape.cpp	Thu Nov 08 18:46:17 2012 -0800
+++ b/src/share/vm/opto/escape.cpp	Fri Nov 09 07:31:56 2012 -0800
@@ -1375,12 +1375,12 @@
     // Non-escaped allocation returned from Java or runtime call have
     // unknown values in fields.
     for (EdgeIterator i(pta); i.has_next(); i.next()) {
-      PointsToNode* ptn = i.get();
-      if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
-        if (add_edge(ptn, phantom_obj)) {
+      PointsToNode* field = i.get();
+      if (field->is_Field() && field->as_Field()->is_oop()) {
+        if (add_edge(field, phantom_obj)) {
           // New edge was added
           new_edges++;
-          add_field_uses_to_worklist(ptn->as_Field());
+          add_field_uses_to_worklist(field->as_Field());
         }
       }
     }
@@ -1402,30 +1402,30 @@
   // captured by Initialize node.
   //
   for (EdgeIterator i(pta); i.has_next(); i.next()) {
-    PointsToNode* ptn = i.get(); // Field (AddP)
-    if (!ptn->is_Field() || !ptn->as_Field()->is_oop())
+    PointsToNode* field = i.get(); // Field (AddP)
+    if (!field->is_Field() || !field->as_Field()->is_oop())
       continue; // Not oop field
-    int offset = ptn->as_Field()->offset();
+    int offset = field->as_Field()->offset();
     if (offset == Type::OffsetBot) {
       if (!visited_bottom_offset) {
         // OffsetBot is used to reference array's element,
         // always add reference to NULL to all Field nodes since we don't
         // known which element is referenced.
-        if (add_edge(ptn, null_obj)) {
+        if (add_edge(field, null_obj)) {
           // New edge was added
           new_edges++;
-          add_field_uses_to_worklist(ptn->as_Field());
+          add_field_uses_to_worklist(field->as_Field());
           visited_bottom_offset = true;
         }
       }
     } else {
       // Check only oop fields.
-      const Type* adr_type = ptn->ideal_node()->as_AddP()->bottom_type();
+      const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type();
       if (adr_type->isa_rawptr()) {
 #ifdef ASSERT
         // Raw pointers are used for initializing stores so skip it
         // since it should be recorded already
-        Node* base = get_addp_base(ptn->ideal_node());
+        Node* base = get_addp_base(field->ideal_node());
         assert(adr_type->isa_rawptr() && base->is_Proj() &&
                (base->in(0) == alloc),"unexpected pointer type");
 #endif
@@ -1435,10 +1435,54 @@
         offsets_worklist.append(offset);
         Node* value = NULL;
         if (ini != NULL) {
-          BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT;
-          Node* store = ini->find_captured_store(offset, type2aelembytes(ft), phase);
-          if (store != NULL && store->is_Store()) {
+          // StoreP::memory_type() == T_ADDRESS
+          BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS;
+          Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase);
+          // Make sure the initializing store has the same type as this AddP.
+          // This AddP may reference a non-existing field because it is on a
+          // dead branch of a bimorphic call that has not been eliminated yet.
+          if (store != NULL && store->is_Store() &&
+              store->as_Store()->memory_type() == ft) {
             value = store->in(MemNode::ValueIn);
+#ifdef ASSERT
+            if (VerifyConnectionGraph) {
+              // Verify that AddP already points to all objects the value points to.
+              PointsToNode* val = ptnode_adr(value->_idx);
+              assert((val != NULL), "should be processed already");
+              PointsToNode* missed_obj = NULL;
+              if (val->is_JavaObject()) {
+                if (!field->points_to(val->as_JavaObject())) {
+                  missed_obj = val;
+                }
+              } else {
+                if (!val->is_LocalVar() || (val->edge_count() == 0)) {
+                  tty->print_cr("----------init store has invalid value -----");
+                  store->dump();
+                  val->dump();
+                  assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
+                }
+                for (EdgeIterator j(val); j.has_next(); j.next()) {
+                  PointsToNode* obj = j.get();
+                  if (obj->is_JavaObject()) {
+                    if (!field->points_to(obj->as_JavaObject())) {
+                      missed_obj = obj;
+                      break;
+                    }
+                  }
+                }
+              }
+              if (missed_obj != NULL) {
+                tty->print_cr("----------field---------------------------------");
+                field->dump();
+                tty->print_cr("----------missed referernce to object-----------");
+                missed_obj->dump();
+                tty->print_cr("----------object referernced by init store -----");
+                store->dump();
+                val->dump();
+                assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
+              }
+            }
+#endif
           } else {
             // There could be initializing stores which follow allocation.
             // For example, a volatile field store is not collected
@@ -1451,10 +1495,10 @@
         }
         if (value == NULL) {
           // A field's initializing value was not recorded. Add NULL.
-          if (add_edge(ptn, null_obj)) {
+          if (add_edge(field, null_obj)) {
             // New edge was added
             new_edges++;
-            add_field_uses_to_worklist(ptn->as_Field());
+            add_field_uses_to_worklist(field->as_Field());
           }
         }
       }
@@ -1596,7 +1640,26 @@
       }
       // Verify that all fields have initializing values.
       if (field->edge_count() == 0) {
+        tty->print_cr("----------field does not have references----------");
         field->dump();
+        for (BaseIterator i(field); i.has_next(); i.next()) {
+          PointsToNode* base = i.get();
+          tty->print_cr("----------field has next base---------------------");
+          base->dump();
+          if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) {
+            tty->print_cr("----------base has fields-------------------------");
+            for (EdgeIterator j(base); j.has_next(); j.next()) {
+              j.get()->dump();
+            }
+            tty->print_cr("----------base has references---------------------");
+            for (UseIterator j(base); j.has_next(); j.next()) {
+              j.get()->dump();
+            }
+          }
+        }
+        for (UseIterator i(field); i.has_next(); i.next()) {
+          i.get()->dump();
+        }
         assert(field->edge_count() > 0, "sanity");
       }
     }
@@ -1956,7 +2019,7 @@
   if (is_JavaObject()) {
     return (this == ptn);
   }
-  assert(is_LocalVar(), "sanity");
+  assert(is_LocalVar() || is_Field(), "sanity");
   for (EdgeIterator i(this); i.has_next(); i.next()) {
     if (i.get() == ptn)
       return true;
@@ -3116,10 +3179,14 @@
     EscapeState fields_es = fields_escape_state();
     tty->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]);
     if (nt == PointsToNode::JavaObject && !this->scalar_replaceable())
-      tty->print("NSR");
+      tty->print("NSR ");
   }
   if (is_Field()) {
     FieldNode* f = (FieldNode*)this;
+    if (f->is_oop())
+      tty->print("oop ");
+    if (f->offset() > 0)
+      tty->print("+%d ", f->offset());
     tty->print("(");
     for (BaseIterator i(f); i.has_next(); i.next()) {
       PointsToNode* b = i.get();
--- a/src/share/vm/runtime/mutexLocker.cpp	Thu Nov 08 18:46:17 2012 -0800
+++ b/src/share/vm/runtime/mutexLocker.cpp	Fri Nov 09 07:31:56 2012 -0800
@@ -140,6 +140,7 @@
 Monitor* JfrMsg_lock                  = NULL;
 Mutex*   JfrBuffer_lock               = NULL;
 Mutex*   JfrStream_lock               = NULL;
+Monitor* PeriodicTask_lock            = NULL;
 
 #define MAX_NUM_MUTEX 128
 static Monitor * _mutex_array[MAX_NUM_MUTEX];
@@ -285,6 +286,7 @@
   def(JfrMsg_lock                  , Monitor, nonleaf+2,   true);
   def(JfrBuffer_lock               , Mutex,   nonleaf+3,   true);
   def(JfrStream_lock               , Mutex,   nonleaf+4,   true);
+  def(PeriodicTask_lock            , Monitor, nonleaf+5,   true);
 }
 
 GCMutexLocker::GCMutexLocker(Monitor * mutex) {
--- a/src/share/vm/runtime/mutexLocker.hpp	Thu Nov 08 18:46:17 2012 -0800
+++ b/src/share/vm/runtime/mutexLocker.hpp	Fri Nov 09 07:31:56 2012 -0800
@@ -142,6 +142,7 @@
 extern Monitor* JfrMsg_lock;                     // protects JFR messaging
 extern Mutex*   JfrBuffer_lock;                  // protects JFR buffer operations
 extern Mutex*   JfrStream_lock;                  // protects JFR stream access
+extern Monitor* PeriodicTask_lock;               // protects the periodic task structure
 
 // A MutexLocker provides mutual exclusion with respect to a given mutex
 // for the scope which contains the locker.  The lock is an OS lock, not
--- a/src/share/vm/runtime/task.cpp	Thu Nov 08 18:46:17 2012 -0800
+++ b/src/share/vm/runtime/task.cpp	Fri Nov 09 07:31:56 2012 -0800
@@ -61,7 +61,7 @@
 }
 #endif
 
-void PeriodicTask::real_time_tick(size_t delay_time) {
+void PeriodicTask::real_time_tick(int delay_time) {
 #ifndef PRODUCT
   if (ProfilerCheckIntervals) {
     _ticks++;
@@ -73,19 +73,39 @@
     _intervalHistogram[ms]++;
   }
 #endif
-  int orig_num_tasks = _num_tasks;
-  for(int index = 0; index < _num_tasks; index++) {
-    _tasks[index]->execute_if_pending(delay_time);
-    if (_num_tasks < orig_num_tasks) { // task dis-enrolled itself
-      index--;  // re-do current slot as it has changed
-      orig_num_tasks = _num_tasks;
+
+  {
+    MutexLockerEx ml(PeriodicTask_lock, Mutex::_no_safepoint_check_flag);
+    int orig_num_tasks = _num_tasks;
+
+    for(int index = 0; index < _num_tasks; index++) {
+      _tasks[index]->execute_if_pending(delay_time);
+      if (_num_tasks < orig_num_tasks) { // task disenrolled itself
+        index--;  // re-do current slot as it has changed
+        orig_num_tasks = _num_tasks;
+      }
     }
   }
 }
 
+int PeriodicTask::time_to_wait() {
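+  // Callers may or may not already hold PeriodicTask_lock (the WatcherThread
+  // does); only take it here if it is not already owned.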
+  MutexLockerEx ml(PeriodicTask_lock->owned_by_self() ?
+                     NULL : PeriodicTask_lock, Mutex::_no_safepoint_check_flag);
+
+  if (_num_tasks == 0) {
+    return 0; // sleep until shutdown or a task is enrolled
+  }
+
+  int delay = _tasks[0]->time_to_next_interval();
+  for (int index = 1; index < _num_tasks; index++) {
+    delay = MIN2(delay, _tasks[index]->time_to_next_interval());
+  }
+  return delay;
+}
+
 
 PeriodicTask::PeriodicTask(size_t interval_time) :
-  _counter(0), _interval(interval_time) {
+  _counter(0), _interval((int) interval_time) {
   // Sanity check the interval time
   assert(_interval >= PeriodicTask::min_interval &&
          _interval <= PeriodicTask::max_interval &&
@@ -94,33 +114,40 @@
 }
 
 PeriodicTask::~PeriodicTask() {
-  if (is_enrolled())
-    disenroll();
-}
-
-bool PeriodicTask::is_enrolled() const {
-  for(int index = 0; index < _num_tasks; index++)
-    if (_tasks[index] == this) return true;
-  return false;
+  disenroll();
 }
 
 void PeriodicTask::enroll() {
-  assert(WatcherThread::watcher_thread() == NULL, "dynamic enrollment of tasks not yet supported");
+  MutexLockerEx ml(PeriodicTask_lock->owned_by_self() ?
+                     NULL : PeriodicTask_lock, Mutex::_no_safepoint_check_flag);
 
-  if (_num_tasks == PeriodicTask::max_tasks)
+  if (_num_tasks == PeriodicTask::max_tasks) {
     fatal("Overflow in PeriodicTask table");
+  }
   _tasks[_num_tasks++] = this;
+
+  WatcherThread* thread = WatcherThread::watcher_thread();
+  if (thread) {
+    thread->unpark();
+  } else {
+    WatcherThread::start();
+  }
 }
 
 void PeriodicTask::disenroll() {
-  assert(WatcherThread::watcher_thread() == NULL ||
-         Thread::current() == WatcherThread::watcher_thread(),
-         "dynamic disenrollment currently only handled from WatcherThread from within task() method");
+  MutexLockerEx ml(PeriodicTask_lock->owned_by_self() ?
+                     NULL : PeriodicTask_lock, Mutex::_no_safepoint_check_flag);
 
   int index;
-  for(index = 0; index < _num_tasks && _tasks[index] != this; index++);
-  if (index == _num_tasks) return;
+  for(index = 0; index < _num_tasks && _tasks[index] != this; index++)
+    ;
+
+  if (index == _num_tasks) {
+    return;
+  }
+
   _num_tasks--;
+
   for (; index < _num_tasks; index++) {
     _tasks[index] = _tasks[index+1];
   }
--- a/src/share/vm/runtime/task.hpp	Thu Nov 08 18:46:17 2012 -0800
+++ b/src/share/vm/runtime/task.hpp	Fri Nov 09 07:31:56 2012 -0800
@@ -49,12 +49,12 @@
   static int num_tasks()   { return _num_tasks; }
 
  private:
-  size_t _counter;
-  const size_t _interval;
+  int _counter;
+  const int _interval;
 
   static int _num_tasks;
   static PeriodicTask* _tasks[PeriodicTask::max_tasks];
-  static void real_time_tick(size_t delay_time);
+  static void real_time_tick(int delay_time);
 
 #ifndef PRODUCT
   static elapsedTimer _timer;                      // measures time between ticks
@@ -69,51 +69,36 @@
   PeriodicTask(size_t interval_time); // interval is in milliseconds of elapsed time
   ~PeriodicTask();
 
-  // Tells whether is enrolled
-  bool is_enrolled() const;
-
   // Make the task active
-  // NOTE: this may only be called before the WatcherThread has been started
+  // For dynamic enrollment at time T, the task will execute somewhere
+  // between T and T + interval_time.
   void enroll();
 
   // Make the task deactive
-  // NOTE: this may only be called either while the WatcherThread is
-  // inactive or by a task from within its task() method. One-shot or
-  // several-shot tasks may be implemented this way.
   void disenroll();
 
-  void execute_if_pending(size_t delay_time) {
-    _counter += delay_time;
-    if (_counter >= _interval) {
+  void execute_if_pending(int delay_time) {
+    // make sure we don't overflow
+    jlong tmp = (jlong) _counter + (jlong) delay_time;
+
+    if (tmp >= (jlong) _interval) {
       _counter = 0;
       task();
+    } else {
+      _counter += delay_time;
     }
   }
 
   // Returns how long (time in milliseconds) before the next time we should
   // execute this task.
-  size_t time_to_next_interval() const {
+  int time_to_next_interval() const {
     assert(_interval > _counter,  "task counter greater than interval?");
     return _interval - _counter;
   }
 
   // Calculate when the next periodic task will fire.
   // Called by the WatcherThread's run method.
-  // This assumes that periodic tasks aren't entering the system
-  // dynamically, except for during startup.
-  static size_t time_to_wait() {
-    if (_num_tasks == 0) {
-      // Don't wait any more; shut down the thread since we don't
-      // currently support dynamic enrollment.
-      return 0;
-    }
-
-    size_t delay = _tasks[0]->time_to_next_interval();
-    for (int index = 1; index < _num_tasks; index++) {
-      delay = MIN2(delay, _tasks[index]->time_to_next_interval());
-    }
-    return delay;
-  }
+  static int time_to_wait();
 
   // The task to perform at each period
   virtual void task() = 0;
--- a/src/share/vm/runtime/thread.cpp	Thu Nov 08 18:46:17 2012 -0800
+++ b/src/share/vm/runtime/thread.cpp	Fri Nov 09 07:31:56 2012 -0800
@@ -1161,6 +1161,7 @@
 // timer interrupts exists on the platform.
 
 WatcherThread* WatcherThread::_watcher_thread   = NULL;
+bool WatcherThread::_startable = false;
 volatile bool  WatcherThread::_should_terminate = false;
 
 WatcherThread::WatcherThread() : Thread() {
@@ -1181,6 +1182,55 @@
   }
 }
 
+int WatcherThread::sleep() const {
+  MutexLockerEx ml(PeriodicTask_lock, Mutex::_no_safepoint_check_flag);
+
+  // remaining will be zero if there are no tasks,
+  // causing the WatcherThread to sleep until a task is
+  // enrolled
+  int remaining = PeriodicTask::time_to_wait();
+  int time_slept = 0;
+
+  // we expect this wait to time out - we only ever get unparked when
+  // we should terminate or when a new task has been enrolled
+  OSThreadWaitState osts(this->osthread(), false /* not Object.wait() */);
+
+  jlong time_before_loop = os::javaTimeNanos();
+
+  for (;;) {
+    bool timedout = PeriodicTask_lock->wait(Mutex::_no_safepoint_check_flag, remaining);
+    jlong now = os::javaTimeNanos();
+
+    if (remaining == 0) {
+        // if we didn't have any tasks we could have waited for a long time;
+        // treat time_slept as zero and reset time_before_loop
+        time_slept = 0;
+        time_before_loop = now;
+    } else {
+        // need to recalculate since we might have new tasks in _tasks
+        time_slept = (int) ((now - time_before_loop) / 1000000);
+    }
+
+    // Change to task list or spurious wakeup of some kind
+    if (timedout || _should_terminate) {
+        break;
+    }
+
+    remaining = PeriodicTask::time_to_wait();
+    if (remaining == 0) {
+        // Last task was just disenrolled so loop around and wait until
+        // another task gets enrolled
+        continue;
+    }
+
+    remaining -= time_slept;
+    if (remaining <= 0)
+      break;
+  }
+
+  return time_slept;
+}
+
 void WatcherThread::run() {
   assert(this == watcher_thread(), "just checking");
 
@@ -1193,26 +1243,7 @@
 
     // Calculate how long it'll be until the next PeriodicTask work
     // should be done, and sleep that amount of time.
-    size_t time_to_wait = PeriodicTask::time_to_wait();
-
-    // we expect this to timeout - we only ever get unparked when
-    // we should terminate
-    {
-      OSThreadWaitState osts(this->osthread(), false /* not Object.wait() */);
-
-      jlong prev_time = os::javaTimeNanos();
-      for (;;) {
-        int res= _SleepEvent->park(time_to_wait);
-        if (res == OS_TIMEOUT || _should_terminate)
-          break;
-        // spurious wakeup of some kind
-        jlong now = os::javaTimeNanos();
-        time_to_wait -= (now - prev_time) / 1000000;
-        if (time_to_wait <= 0)
-          break;
-        prev_time = now;
-      }
-    }
+    int time_waited = sleep();
 
     if (is_error_reported()) {
       // A fatal error has happened, the error handler(VMError::report_and_die)
@@ -1242,13 +1273,7 @@
       }
     }
 
-    PeriodicTask::real_time_tick(time_to_wait);
-
-    // If we have no more tasks left due to dynamic disenrollment,
-    // shut down the thread since we don't currently support dynamic enrollment
-    if (PeriodicTask::num_tasks() == 0) {
-      _should_terminate = true;
-    }
+    PeriodicTask::real_time_tick(time_waited);
   }
 
   // Signal that it is terminated
@@ -1263,22 +1288,33 @@
 }
 
 void WatcherThread::start() {
-  if (watcher_thread() == NULL) {
+  assert(PeriodicTask_lock->owned_by_self(), "PeriodicTask_lock required");
+
+  if (watcher_thread() == NULL && _startable) {
     _should_terminate = false;
     // Create the single instance of WatcherThread
     new WatcherThread();
   }
 }
 
+void WatcherThread::make_startable() {
+  assert(PeriodicTask_lock->owned_by_self(), "PeriodicTask_lock required");
+  _startable = true;
+}
+
 void WatcherThread::stop() {
+  {
+    MutexLockerEx ml(PeriodicTask_lock, Mutex::_no_safepoint_check_flag);
+    _should_terminate = true;
+    OrderAccess::fence();  // ensure WatcherThread sees update in main loop
+
+    WatcherThread* watcher = watcher_thread();
+    if (watcher != NULL)
+      watcher->unpark();
+  }
+
   // it is ok to take late safepoints here, if needed
   MutexLocker mu(Terminator_lock);
-  _should_terminate = true;
-  OrderAccess::fence();  // ensure WatcherThread sees update in main loop
-
-  Thread* watcher = watcher_thread();
-  if (watcher != NULL)
-    watcher->_SleepEvent->unpark();
 
   while(watcher_thread() != NULL) {
     // This wait should make safepoint checks, wait without a timeout,
@@ -1296,6 +1332,11 @@
   }
 }
 
+void WatcherThread::unpark() {
+  MutexLockerEx ml(PeriodicTask_lock->owned_by_self() ? NULL : PeriodicTask_lock, Mutex::_no_safepoint_check_flag);
+  PeriodicTask_lock->notify();
+}
+
 void WatcherThread::print_on(outputStream* st) const {
   st->print("\"%s\" ", name());
   Thread::print_on(st);
@@ -3568,12 +3609,18 @@
     }
   }
 
-  // Start up the WatcherThread if there are any periodic tasks
-  // NOTE:  All PeriodicTasks should be registered by now. If they
-  //   aren't, late joiners might appear to start slowly (we might
-  //   take a while to process their first tick).
-  if (PeriodicTask::num_tasks() > 0) {
-    WatcherThread::start();
+  {
+    MutexLockerEx ml(PeriodicTask_lock, Mutex::_no_safepoint_check_flag);
+    // Make sure the watcher thread can be started by WatcherThread::start()
+    // or by dynamic enrollment.
+    WatcherThread::make_startable();
+    // Start up the WatcherThread if there are any periodic tasks
+    // NOTE:  All PeriodicTasks should be registered by now. If they
+    //   aren't, late joiners might appear to start slowly (we might
+    //   take a while to process their first tick).
+    if (PeriodicTask::num_tasks() > 0) {
+      WatcherThread::start();
+    }
   }
 
   // Give os specific code one last chance to start
--- a/src/share/vm/runtime/thread.hpp	Thu Nov 08 18:46:17 2012 -0800
+++ b/src/share/vm/runtime/thread.hpp	Fri Nov 09 07:31:56 2012 -0800
@@ -711,6 +711,7 @@
  private:
   static WatcherThread* _watcher_thread;
 
+  static bool _startable;
   volatile static bool _should_terminate; // updated without holding lock
  public:
   enum SomeConstants {
@@ -727,6 +728,7 @@
   char* name() const { return (char*)"VM Periodic Task Thread"; }
   void print_on(outputStream* st) const;
   void print() const { print_on(tty); }
+  void unpark();
 
   // Returns the single instance of WatcherThread
   static WatcherThread* watcher_thread()         { return _watcher_thread; }
@@ -734,6 +736,12 @@
   // Create and start the single instance of WatcherThread, or stop it on shutdown
   static void start();
   static void stop();
+  // Only allow start once the VM is sufficiently initialized
+  // Otherwise the first task to enroll will trigger the start
+  static void make_startable();
+
+ private:
+  int sleep() const;
 };
 
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/8002069/Test8002069.java	Fri Nov 09 07:31:56 2012 -0800
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 8002069
+ * @summary Assert failed in C2: assert(field->edge_count() > 0) failed: sanity
+ *
+ * @run main/othervm -Xmx32m -XX:+IgnoreUnrecognizedVMOptions -Xbatch -XX:CompileCommand=exclude,Test8002069.dummy Test8002069
+ */
+
+abstract class O {
+  int f;
+  public O() { f = 5; }
+  abstract void put(int i);
+  public int foo(int i) {
+    put(i);
+    return i;
+  }
+};
+
+class A extends O {
+  int[] a;
+  public A(int s) {
+    a = new int[s];
+  }
+  public void put(int i) {
+    a[i%a.length] = i;
+  }
+}
+
+class B extends O {
+  int sz;
+  int[] a;
+  public B(int s) {
+    sz = s;
+    a = new int[s];
+  }
+  public void put(int i) {
+    a[i%sz] = i;
+  }
+}
+
+public class Test8002069 {
+  public static void main(String args[]) {
+    int sum = 0;
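+    // Warm up with receiver type A only, then switch to B so C2 compiles
+    // O.foo() with a bimorphic call site (the call shape the escape.cpp fix
+    // in this changeset guards against; see bug 8002069).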
+    for (int i=0; i<8000; i++) {
+      sum += test1(i);
+    }
+    for (int i=0; i<100000; i++) {
+      sum += test2(i);
+    }
+    System.out.println("PASSED. sum = " + sum);
+  }
+
+  private O o;
+
+  private int foo(int i) {
+    return o.foo(i);
+  }
+  static int test1(int i) {
+    Test8002069 t = new Test8002069();
+    t.o = new A(5);
+    return t.foo(i);
+  }
+  static int test2(int i) {
+    Test8002069 t = new Test8002069();
+    t.o = new B(5);
+    dummy(i);
+    return t.foo(i);
+  }
+
+  static int dummy(int i) {
+    return i*2;
+  }
+}
+