# HG changeset patch # User adinn # Date 1417087630 0 # Node ID 205e1ae8868b6dd3652acbb77ba0973f37ac6639 # Parent 80e04c4cd4b2b5ef55844174e8b397e42114ace4# Parent 5c40e2190b6ee39a817f28bf16af2eee4cb6868b merge diff -r 80e04c4cd4b2 -r 205e1ae8868b .hgtags --- a/.hgtags Tue Nov 25 17:36:55 2014 +0000 +++ b/.hgtags Thu Nov 27 11:27:10 2014 +0000 @@ -631,6 +631,7 @@ 408028d410e316a99495c42df0031018890c22fe jdk7u55-b02 50fb91504dd8cdf410eb956075442daf3aacf1db jdk7u55-b03 3be3b8a032a5508646c1c5620cee18d3e69fc708 jdk7u55-b04 +b86119fa2748bd91ae4984ff2264da92b6626f8c jdk7u65-b00 b86119fa2748bd91ae4984ff2264da92b6626f8c jdk7u55-b05 260d919d52e500a0b20f911fade2a7710474067a jdk7u55-b06 8cf6e0a3a0651c4132ae034c2b68ddf4eb5c4d88 jdk7u55-b07 @@ -643,7 +644,13 @@ b021fd817a0177b31d1e3d65127a27458e85801e jdk7u55-b13 d27b468d5f3be3329ff1ff342f3347e6b2e0303b jdk7u55-b30 dff9147a781672f20bb0577a94233264ea4a95d1 jdk7u55-b14 +4e73c6403b44f86d579b1ba03ea636c52c4b559b jdk7u55-b15 8175599864880938d68d0a515fa561043d7d5fd0 jdk7u55-b31 +ba9270b8fb1f4852ff1d9dab15571eb9e0714495 jdk7u55-b32 +0901a8cf66a0494b55bf104c9666d4e3c6ff93f0 jdk7u55-b33 +278d7e230b297a4632b94ddc07d591e74736e039 jdk7u55-b34 +db88943dba0b7672a09e22974934022fbe8ba8dd jdk7u55-b35 +b3e388601b0fc0922b311e2cc68b9417cedd16ef jdk7u55-b36 ae4adc1492d1c90a70bd2d139a939fc0c8329be9 jdk7u60-b00 af1fc2868a2b919727bfbb0858449bd991bbee4a jdk7u40-b60 cc83359f5e5eb46dd9176b0a272390b1a0a51fdc hs24.60-b01 @@ -672,9 +679,80 @@ b226be2040f971855626f5b88cb41a7d5299fea0 jdk7u60-b14 2871f345b7e5585e20dc7aa91035967fe774cfba jdk7u60-b15 ec76bacbb5b90efc7988dee5345c656126b97561 jdk7u60-b16 +617a6338e0c4f7699eed5061d7e8f576c3ace029 jdk7u60-b17 617a6338e0c4f7699eed5061d7e8f576c3ace029 jdk7u60-b18 -617a6338e0c4f7699eed5061d7e8f576c3ace029 jdk7u60-b17 +4a9635c98a917cfcef506ca5d034c733a33c53f3 jdk7u65-b01 361493c7cdb5f75b28efc63389d6cebaaaa43a2c jdk7u60-b19 +13f561930b3e80a94e2baddc51dfc6c43c5ca601 jdk7u60-b30 +35b2dbe7f7c69ea0f2feb1e66fe8651511a5fb6d jdk7u60-b31 +f166d2e391993f1b12b4ad1685baf999c78e6372 jdk7u60-b32 +cc1fea28c886ef100632247a708eac0c83640914 jdk7u60-b33 +eb797fab50d3b440b17b3e7c5d83f42bfa73655e jdk7u65-b02 +bb00df28ecdbd0da89ab4ed81f6f2b732fa512da jdk7u65-b03 +848481af9003067546c7f34c166bb8d745b95d5f jdk7u65-b04 +98a884fa64a9ef1753a28691106efe10942b9d70 jdk7u65-b05 +6f1dddf9c632bfb14121c9521d17b64bd0be0cd2 jdk7u65-b06 +a053d3d805355ffcd85c17e653182e17d4456bd5 jdk7u65-b07 +6f03dfb50363d26599fcf726586ea3f6d0e0347d jdk7u65-b08 +b4930eb1ea7630b4d8609e2efe6f000d3dc83235 jdk7u65-b09 +4736382ac9d999044b05eb26932ab6fc59dbb159 jdk7u65-b10 +7345c7bf20fd8c91492240a95082af9a201b3a96 jdk7u65-b11 +28b81694b89f88541e28bbc767d78e77ec66cce6 jdk7u65-b12 +f4ed018b4c51dae699da835617b19e8a49c124a4 jdk7u65-b13 +7ec585caae47f7202fb5357607f9ad058b03870e jdk7u65-b14 +7058f0d30de6826b6866ce2d146c63e943be33af jdk7u65-b15 +f1b2970a2564c3360db420431cfbba215da6ae43 jdk7u65-b16 +4c6df9a369cb9d54fe2d898452883a22b8ec6640 jdk7u65-b17 +aca05127f95b5704ee3a34104a8f86e36326f0c0 jdk7u65-b30 +d006213be74730453cf5c3ce31f1d1d505334419 jdk7u65-b18 +1d8226b3e9896656451801393eb3ae394faeb638 jdk7u65-b19 +c43b0b843f897a4d8cf0a3566b017b87230dd3b4 jdk7u65-b32 +d3c9265e12fa115052f18d1e3d379143b56bbf63 jdk7u65-b20 +39776d90970221dd260187acb4c37631e41a66a9 jdk7u67-b01 +1d8226b3e9896656451801393eb3ae394faeb638 jdk7u65-b40 +cf8b3a090e597e59177c5f67d44cdec12309777f jdk7u65-b31 +df855c3f4d31dd7db081d68e3054518380127893 jdk7u65-b33 +6b37a189944aaa09e81d97d394496464d16bee42 jdk7u66-b00 
+121dc94194d9234e2b13c867d875e23e1bdd6abd jdk7u66-b01 +f28ea516eb0b9e99f1e342954ab4642456af4da1 jdk7u66-b09 +3dc6ae1972a45ba563518cc0e51f09885258f69d jdk7u66-b10 +8d2b3f7d5b3001d019832476d684679ca6be0c8d jdk7u66-b11 +5ee19b64ef208daaef91f063d800aa162427f8f6 jdk7u66-b12 +a1e6f9c4c1f47be1b0edef6bd92399f8f07b7d15 jdk7u66-b13 +b44baba406f2de6eeccc57dbfae653cf124b527b jdk7u66-b14 +d20b495c96d3f8899a64657aba0fc72799773cb3 jdk7u66-b15 +3bbfed065c601187449d319fd70bba6ae1ebb707 jdk7u66-b16 +4abb71ff14b2e6cf932e5c61900f480d5e1afedb jdk7u66-b17 +4ceb9c03fe8ee6b93d22854780ef8c737edd14b2 jdk7u71-b00 +f95d6d32e08006209f1798f82b60d7d05767a3e8 jdk7u71-b01 +1c760efe2d0795f4ce8260ec655b8870bfd77ca1 jdk7u71-b02 +0cb0b5abd0b5aa25fc8bd5920c8d61c5b85a10c6 jdk7u71-b03 +a491e5e52998c23502ebb1340955e3e726d44ad6 jdk7u71-b04 +c93efe6377ffd7484c50cba9a88a37bebf525114 jdk7u71-b05 +f95fa655cc119659686ba68c7242497fd209f9e1 jdk7u71-b06 +7f32b65fde34db41bf951ed81374240840ef88f4 jdk7u71-b07 +4e17bd4fb2304d068023d9d805e86d6b592d4230 jdk7u71-b08 +1ffc702334d960aa4015e5cc6f4fb9e971952b54 jdk7u71-b09 +9a17c184bcb99f13dc6ab714ad98976410429637 jdk7u71-b10 +d6cb97651f0bd8d61f4d22aa7550145bbe6fb051 jdk7u71-b11 +959b4e5d2e3111920c198187f3bc66eba3e457f1 jdk7u71-b12 +608f470d22689bab17bab0ea1dbee3e1a0802d5b jdk7u71-b13 +ad909197a1ce2df483a20ff9ac380382f779a9d3 jdk7u71-b14 +1bd3adac3aac3c29c81303812b35f484ff90cb2b jdk7u72-b01 +0caed46767e35c00eff69b22acf984d98eb66b3d jdk7u72-b02 +3a2934191de4bb8ca9d2faca93f3381e521e8cac jdk7u72-b03 +e4708cde2898df4c936595aacb57bc5b4e15869a jdk7u72-b04 +137e0859cd296cb8d9f9e327112ddc793ed59318 jdk7u72-b05 +4d9d227d70f33b70461230172386217317954312 jdk7u72-b06 +ece56f93f37b41b9c8875e54fbd8010277f6b460 jdk7u72-b07 +439c695a7aa03652ab92681120434b9ce8cdd2b7 jdk7u72-b08 +a27f16d45457a68a723acca621cb11bc173a0eb6 jdk7u72-b09 +e6508ab77271d1d3ce7b5f60d91a7334fdacb03a jdk7u72-b10 +c17a8487086433e14cd22373039a8b6b48e7cbb8 jdk7u72-b11 +a9e695f0d831f115720a4dcad3d33e0003b0acad jdk7u72-b12 +ac701f87d1ea46033c69f3e1cb84fc0a971da70c jdk7u72-b13 +d9b56c6bdddb6f9d8242230f5fdd58f9c7d30ea5 jdk7u72-b14 +a6ae698522bfab3c595a4f8c2c3ee7e8939eb1bb jdk7u72-b30 b92f390febd01615af4a736b4f830f6052aa1d09 hs24.80-b00 1448ebfef4f1aae0174eca983ad05507730ca6fd hs24.80-b01 b1d29549dca7e36a4d050af5a54f8f56963a5c7d hs24.80-b02 @@ -689,3 +767,11 @@ 2fd819c8b5066a480f9524d901dbd34f2cf563ad icedtea-2.6pre04 fae3b09fe959294f7a091a6ecaae91daf1cb4f5c icedtea-2.6pre05 05fe7a87d14908eb3f21a0d29fc72cee2f996b7f jdk7u80-b00 +e2533d62ca887078e4b952a75a75680cfb7894b9 jdk7u80-b01 +8ffb87775f56ed5c602f320d2513351298ee4778 icedtea-2.6pre07 +b517477362d1b0d4f9b567c82db85136fd14bc6e icedtea-2.6pre06 +6d5ec408f4cac2c2004bf6120403df1b18051a21 icedtea-2.6pre08 +bad107a5d096b070355c5a2d80aa50bc5576144b jdk7u80-b02 +4722cfd15c8386321c8e857951b3cb55461e858b icedtea-2.6pre09 +c8417820ac943736822e7b84518b5aca80f39593 icedtea-2.6pre10 +e13857ecc7870c28dbebca79ff36612693dac157 icedtea-2.6pre11 diff -r 80e04c4cd4b2 -r 205e1ae8868b make/bsd/Makefile --- a/make/bsd/Makefile Tue Nov 25 17:36:55 2014 +0000 +++ b/make/bsd/Makefile Thu Nov 27 11:27:10 2014 +0000 @@ -282,48 +282,76 @@ $(BUILDTREE) VARIANT=shark VARIANTARCH=$(VARIANTARCH) platform_zero: $(GAMMADIR)/make/$(OSNAME)/platform_zero.in - $(SED) 's/@ZERO_ARCHDEF@/$(ZERO_ARCHDEF)/g;s/@ZERO_LIBARCH@/$(ZERO_LIBARCH)/g;' < $< > $@ +ifeq ($(ZERO_ARCHDEF),PPC) + ifndef LP64 + $(SED) 's/@ZERO_ARCHDEF@/-DPPC -DPPC32/g;s/@ZERO_LIBARCH@/$(ZERO_LIBARCH)/g;' < $< > $@ + else + $(SED) 's/@ZERO_ARCHDEF@/-DPPC 
-DPPC64/g;s/@ZERO_LIBARCH@/$(ZERO_LIBARCH)/g;' < $< > $@ + endif +else + ifeq ($(ZERO_ARCHDEF),PPC64) + $(SED) 's/@ZERO_ARCHDEF@/-DPPC -DPPC64/g;s/@ZERO_LIBARCH@/$(ZERO_LIBARCH)/g;' < $< > $@ + else + ifeq ($(ZERO_ARCHDEF),PPC32) + $(SED) 's/@ZERO_ARCHDEF@/-DPPC -DPPC32/g;s/@ZERO_LIBARCH@/$(ZERO_LIBARCH)/g;' < $< > $@ + else + $(SED) 's/@ZERO_ARCHDEF@/-D$(ZERO_ARCHDEF)/g;s/@ZERO_LIBARCH@/$(ZERO_LIBARCH)/g;' < $< > $@ + endif + endif +endif # Define INSTALL=y at command line to automatically copy JVM into JAVA_HOME $(TARGETS_C2): $(SUBDIRS_C2) cd $(OSNAME)_$(BUILDARCH)_compiler2/$@ && $(MAKE) $(MFLAGS) +ifeq ($(TEST_IN_BUILD),true) cd $(OSNAME)_$(BUILDARCH)_compiler2/$@ && ./test_gamma +endif ifdef INSTALL cd $(OSNAME)_$(BUILDARCH)_compiler2/$@ && $(MAKE) $(MFLAGS) install endif $(TARGETS_TIERED): $(SUBDIRS_TIERED) cd $(OSNAME)_$(BUILDARCH)_tiered/$(patsubst %tiered,%,$@) && $(MAKE) $(MFLAGS) +ifeq ($(TEST_IN_BUILD),true) cd $(OSNAME)_$(BUILDARCH)_tiered/$(patsubst %tiered,%,$@) && ./test_gamma +endif ifdef INSTALL cd $(OSNAME)_$(BUILDARCH)_tiered/$(patsubst %tiered,%,$@) && $(MAKE) $(MFLAGS) install endif $(TARGETS_C1): $(SUBDIRS_C1) cd $(OSNAME)_$(BUILDARCH)_compiler1/$(patsubst %1,%,$@) && $(MAKE) $(MFLAGS) +ifeq ($(TEST_IN_BUILD),true) cd $(OSNAME)_$(BUILDARCH)_compiler1/$(patsubst %1,%,$@) && ./test_gamma +endif ifdef INSTALL cd $(OSNAME)_$(BUILDARCH)_compiler1/$(patsubst %1,%,$@) && $(MAKE) $(MFLAGS) install endif $(TARGETS_CORE): $(SUBDIRS_CORE) cd $(OSNAME)_$(BUILDARCH)_core/$(patsubst %core,%,$@) && $(MAKE) $(MFLAGS) +ifeq ($(TEST_IN_BUILD),true) cd $(OSNAME)_$(BUILDARCH)_core/$(patsubst %core,%,$@) && ./test_gamma +endif ifdef INSTALL cd $(OSNAME)_$(BUILDARCH)_core/$(patsubst %core,%,$@) && $(MAKE) $(MFLAGS) install endif $(TARGETS_ZERO): $(SUBDIRS_ZERO) cd $(OSNAME)_$(VARIANTARCH)_zero/$(patsubst %zero,%,$@) && $(MAKE) $(MFLAGS) +ifeq ($(TEST_IN_BUILD),true) cd $(OSNAME)_$(VARIANTARCH)_zero/$(patsubst %zero,%,$@) && ./test_gamma +endif ifdef INSTALL cd $(OSNAME)_$(VARIANTARCH)_zero/$(patsubst %zero,%,$@) && $(MAKE) $(MFLAGS) install endif $(TARGETS_SHARK): $(SUBDIRS_SHARK) cd $(OSNAME)_$(VARIANTARCH)_shark/$(patsubst %shark,%,$@) && $(MAKE) $(MFLAGS) +ifeq ($(TEST_IN_BUILD),true) cd $(OSNAME)_$(VARIANTARCH)_shark/$(patsubst %shark,%,$@) && ./test_gamma +endif ifdef INSTALL cd $(OSNAME)_$(VARIANTARCH)_shark/$(patsubst %shark,%,$@) && $(MAKE) $(MFLAGS) install endif diff -r 80e04c4cd4b2 -r 205e1ae8868b make/bsd/makefiles/gcc.make --- a/make/bsd/makefiles/gcc.make Tue Nov 25 17:36:55 2014 +0000 +++ b/make/bsd/makefiles/gcc.make Thu Nov 27 11:27:10 2014 +0000 @@ -137,6 +137,20 @@ # Ineffecient 16-byte stack re-alignment on Darwin/IA32 ARCHFLAG/i486 += -mstackrealign endif +# gcc bug http://gcc.gnu.org/PR63341 in ppc code generation requires -fno-tree-vectorize for now +ARCHFLAG/ppc += -fno-tree-vectorize +ARCHFLAG/ppc64 += -fno-tree-vectorize +ifeq ($(TYPE),ZERO) + ifeq ($(ZERO_ARCHDEF),PPC) + ARCHFLAG/zero += -fno-tree-vectorize + endif + ifeq ($(ZERO_ARCHDEF),PPC32) + ARCHFLAG/zero += -fno-tree-vectorize + endif + ifeq ($(ZERO_ARCHDEF),PPC64) + ARCHFLAG/zero += -fno-tree-vectorize + endif +endif CFLAGS += $(ARCHFLAG) AOUT_FLAGS += $(ARCHFLAG) diff -r 80e04c4cd4b2 -r 205e1ae8868b make/bsd/makefiles/mapfile-vers-debug --- a/make/bsd/makefiles/mapfile-vers-debug Tue Nov 25 17:36:55 2014 +0000 +++ b/make/bsd/makefiles/mapfile-vers-debug Thu Nov 27 11:27:10 2014 +0000 @@ -1,5 +1,5 @@ # -# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. 
+# Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -82,6 +82,7 @@ _JVM_EnableCompiler _JVM_Exit _JVM_FillInStackTrace + _JVM_FindClassFromCaller _JVM_FindClassFromClass _JVM_FindClassFromClassLoader _JVM_FindClassFromBootLoader diff -r 80e04c4cd4b2 -r 205e1ae8868b make/bsd/makefiles/mapfile-vers-product --- a/make/bsd/makefiles/mapfile-vers-product Tue Nov 25 17:36:55 2014 +0000 +++ b/make/bsd/makefiles/mapfile-vers-product Thu Nov 27 11:27:10 2014 +0000 @@ -1,5 +1,5 @@ # -# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -82,6 +82,7 @@ _JVM_EnableCompiler _JVM_Exit _JVM_FillInStackTrace + _JVM_FindClassFromCaller _JVM_FindClassFromClass _JVM_FindClassFromClassLoader _JVM_FindClassFromBootLoader diff -r 80e04c4cd4b2 -r 205e1ae8868b make/bsd/platform_zero.in --- a/make/bsd/platform_zero.in Tue Nov 25 17:36:55 2014 +0000 +++ b/make/bsd/platform_zero.in Thu Nov 27 11:27:10 2014 +0000 @@ -14,4 +14,4 @@ gnu_dis_arch = zero -sysdefs = -D_ALLBSD_SOURCE -D_GNU_SOURCE -DCC_INTERP -DZERO -D@ZERO_ARCHDEF@ -DZERO_LIBARCH=\"@ZERO_LIBARCH@\" +sysdefs = -D_ALLBSD_SOURCE -D_GNU_SOURCE -DCC_INTERP -DZERO @ZERO_ARCHDEF@ -DZERO_LIBARCH=\"@ZERO_LIBARCH@\" diff -r 80e04c4cd4b2 -r 205e1ae8868b make/defs.make --- a/make/defs.make Tue Nov 25 17:36:55 2014 +0000 +++ b/make/defs.make Thu Nov 27 11:27:10 2014 +0000 @@ -319,10 +319,6 @@ LP64_ARCH = sparcv9 amd64 aarch64 ia64 ppc64 zero endif -ifeq ($(ARCH), ppc64) - CC_INTERP=true -endif - # Required make macro settings for all platforms MAKE_ARGS += JAVA_HOME=$(ABS_BOOTDIR) MAKE_ARGS += OUTPUTDIR=$(ABS_OUTPUTDIR) @@ -359,6 +355,9 @@ EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/$(JDK_INCLUDE_SUBDIR)/jni_md.h EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jmm.h +# By default, run Queens test after building +TEST_IN_BUILD ?= true + ifndef JAVASE_EMBEDDED EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jfr.h ifneq (${ARCH},arm) diff -r 80e04c4cd4b2 -r 205e1ae8868b make/hotspot_version diff -r 80e04c4cd4b2 -r 205e1ae8868b make/linux/Makefile --- a/make/linux/Makefile Tue Nov 25 17:36:55 2014 +0000 +++ b/make/linux/Makefile Thu Nov 27 11:27:10 2014 +0000 @@ -306,7 +306,23 @@ $(BUILDTREE) VARIANT=shark VARIANTARCH=$(VARIANTARCH) platform_zero: $(GAMMADIR)/make/$(OSNAME)/platform_zero.in - $(SED) 's/@ZERO_ARCHDEF@/$(ZERO_ARCHDEF)/g;s/@ZERO_LIBARCH@/$(ZERO_LIBARCH)/g;' < $< > $@ +ifeq ($(ZERO_ARCHDEF),PPC) + ifndef LP64 + $(SED) 's/@ZERO_ARCHDEF@/-DPPC -DPPC32/g;s/@ZERO_LIBARCH@/$(ZERO_LIBARCH)/g;' < $< > $@ + else + $(SED) 's/@ZERO_ARCHDEF@/-DPPC -DPPC64/g;s/@ZERO_LIBARCH@/$(ZERO_LIBARCH)/g;' < $< > $@ + endif +else + ifeq ($(ZERO_ARCHDEF),PPC64) + $(SED) 's/@ZERO_ARCHDEF@/-DPPC -DPPC64/g;s/@ZERO_LIBARCH@/$(ZERO_LIBARCH)/g;' < $< > $@ + else + ifeq ($(ZERO_ARCHDEF),PPC32) + $(SED) 's/@ZERO_ARCHDEF@/-DPPC -DPPC32/g;s/@ZERO_LIBARCH@/$(ZERO_LIBARCH)/g;' < $< > $@ + else + $(SED) 's/@ZERO_ARCHDEF@/-D$(ZERO_ARCHDEF)/g;s/@ZERO_LIBARCH@/$(ZERO_LIBARCH)/g;' < $< > $@ + endif + endif +endif # Define INSTALL=y at command line to automatically copy JVM into JAVA_HOME @@ -318,7 +334,9 @@ endif else cd $(OSNAME)_$(BUILDARCH)_compiler2/$@ && $(MAKE) $(MFLAGS) +ifeq ($(TEST_IN_BUILD),true) cd 
$(OSNAME)_$(BUILDARCH)_compiler2/$@ && ./test_gamma +endif ifdef INSTALL cd $(OSNAME)_$(BUILDARCH)_compiler2/$@ && $(MAKE) $(MFLAGS) install endif @@ -332,7 +350,9 @@ endif else cd $(OSNAME)_$(BUILDARCH)_tiered/$(patsubst %tiered,%,$@) && $(MAKE) $(MFLAGS) +ifeq ($(TEST_IN_BUILD),true) cd $(OSNAME)_$(BUILDARCH)_tiered/$(patsubst %tiered,%,$@) && ./test_gamma +endif ifdef INSTALL cd $(OSNAME)_$(BUILDARCH)_tiered/$(patsubst %tiered,%,$@) && $(MAKE) $(MFLAGS) install endif @@ -346,7 +366,9 @@ endif else cd $(OSNAME)_$(BUILDARCH)_compiler1/$(patsubst %1,%,$@) && $(MAKE) $(MFLAGS) +ifeq ($(TEST_IN_BUILD),true) cd $(OSNAME)_$(BUILDARCH)_compiler1/$(patsubst %1,%,$@) && ./test_gamma +endif ifdef INSTALL cd $(OSNAME)_$(BUILDARCH)_compiler1/$(patsubst %1,%,$@) && $(MAKE) $(MFLAGS) install endif @@ -360,7 +382,9 @@ endif else cd $(OSNAME)_$(BUILDARCH)_core/$(patsubst %core,%,$@) && $(MAKE) $(MFLAGS) +ifeq ($(TEST_IN_BUILD),true) cd $(OSNAME)_$(BUILDARCH)_core/$(patsubst %core,%,$@) && ./test_gamma +endif ifdef INSTALL cd $(OSNAME)_$(BUILDARCH)_core/$(patsubst %core,%,$@) && $(MAKE) $(MFLAGS) install endif @@ -368,14 +392,18 @@ $(TARGETS_ZERO): $(SUBDIRS_ZERO) cd $(OSNAME)_$(VARIANTARCH)_zero/$(patsubst %zero,%,$@) && $(MAKE) $(MFLAGS) +ifeq ($(TEST_IN_BUILD),true) cd $(OSNAME)_$(VARIANTARCH)_zero/$(patsubst %zero,%,$@) && ./test_gamma +endif ifdef INSTALL cd $(OSNAME)_$(VARIANTARCH)_zero/$(patsubst %zero,%,$@) && $(MAKE) $(MFLAGS) install endif $(TARGETS_SHARK): $(SUBDIRS_SHARK) cd $(OSNAME)_$(VARIANTARCH)_shark/$(patsubst %shark,%,$@) && $(MAKE) $(MFLAGS) +ifeq ($(TEST_IN_BUILD),true) cd $(OSNAME)_$(VARIANTARCH)_shark/$(patsubst %shark,%,$@) && ./test_gamma +endif ifdef INSTALL cd $(OSNAME)_$(VARIANTARCH)_shark/$(patsubst %shark,%,$@) && $(MAKE) $(MFLAGS) install endif diff -r 80e04c4cd4b2 -r 205e1ae8868b make/linux/makefiles/gcc.make --- a/make/linux/makefiles/gcc.make Tue Nov 25 17:36:55 2014 +0000 +++ b/make/linux/makefiles/gcc.make Thu Nov 27 11:27:10 2014 +0000 @@ -107,6 +107,20 @@ ARCHFLAG/ppc = -mcpu=powerpc endif ARCHFLAG/ppc64 = -m64 +# gcc bug http://gcc.gnu.org/PR63341 in ppc code generation requires -fno-tree-vectorize for now +ARCHFLAG/ppc += -fno-tree-vectorize +ARCHFLAG/ppc64 += -fno-tree-vectorize +ifeq ($(TYPE),ZERO) + ifeq ($(ZERO_ARCHDEF),PPC) + ARCHFLAG/zero += -fno-tree-vectorize + endif + ifeq ($(ZERO_ARCHDEF),PPC32) + ARCHFLAG/zero += -fno-tree-vectorize + endif + ifeq ($(ZERO_ARCHDEF),PPC64) + ARCHFLAG/zero += -fno-tree-vectorize + endif +endif CFLAGS += $(ARCHFLAG) AOUT_FLAGS += $(ARCHFLAG) diff -r 80e04c4cd4b2 -r 205e1ae8868b make/linux/makefiles/mapfile-vers-debug --- a/make/linux/makefiles/mapfile-vers-debug Tue Nov 25 17:36:55 2014 +0000 +++ b/make/linux/makefiles/mapfile-vers-debug Thu Nov 27 11:27:10 2014 +0000 @@ -1,5 +1,5 @@ # -# Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
# # This code is free software; you can redistribute it and/or modify it @@ -84,6 +84,7 @@ JVM_EnableCompiler; JVM_Exit; JVM_FillInStackTrace; + JVM_FindClassFromCaller; JVM_FindClassFromClass; JVM_FindClassFromClassLoader; JVM_FindClassFromBootLoader; diff -r 80e04c4cd4b2 -r 205e1ae8868b make/linux/makefiles/mapfile-vers-product --- a/make/linux/makefiles/mapfile-vers-product Tue Nov 25 17:36:55 2014 +0000 +++ b/make/linux/makefiles/mapfile-vers-product Thu Nov 27 11:27:10 2014 +0000 @@ -1,5 +1,5 @@ # -# Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -84,6 +84,7 @@ JVM_EnableCompiler; JVM_Exit; JVM_FillInStackTrace; + JVM_FindClassFromCaller; JVM_FindClassFromClass; JVM_FindClassFromClassLoader; JVM_FindClassFromBootLoader; diff -r 80e04c4cd4b2 -r 205e1ae8868b make/linux/makefiles/rules.make --- a/make/linux/makefiles/rules.make Tue Nov 25 17:36:55 2014 +0000 +++ b/make/linux/makefiles/rules.make Thu Nov 27 11:27:10 2014 +0000 @@ -30,17 +30,11 @@ DEMANGLER = c++filt DEMANGLE = $(DEMANGLER) < $@ > .$@ && mv -f .$@ $@ -# $(CC) is the c compiler (cc/gcc), $(CXX) is the c++ compiler -# (CC/g++). FIXME: $(CXXFLAGS) currently only includes preprocessor -# flags while $(CFLAGS) includes C and C++ flags. Ideally, there -# should be three variables: $(CFLAGS), $(CXXFLAGS) and $(CPPFLAGS). -# !!! FIXME AARCH64 -# on aarch64 the definition for CC_COMPILE must include CFLAGS here -# otherwise the -D options added in aarch64.make don't get passed to -# the compiler when we are assembling .S files. this ought ot be ok -# for all architectures but we need to check that is the case -#CC_COMPILE = $(CC) $(CXXFLAGS) -CC_COMPILE = $(CC) $(CXXFLAGS) $(CFLAGS) +# $(CC) is the c compiler (cc/gcc), $(CXX) is the c++ compiler (CC/g++). +# FIXME: $(CXXFLAGS) currently only includes preprocessor flags while +# $(CFLAGS) includes C and C++ flags. Ideally, there should be three +# variables: $(CFLAGS), $(CXXFLAGS) and $(CPPFLAGS). +CC_COMPILE = $(CC) $(CXXFLAGS) CXX_COMPILE = $(CXX) $(CXXFLAGS) $(CFLAGS) AS.S = $(AS) $(ASFLAGS) @@ -165,7 +159,7 @@ %.o: %.S @echo Assembling $< $(QUIETLY) $(REMOVE_TARGET) - $(COMPILE.CC) -o $@ $< $(COMPILE_DONE) + $(COMPILE.CC) $(CFLAGS) -o $@ $< $(COMPILE_DONE) %.s: %.cpp @echo Generating assembly for $< diff -r 80e04c4cd4b2 -r 205e1ae8868b make/linux/makefiles/vm.make --- a/make/linux/makefiles/vm.make Tue Nov 25 17:36:55 2014 +0000 +++ b/make/linux/makefiles/vm.make Thu Nov 27 11:27:10 2014 +0000 @@ -355,11 +355,6 @@ # the text relocation to libjvm.so considering that it is built as a non-PIC # DSO. To workaround that, we run chcon to libjvm.so after it is built. See # details in bug 6538311. -# !!! FIXME AARCH64 -# the logic of the debuginfo management when STRIP_POLICY==nostrip has -# been tweaked here to work for AARCH64. 
it probably ought to be -# correct for all architectures but still needs checking - $(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE) $(LD_SCRIPT) $(QUIETLY) { \ echo Linking vm...; \ @@ -382,30 +377,28 @@ fi \ } - ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) - ifneq ($(STRIP_POLICY),no_strip) +ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) + ifneq ($(STRIP_POLICY),no_strip) $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DEBUGINFO) $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DEBUGINFO) $@ - endif - ifeq ($(STRIP_POLICY),all_strip) + endif + ifeq ($(STRIP_POLICY),all_strip) $(QUIETLY) $(STRIP) $@ - else - ifeq ($(STRIP_POLICY),min_strip) + else + ifeq ($(STRIP_POLICY),min_strip) $(QUIETLY) $(STRIP) -g $@ - # implied else here is no stripping at all - endif - endif - ifneq ($(STRIP_POLICY),no_strip) - $(QUIETLY) [ -f $(LIBJVM_G_DEBUGINFO) ] || ln -s $(LIBJVM_DEBUGINFO) $(LIBJVM_G_DEBUGINFO) - ifeq ($(ZIP_DEBUGINFO_FILES),1) - ifneq ($(STRIP_POLICY),no_strip) - $(ZIPEXE) -q -y $(LIBJVM_DIZ) $(LIBJVM_DEBUGINFO) $(LIBJVM_G_DEBUGINFO) + # implied else here is no stripping at all + endif + endif + ifneq ($(STRIP_POLICY),no_strip) + $(QUIETLY) [ -f $(LIBJVM_G_DEBUGINFO) ] || ln -s $(LIBJVM_DEBUGINFO) $(LIBJVM_G_DEBUGINFO) + ifeq ($(ZIP_DEBUGINFO_FILES),1) + $(ZIPEXE) -q -y $(LIBJVM_DIZ) $(LIBJVM_DEBUGINFO) $(LIBJVM_G_DEBUGINFO) $(RM) $(LIBJVM_DEBUGINFO) $(LIBJVM_G_DEBUGINFO) [ -f $(LIBJVM_G_DIZ) ] || { ln -s $(LIBJVM_DIZ) $(LIBJVM_G_DIZ); } - endif - endif - endif - endif + endif + endif +endif DEST_SUBDIR = $(JDK_LIBDIR)/$(VM_SUBDIR) DEST_JVM = $(DEST_SUBDIR)/$(LIBJVM) diff -r 80e04c4cd4b2 -r 205e1ae8868b make/linux/makefiles/zeroshark.make --- a/make/linux/makefiles/zeroshark.make Tue Nov 25 17:36:55 2014 +0000 +++ b/make/linux/makefiles/zeroshark.make Thu Nov 27 11:27:10 2014 +0000 @@ -57,11 +57,6 @@ endif endif -%.o: %.S - @echo Assembling $< - $(QUIETLY) $(REMOVE_TARGET) - $(COMPILE.CC) $(CFLAGS) -o $@ $< $(COMPILE_DONE) - # The copied fdlibm routines in sharedRuntimeTrig.o must not be optimized OPT_CFLAGS/sharedRuntimeTrig.o = $(OPT_CFLAGS/NOOPT) # The copied fdlibm routines in sharedRuntimeTrans.o must not be optimized diff -r 80e04c4cd4b2 -r 205e1ae8868b make/linux/platform_ppc64 --- a/make/linux/platform_ppc64 Tue Nov 25 17:36:55 2014 +0000 +++ b/make/linux/platform_ppc64 Thu Nov 27 11:27:10 2014 +0000 @@ -14,4 +14,4 @@ gnu_dis_arch = ppc64 -sysdefs = -DLINUX -D_GNU_SOURCE -DPPC64 -DCC_INTERP +sysdefs = -DLINUX -D_GNU_SOURCE -DPPC64 diff -r 80e04c4cd4b2 -r 205e1ae8868b make/linux/platform_zero.in --- a/make/linux/platform_zero.in Tue Nov 25 17:36:55 2014 +0000 +++ b/make/linux/platform_zero.in Thu Nov 27 11:27:10 2014 +0000 @@ -14,4 +14,4 @@ gnu_dis_arch = zero -sysdefs = -DLINUX -D_GNU_SOURCE -DCC_INTERP -DZERO -DTARGET_ARCH_NYI_6939861=1 -D@ZERO_ARCHDEF@ -DZERO_LIBARCH=\"@ZERO_LIBARCH@\" +sysdefs = -DLINUX -D_GNU_SOURCE -DCC_INTERP -DZERO @ZERO_ARCHDEF@ -DZERO_LIBARCH=\"@ZERO_LIBARCH@\" diff -r 80e04c4cd4b2 -r 205e1ae8868b make/solaris/Makefile --- a/make/solaris/Makefile Tue Nov 25 17:36:55 2014 +0000 +++ b/make/solaris/Makefile Thu Nov 27 11:27:10 2014 +0000 @@ -231,28 +231,36 @@ $(TARGETS_C2): $(SUBDIRS_C2) cd $(OSNAME)_$(BUILDARCH)_compiler2/$@ && $(MAKE) $(MFLAGS) +ifeq ($(TEST_IN_BUILD),true) cd $(OSNAME)_$(BUILDARCH)_compiler2/$@ && ./test_gamma +endif ifdef INSTALL cd $(OSNAME)_$(BUILDARCH)_compiler2/$@ && $(MAKE) $(MFLAGS) install endif $(TARGETS_TIERED): $(SUBDIRS_TIERED) cd $(OSNAME)_$(BUILDARCH)_tiered/$(patsubst %tiered,%,$@) && $(MAKE) $(MFLAGS) +ifeq ($(TEST_IN_BUILD),true) cd 
$(OSNAME)_$(BUILDARCH)_tiered/$(patsubst %tiered,%,$@) && ./test_gamma +endif ifdef INSTALL cd $(OSNAME)_$(BUILDARCH)_tiered/$(patsubst %tiered,%,$@) && $(MAKE) $(MFLAGS) install endif $(TARGETS_C1): $(SUBDIRS_C1) cd $(OSNAME)_$(BUILDARCH)_compiler1/$(patsubst %1,%,$@) && $(MAKE) $(MFLAGS) +ifeq ($(TEST_IN_BUILD),true) cd $(OSNAME)_$(BUILDARCH)_compiler1/$(patsubst %1,%,$@) && ./test_gamma +endif ifdef INSTALL cd $(OSNAME)_$(BUILDARCH)_compiler1/$(patsubst %1,%,$@) && $(MAKE) $(MFLAGS) install endif $(TARGETS_CORE): $(SUBDIRS_CORE) cd $(OSNAME)_$(BUILDARCH)_core/$(patsubst %core,%,$@) && $(MAKE) $(MFLAGS) +ifeq ($(TEST_IN_BUILD),true) cd $(OSNAME)_$(BUILDARCH)_core/$(patsubst %core,%,$@) && ./test_gamma +endif ifdef INSTALL cd $(OSNAME)_$(BUILDARCH)_core/$(patsubst %core,%,$@) && $(MAKE) $(MFLAGS) install endif diff -r 80e04c4cd4b2 -r 205e1ae8868b make/solaris/makefiles/mapfile-vers --- a/make/solaris/makefiles/mapfile-vers Tue Nov 25 17:36:55 2014 +0000 +++ b/make/solaris/makefiles/mapfile-vers Thu Nov 27 11:27:10 2014 +0000 @@ -1,5 +1,5 @@ # -# Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -84,6 +84,7 @@ JVM_EnableCompiler; JVM_Exit; JVM_FillInStackTrace; + JVM_FindClassFromCaller; JVM_FindClassFromClass; JVM_FindClassFromClassLoader; JVM_FindClassFromBootLoader; diff -r 80e04c4cd4b2 -r 205e1ae8868b src/cpu/sparc/vm/copy_sparc.hpp --- a/src/cpu/sparc/vm/copy_sparc.hpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/cpu/sparc/vm/copy_sparc.hpp Thu Nov 27 11:27:10 2014 +0000 @@ -184,7 +184,7 @@ assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation"); if (value == 0 && UseBlockZeroing && - (count > (BlockZeroingLowLimit >> LogHeapWordSize))) { + (count > (size_t)(BlockZeroingLowLimit >> LogHeapWordSize))) { // Call it only when block zeroing is used ((_zero_Fn)StubRoutines::zero_aligned_words())(tohw, count); } else { diff -r 80e04c4cd4b2 -r 205e1ae8868b src/cpu/sparc/vm/stubGenerator_sparc.cpp --- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -614,8 +614,8 @@ // Save the regs and make space for a C call __ save(SP, -96, SP); __ save_all_globals_into_locals(); - BLOCK_COMMENT("call os::naked_sleep"); - __ call(CAST_FROM_FN_PTR(address, os::naked_sleep)); + BLOCK_COMMENT("call os::naked_short_sleep"); + __ call(CAST_FROM_FN_PTR(address, os::naked_short_sleep)); __ delayed()->nop(); __ restore_globals_from_locals(); __ restore(); diff -r 80e04c4cd4b2 -r 205e1ae8868b src/cpu/sparc/vm/vm_version_sparc.cpp --- a/src/cpu/sparc/vm/vm_version_sparc.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/cpu/sparc/vm/vm_version_sparc.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -98,6 +98,27 @@ _supports_cx8 = has_v9(); _supports_atomic_getset4 = true; // swap instruction + // There are Fujitsu Sparc64 CPUs which support blk_init as well so + // we have to take this check out of the 'is_niagara()' block below. + if (has_blk_init()) { + // When using CMS or G1, we cannot use memset() in BOT updates + // because the sun4v/CMT version in libc_psr uses BIS which + // exposes "phantom zeros" to concurrent readers. See 6948537. 
+ if (FLAG_IS_DEFAULT(UseMemSetInBOT) && (UseConcMarkSweepGC || UseG1GC)) { + FLAG_SET_DEFAULT(UseMemSetInBOT, false); + } + // Issue a stern warning if the user has explicitly set + // UseMemSetInBOT (it is known to cause issues), but allow + // use for experimentation and debugging. + if (UseConcMarkSweepGC || UseG1GC) { + if (UseMemSetInBOT) { + assert(!FLAG_IS_DEFAULT(UseMemSetInBOT), "Error"); + warning("Experimental flag -XX:+UseMemSetInBOT is known to cause instability" + " on sun4v; please understand that you are using at your own risk!"); + } + } + } + if (is_niagara()) { // Indirect branch is the same cost as direct if (FLAG_IS_DEFAULT(UseInlineCaches)) { @@ -107,12 +128,6 @@ if (FLAG_IS_DEFAULT(OptoLoopAlignment)) { FLAG_SET_DEFAULT(OptoLoopAlignment, 4); } - // When using CMS or G1, we cannot use memset() in BOT updates - // because the sun4v/CMT version in libc_psr uses BIS which - // exposes "phantom zeros" to concurrent readers. See 6948537. - if (FLAG_IS_DEFAULT(UseMemSetInBOT) && (UseConcMarkSweepGC || UseG1GC)) { - FLAG_SET_DEFAULT(UseMemSetInBOT, false); - } #ifdef _LP64 // 32-bit oops don't make sense for the 64-bit VM on sparc // since the 32-bit VM has the same registers and smaller objects. diff -r 80e04c4cd4b2 -r 205e1ae8868b src/cpu/sparc/vm/vm_version_sparc.hpp --- a/src/cpu/sparc/vm/vm_version_sparc.hpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/cpu/sparc/vm/vm_version_sparc.hpp Thu Nov 27 11:27:10 2014 +0000 @@ -48,7 +48,8 @@ sparc64_family = 14, M_family = 15, T_family = 16, - T1_model = 17 + T1_model = 17, + sparc5_instructions = 18 }; enum Feature_Flag_Set { @@ -73,6 +74,7 @@ M_family_m = 1 << M_family, T_family_m = 1 << T_family, T1_model_m = 1 << T1_model, + sparc5_instructions_m = 1 << sparc5_instructions, generic_v8_m = v8_instructions_m | hardware_mul32_m | hardware_div32_m | hardware_fsmuld_m, generic_v9_m = generic_v8_m | v9_instructions_m, @@ -94,7 +96,13 @@ static bool is_M_family(int features) { return (features & M_family_m) != 0; } static bool is_T_family(int features) { return (features & T_family_m) != 0; } static bool is_niagara() { return is_T_family(_features); } - DEBUG_ONLY( static bool is_niagara(int features) { return (features & sun4v_m) != 0; } ) +#ifdef ASSERT + static bool is_niagara(int features) { + // 'sun4v_m' may be defined on both Sun/Oracle Sparc CPUs as well as + // on Fujitsu Sparc64 CPUs, but only Sun/Oracle Sparcs can be 'niagaras'. + return (features & sun4v_m) != 0 && (features & sparc64_family_m) == 0; + } +#endif // Returns true if it is niagara1 (T1). static bool is_T1_model(int features) { return is_T_family(features) && ((features & T1_model_m) != 0); } @@ -117,6 +125,7 @@ static bool has_vis3() { return (_features & vis3_instructions_m) != 0; } static bool has_blk_init() { return (_features & blk_init_instructions_m) != 0; } static bool has_cbcond() { return (_features & cbcond_instructions_m) != 0; } + static bool has_sparc5_instr() { return (_features & sparc5_instructions_m) != 0; } static bool supports_compare_and_exchange() { return has_v9(); } @@ -127,6 +136,7 @@ static bool is_M_series() { return is_M_family(_features); } static bool is_T4() { return is_T_family(_features) && has_cbcond(); } + static bool is_T7() { return is_T_family(_features) && has_sparc5_instr(); } // Fujitsu SPARC64 static bool is_sparc64() { return (_features & sparc64_family_m) != 0; } @@ -146,7 +156,7 @@ static const char* cpu_features() { return _features_str; } static intx prefetch_data_size() { - return is_T4() ? 
32 : 64; // default prefetch block size on sparc + return is_T4() && !is_T7() ? 32 : 64; // default prefetch block size on sparc } // Prefetch diff -r 80e04c4cd4b2 -r 205e1ae8868b src/cpu/x86/vm/assembler_x86.cpp --- a/src/cpu/x86/vm/assembler_x86.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/cpu/x86/vm/assembler_x86.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -7641,10 +7641,12 @@ // if fast computation is not possible, result is NaN. Requires // fallback from user of this macro. // increase precision for intermediate steps of the computation + BLOCK_COMMENT("fast_pow {"); increase_precision(); fyl2x(); // Stack: (Y*log2(X)) ... pow_exp_core_encoding(); // Stack: exp(X) ... restore_precision(); + BLOCK_COMMENT("} fast_pow"); } void MacroAssembler::fast_exp() { diff -r 80e04c4cd4b2 -r 205e1ae8868b src/cpu/zero/vm/stack_zero.hpp --- a/src/cpu/zero/vm/stack_zero.hpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/cpu/zero/vm/stack_zero.hpp Thu Nov 27 11:27:10 2014 +0000 @@ -99,7 +99,7 @@ int shadow_pages_size() const { return _shadow_pages_size; } - int abi_stack_available(Thread *thread) const; + ssize_t abi_stack_available(Thread *thread) const; public: void overflow_check(int required_words, TRAPS); diff -r 80e04c4cd4b2 -r 205e1ae8868b src/cpu/zero/vm/stack_zero.inline.hpp --- a/src/cpu/zero/vm/stack_zero.inline.hpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/cpu/zero/vm/stack_zero.inline.hpp Thu Nov 27 11:27:10 2014 +0000 @@ -47,10 +47,11 @@ // This method returns the amount of ABI stack available for us // to use under normal circumstances. Note that the returned // value can be negative. -inline int ZeroStack::abi_stack_available(Thread *thread) const { - int stack_used = thread->stack_base() - (address) &stack_used; - int stack_free = thread->stack_size() - stack_used; - return stack_free - shadow_pages_size(); +inline ssize_t ZeroStack::abi_stack_available(Thread *thread) const { + ssize_t stack_used = thread->stack_base() - (address) &stack_used + + (StackYellowPages+StackRedPages+StackShadowPages) * os::vm_page_size(); + ssize_t stack_free = thread->stack_size() - stack_used; + return stack_free; } #endif // CPU_ZERO_VM_STACK_ZERO_INLINE_HPP diff -r 80e04c4cd4b2 -r 205e1ae8868b src/os/bsd/dtrace/libjvm_db.c --- a/src/os/bsd/dtrace/libjvm_db.c Tue Nov 25 17:36:55 2014 +0000 +++ b/src/os/bsd/dtrace/libjvm_db.c Thu Nov 27 11:27:10 2014 +0000 @@ -261,6 +261,9 @@ uint64_t base; int err; + /* Clear *vmp now in case we jump to fail: */ + memset(vmp, 0, sizeof(VMStructEntry)); + err = ps_pglobal_lookup(J->P, LIBJVM_SO, "gHotSpotVMStructs", &sym_addr); CHECK_FAIL(err); err = read_pointer(J, sym_addr, &gHotSpotVMStructs); diff -r 80e04c4cd4b2 -r 205e1ae8868b src/os/bsd/vm/os_bsd.cpp --- a/src/os/bsd/vm/os_bsd.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/os/bsd/vm/os_bsd.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1830,9 +1830,6 @@ ::abort(); } -// unused on bsd for now. 
-void os::set_error_file(const char *logfile) {} - // This method is a copy of JDK's sysGetLastErrorString // from src/solaris/hpi/src/system_md.c @@ -2596,6 +2593,7 @@ // determine if this is a legacy image or modules image // modules image doesn't have "jre" subdirectory len = strlen(buf); + assert(len < buflen, "Ran out of buffer space"); jrelib_p = buf + len; // Add the appropriate library subdir @@ -2631,7 +2629,7 @@ } } - strcpy(saved_jvm_path, buf); + strncpy(saved_jvm_path, buf, MAXPATHLEN); } void os::print_jni_name_prefix_on(outputStream* st, int args_size) { @@ -3713,9 +3711,21 @@ } } -int os::naked_sleep() { - // %% make the sleep time an integer flag. for now use 1 millisec. - return os::sleep(Thread::current(), 1, false); +void os::naked_short_sleep(jlong ms) { + struct timespec req; + + assert(ms < 1000, "Un-interruptable sleep, short time use only"); + req.tv_sec = 0; + if (ms > 0) { + req.tv_nsec = (ms % 1000) * 1000000; + } + else { + req.tv_nsec = 1; + } + + nanosleep(&req, NULL); + + return; } // Sleep forever; naked call to OS-specific sleep; use with CAUTION @@ -4775,6 +4785,14 @@ (size_t)(StackYellowPages+StackRedPages+StackShadowPages+ 2*BytesPerWord COMPILER2_PRESENT(+1)) * Bsd::page_size()); +#ifdef ZERO + // If this is Zero, allow at the very minimum one page each for the + // Zero stack and the native stack. This won't make any difference + // for 4k pages, but is significant for large pages. + os::Bsd::min_stack_allowed = MAX2(os::Bsd::min_stack_allowed, + (size_t)(StackYellowPages+StackRedPages+StackShadowPages+2) * Bsd::page_size()); +#endif + size_t threadStackSizeInBytes = ThreadStackSize * K; if (threadStackSizeInBytes != 0 && threadStackSizeInBytes < os::Bsd::min_stack_allowed) { diff -r 80e04c4cd4b2 -r 205e1ae8868b src/os/linux/vm/os_linux.cpp --- a/src/os/linux/vm/os_linux.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/os/linux/vm/os_linux.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1645,9 +1645,6 @@ ::abort(); } -// unused on linux for now. -void os::set_error_file(const char *logfile) {} - // This method is a copy of JDK's sysGetLastErrorString // from src/solaris/hpi/src/system_md.c @@ -2491,6 +2488,7 @@ // determine if this is a legacy image or modules image // modules image doesn't have "jre" subdirectory len = strlen(buf); + assert(len < buflen, "Ran out of buffer room"); jrelib_p = buf + len; snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch); if (0 != access(buf, F_OK)) { @@ -2513,7 +2511,7 @@ } } - strcpy(saved_jvm_path, buf); + strncpy(saved_jvm_path, buf, MAXPATHLEN); } void os::print_jni_name_prefix_on(outputStream* st, int args_size) { @@ -3960,9 +3958,33 @@ } } -int os::naked_sleep() { - // %% make the sleep time an integer flag. for now use 1 millisec. - return os::sleep(Thread::current(), 1, false); +// +// Short sleep, direct OS call. +// +// Note: certain versions of Linux CFS scheduler (since 2.6.23) do not guarantee +// sched_yield(2) will actually give up the CPU: +// +// * Alone on this pariticular CPU, keeps running. +// * Before the introduction of "skip_buddy" with "compat_yield" disabled +// (pre 2.6.39). +// +// So calling this with 0 is an alternative. 
+// +void os::naked_short_sleep(jlong ms) { + struct timespec req; + + assert(ms < 1000, "Un-interruptable sleep, short time use only"); + req.tv_sec = 0; + if (ms > 0) { + req.tv_nsec = (ms % 1000) * 1000000; + } + else { + req.tv_nsec = 1; + } + + nanosleep(&req, NULL); + + return; } // Sleep forever; naked call to OS-specific sleep; use with CAUTION @@ -4870,6 +4892,7 @@ pthread_mutex_init(&dl_mutex, NULL); +NOT_ZERO ( // If the pagesize of the VM is greater than 8K determine the appropriate // number of initial guard pages. The user can change this with the // command line arguments, if needed. @@ -4878,6 +4901,7 @@ StackRedPages = 1; StackShadowPages = round_to((StackShadowPages*Linux::vm_default_page_size()), vm_page_size()) / vm_page_size(); } + ) } // To install functions for atexit system call @@ -4934,6 +4958,14 @@ (size_t)(StackYellowPages+StackRedPages+StackShadowPages) * Linux::page_size() + (2*BytesPerWord COMPILER2_PRESENT(+1)) * Linux::vm_default_page_size()); +#ifdef ZERO + // If this is Zero, allow at the very minimum one page each for the + // Zero stack and the native stack. This won't make any difference + // for 4k pages, but is significant for large pages. + os::Linux::min_stack_allowed = MAX2(os::Linux::min_stack_allowed, + (size_t)(StackYellowPages+StackRedPages+StackShadowPages+2) * Linux::page_size()); +#endif + size_t threadStackSizeInBytes = ThreadStackSize * K; if (threadStackSizeInBytes != 0 && threadStackSizeInBytes < os::Linux::min_stack_allowed) { diff -r 80e04c4cd4b2 -r 205e1ae8868b src/os/solaris/dtrace/libjvm_db.c --- a/src/os/solaris/dtrace/libjvm_db.c Tue Nov 25 17:36:55 2014 +0000 +++ b/src/os/solaris/dtrace/libjvm_db.c Thu Nov 27 11:27:10 2014 +0000 @@ -261,6 +261,9 @@ uint64_t base; int err; + /* Clear *vmp now in case we jump to fail: */ + memset(vmp, 0, sizeof(VMStructEntry)); + err = ps_pglobal_lookup(J->P, LIBJVM_SO, "gHotSpotVMStructs", &sym_addr); CHECK_FAIL(err); err = read_pointer(J, sym_addr, &gHotSpotVMStructs); diff -r 80e04c4cd4b2 -r 205e1ae8868b src/os/solaris/vm/os_solaris.cpp --- a/src/os/solaris/vm/os_solaris.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/os/solaris/vm/os_solaris.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1877,9 +1877,6 @@ ::abort(); // dump core (for debugging) } -// unused -void os::set_error_file(const char *logfile) {} - // DLL functions const char* os::dll_file_extension() { return ".so"; } @@ -2561,6 +2558,7 @@ // determine if this is a legacy image or modules image // modules image doesn't have "jre" subdirectory len = strlen(buf); + assert(len < buflen, "Ran out of buffer space"); jrelib_p = buf + len; snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch); if (0 != access(buf, F_OK)) { @@ -2581,7 +2579,7 @@ } } - strcpy(saved_jvm_path, buf); + strncpy(saved_jvm_path, buf, MAXPATHLEN); } @@ -3744,9 +3742,14 @@ return os_sleep(millis, interruptible); } -int os::naked_sleep() { - // %% make the sleep time an integer flag. for now use 1 millisec. - return os_sleep(1, false); +void os::naked_short_sleep(jlong ms) { + assert(ms < 1000, "Un-interruptable sleep, short time use only"); + + // usleep is deprecated and removed from POSIX, in favour of nanosleep, but + // Solaris requires -lrt for this. 
+ usleep((ms * 1000)); + + return; } // Sleep forever; naked call to OS-specific sleep; use with CAUTION diff -r 80e04c4cd4b2 -r 205e1ae8868b src/os/windows/vm/os_windows.cpp --- a/src/os/windows/vm/os_windows.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/os/windows/vm/os_windows.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -1773,27 +1773,28 @@ // libjvm.so is installed there (append a fake suffix // hotspot/libjvm.so). char* java_home_var = ::getenv("JAVA_HOME"); - if (java_home_var != NULL && java_home_var[0] != 0) { - - strncpy(buf, java_home_var, buflen); - - // determine if this is a legacy image or modules image - // modules image doesn't have "jre" subdirectory - size_t len = strlen(buf); - char* jrebin_p = buf + len; - jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\"); - if (0 != _access(buf, 0)) { - jio_snprintf(jrebin_p, buflen-len, "\\bin\\"); - } - len = strlen(buf); - jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll"); + if (java_home_var != NULL && java_home_var[0] != 0 && + strlen(java_home_var) < (size_t)buflen) { + + strncpy(buf, java_home_var, buflen); + + // determine if this is a legacy image or modules image + // modules image doesn't have "jre" subdirectory + size_t len = strlen(buf); + char* jrebin_p = buf + len; + jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\"); + if (0 != _access(buf, 0)) { + jio_snprintf(jrebin_p, buflen-len, "\\bin\\"); + } + len = strlen(buf); + jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll"); } } if(buf[0] == '\0') { - GetModuleFileName(vm_lib_handle, buf, buflen); - } - strcpy(saved_jvm_path, buf); + GetModuleFileName(vm_lib_handle, buf, buflen); + } + strncpy(saved_jvm_path, buf, MAX_PATH); } @@ -2218,17 +2219,6 @@ #endif //_WIN64 -// Fatal error reporting is single threaded so we can make this a -// static and preallocated. If it's more than MAX_PATH silently ignore -// it. -static char saved_error_file[MAX_PATH] = {0}; - -void os::set_error_file(const char *logfile) { - if (strlen(logfile) <= MAX_PATH) { - strncpy(saved_error_file, logfile, MAX_PATH); - } -} - static inline void report_error(Thread* t, DWORD exception_code, address addr, void* siginfo, void* context) { VMError err(t, exception_code, addr, siginfo, context); @@ -3431,6 +3421,16 @@ return result; } +// +// Short sleep, direct OS call. +// +// ms = 0, means allow others (if any) to run. +// +void os::naked_short_sleep(jlong ms) { + assert(ms < 1000, "Un-interruptable sleep, short time use only"); + Sleep(ms); +} + // Sleep forever; naked call to OS-specific sleep; use with CAUTION void os::infinite_sleep() { while (true) { // sleep forever ... diff -r 80e04c4cd4b2 -r 205e1ae8868b src/os_cpu/bsd_zero/vm/atomic_bsd_zero.inline.hpp --- a/src/os_cpu/bsd_zero/vm/atomic_bsd_zero.inline.hpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/os_cpu/bsd_zero/vm/atomic_bsd_zero.inline.hpp Thu Nov 27 11:27:10 2014 +0000 @@ -239,7 +239,9 @@ // operation. Note that some platforms only support this with the // limitation that the only valid value to store is the immediate // constant 1. There is a test for this in JNI_CreateJavaVM(). 
- return __sync_lock_test_and_set (dest, exchange_value); + jint result = __sync_lock_test_and_set (dest, exchange_value); + __sync_synchronize(); + return result; #endif // M68K #endif // ARM } @@ -252,7 +254,9 @@ #ifdef M68K return m68k_lock_test_and_set(dest, exchange_value); #else - return __sync_lock_test_and_set (dest, exchange_value); + intptr_t result = __sync_lock_test_and_set (dest, exchange_value); + __sync_synchronize(); + return result; #endif // M68K #endif // ARM } diff -r 80e04c4cd4b2 -r 205e1ae8868b src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp --- a/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -118,7 +118,7 @@ *ret_sp = os::Linux::ucontext_get_sp(uc); } if (ret_fp) { - *ret_fp = os::Linux::ucontext_get_fp(uc); + *ret_fp = (intptr_t*)NULL; } } else { // construct empty ExtendedPC for return value checking @@ -136,18 +136,15 @@ frame os::fetch_frame_from_context(void* ucVoid) { intptr_t* sp; - intptr_t* fp; - ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp); - return frame(sp, fp, epc.pc()); + ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, NULL); + return frame(sp, frame::unpatchable, epc.pc()); } frame os::get_sender_for_C_frame(frame* fr) { - return frame(fr->sender_sp(), fr->link(), fr->sender_pc()); + return frame(fr->sender_sp(), frame::unpatchable, fr->sender_pc()); } frame os::current_frame() { - fprintf(stderr, "current_frame()"); - intptr_t* sp = StubRoutines::Sparc::flush_callers_register_windows_func()(); frame myframe(sp, frame::unpatchable, CAST_FROM_FN_PTR(address, os::current_frame)); diff -r 80e04c4cd4b2 -r 205e1ae8868b src/os_cpu/linux_sparc/vm/vm_version_linux_sparc.cpp --- a/src/os_cpu/linux_sparc/vm/vm_version_linux_sparc.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/os_cpu/linux_sparc/vm/vm_version_linux_sparc.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -55,7 +55,7 @@ if (detect_niagara()) { NOT_PRODUCT(if (PrintMiscellaneous && Verbose) tty->print_cr("Detected Linux on Niagara");) - features = niagara1_m; + features = niagara1_m | T_family_m; } return features; diff -r 80e04c4cd4b2 -r 205e1ae8868b src/os_cpu/linux_zero/vm/atomic_linux_zero.inline.hpp --- a/src/os_cpu/linux_zero/vm/atomic_linux_zero.inline.hpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/os_cpu/linux_zero/vm/atomic_linux_zero.inline.hpp Thu Nov 27 11:27:10 2014 +0000 @@ -243,7 +243,9 @@ // operation. Note that some platforms only support this with the // limitation that the only valid value to store is the immediate // constant 1. There is a test for this in JNI_CreateJavaVM(). 
- return __sync_lock_test_and_set (dest, exchange_value); + jint result = __sync_lock_test_and_set (dest, exchange_value); + __sync_synchronize(); + return result; #endif // M68K #endif // ARM } @@ -256,7 +258,9 @@ #ifdef M68K return m68k_lock_test_and_set(dest, exchange_value); #else - return __sync_lock_test_and_set (dest, exchange_value); + intptr_t result = __sync_lock_test_and_set (dest, exchange_value); + __sync_synchronize(); + return result; #endif // M68K #endif // ARM } diff -r 80e04c4cd4b2 -r 205e1ae8868b src/os_cpu/linux_zero/vm/os_linux_zero.cpp --- a/src/os_cpu/linux_zero/vm/os_linux_zero.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/os_cpu/linux_zero/vm/os_linux_zero.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -317,15 +317,15 @@ /////////////////////////////////////////////////////////////////////////////// // thread stack -#ifndef PPC -size_t os::Linux::min_stack_allowed = 64 * K; -#else +#ifdef PPC #ifdef _LP64 // Default for 64 bit must be at least 1600 K size_t os::Linux::min_stack_allowed = 1664 * K; #else size_t os::Linux::min_stack_allowed = 1152 * K; #endif +#else +size_t os::Linux::min_stack_allowed = 64 * K; #endif bool os::Linux::supports_variable_stack_size() { @@ -540,3 +540,7 @@ } }; #endif // !_LP64 +#ifndef PRODUCT +void os::verify_stack_alignment() { +} +#endif diff -r 80e04c4cd4b2 -r 205e1ae8868b src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp --- a/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -75,13 +75,19 @@ do_sysinfo(SI_ARCHITECTURE_64, "sparcv9", &features, generic_v9_m); // Extract valid instruction set extensions. - uint_t av; - uint_t avn = os::Solaris::getisax(&av, 1); - assert(avn == 1, "should only return one av"); + uint_t avs[2]; + uint_t avn = os::Solaris::getisax(avs, 2); + assert(avn <= 2, "should return two or less av's"); + uint_t av = avs[0]; #ifndef PRODUCT - if (PrintMiscellaneous && Verbose) - tty->print_cr("getisax(2) returned: " PTR32_FORMAT, av); + if (PrintMiscellaneous && Verbose) { + tty->print("getisax(2) returned: " PTR32_FORMAT, av); + if (avn > 1) { + tty->print(", " PTR32_FORMAT, avs[1]); + } + tty->cr(); + } #endif if (av & AV_SPARC_MUL32) features |= hardware_mul32_m; @@ -91,6 +97,13 @@ if (av & AV_SPARC_POPC) features |= hardware_popc_m; if (av & AV_SPARC_VIS) features |= vis1_instructions_m; if (av & AV_SPARC_VIS2) features |= vis2_instructions_m; + if (avn > 1) { + uint_t av2 = avs[1]; +#ifndef AV2_SPARC_SPARC5 +#define AV2_SPARC_SPARC5 0x00000008 /* The 29 new fp and sub instructions */ +#endif + if (av2 & AV2_SPARC_SPARC5) features |= sparc5_instructions_m; + } // Next values are not defined before Solaris 10 // but Solaris 8 is used for jdk6 update builds. diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/adlc/formssel.cpp --- a/src/share/vm/adlc/formssel.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/adlc/formssel.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -565,12 +565,6 @@ attr = (Attribute *)attr->_next; } - // Ugly: until a better fix is implemented, disable rematerialization for - // negD nodes because they are proved to be problematic. 
- if (is_ideal_negD()) { - return false; - } - // Constants if( _components.count() == 1 && _components[0]->is(Component::USE_DEF) ) rematerialize = true; diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/c1/c1_GraphBuilder.cpp --- a/src/share/vm/c1/c1_GraphBuilder.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -3663,6 +3663,7 @@ // now perform tests that are based on flag settings if (callee->force_inline()) { + if (inline_level() > MaxForceInlineLevel) INLINE_BAILOUT("MaxForceInlineLevel"); print_inlining(callee, "force inline by annotation"); } else if (callee->should_inline()) { print_inlining(callee, "force inline by CompileOracle"); diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/c1/c1_globals.hpp diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/ci/bcEscapeAnalyzer.cpp --- a/src/share/vm/ci/bcEscapeAnalyzer.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/ci/bcEscapeAnalyzer.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -158,6 +158,9 @@ void BCEscapeAnalyzer::set_method_escape(ArgumentMap vars) { clear_bits(vars, _arg_local); + if (vars.contains_allocated()) { + _allocated_escapes = true; + } } void BCEscapeAnalyzer::set_global_escape(ArgumentMap vars, bool merge) { diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/classfile/classFileParser.cpp --- a/src/share/vm/classfile/classFileParser.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/classfile/classFileParser.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -961,7 +961,7 @@ "Wrong size %u for field's Signature attribute in class file %s", attribute_length, CHECK); } - generic_signature_index = cfs->get_u2(CHECK); + generic_signature_index = parse_generic_signature_attribute(cp, CHECK); } else if (attribute_name == vmSymbols::tag_runtime_visible_annotations()) { runtime_visible_annotations_length = attribute_length; runtime_visible_annotations = cfs->get_u1_buffer(); @@ -1698,7 +1698,8 @@ } // Sift through annotations, looking for those significant to the VM: -void ClassFileParser::parse_annotations(u1* buffer, int limit, +void ClassFileParser::parse_annotations(Handle class_loader, + u1* buffer, int limit, constantPoolHandle cp, ClassFileParser::AnnotationCollector* coll, TRAPS) { @@ -1736,7 +1737,7 @@ } // Here is where parsing particular annotations will take place. - AnnotationCollector::ID id = coll->annotation_index(aname); + AnnotationCollector::ID id = coll->annotation_index(class_loader, is_anonymous(), aname); if (id == AnnotationCollector::_unknown) continue; coll->set_annotation(id); // If there are no values, just set the bit and move on: @@ -1765,20 +1766,30 @@ } } -ClassFileParser::AnnotationCollector::ID ClassFileParser::AnnotationCollector::annotation_index(Symbol* name) { +ClassFileParser::AnnotationCollector::ID ClassFileParser::AnnotationCollector::annotation_index(Handle class_loader, + bool is_anonymous, + Symbol* name) { vmSymbols::SID sid = vmSymbols::find_sid(name); + // Privileged code can use all annotations. Other code silently drops some. 
+ const bool privileged = class_loader.is_null() || is_anonymous || + class_loader()->klass()->klass_part()->name() == + vmSymbols::sun_misc_Launcher_ExtClassLoader(); switch (sid) { case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_ForceInline_signature): if (_location != _in_method) break; // only allow for methods + if (!privileged) break; // only allow in privileged code return _method_ForceInline; case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_DontInline_signature): if (_location != _in_method) break; // only allow for methods + if (!privileged) break; // only allow in privileged code return _method_DontInline; case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_LambdaForm_Compiled_signature): if (_location != _in_method) break; // only allow for methods + if (!privileged) break; // only allow in privileged code return _method_LambdaForm_Compiled; case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_LambdaForm_Hidden_signature): if (_location != _in_method) break; // only allow for methods + if (!privileged) break; // only allow in privileged code return _method_LambdaForm_Hidden; default: break; } @@ -1818,8 +1829,8 @@ // from the method back up to the containing klass. These flag values // are added to klass's access_flags. -methodHandle ClassFileParser::parse_method(constantPoolHandle cp, bool is_interface, - AccessFlags *promoted_flags, +methodHandle ClassFileParser::parse_method(Handle class_loader, constantPoolHandle cp, + bool is_interface, AccessFlags *promoted_flags, typeArrayHandle* method_annotations, typeArrayHandle* method_parameter_annotations, typeArrayHandle* method_default_annotations, @@ -2122,13 +2133,12 @@ "Invalid Signature attribute length %u in class file %s", method_attribute_length, CHECK_(nullHandle)); } - cfs->guarantee_more(2, CHECK_(nullHandle)); // generic_signature_index - generic_signature_index = cfs->get_u2_fast(); + generic_signature_index = parse_generic_signature_attribute(cp, CHECK_(nullHandle)); } else if (method_attribute_name == vmSymbols::tag_runtime_visible_annotations()) { runtime_visible_annotations_length = method_attribute_length; runtime_visible_annotations = cfs->get_u1_buffer(); assert(runtime_visible_annotations != NULL, "null visible annotations"); - parse_annotations(runtime_visible_annotations, runtime_visible_annotations_length, cp, &parsed_annotations, CHECK_(nullHandle)); + parse_annotations(class_loader, runtime_visible_annotations, runtime_visible_annotations_length, cp, &parsed_annotations, CHECK_(nullHandle)); cfs->skip_u1(runtime_visible_annotations_length, CHECK_(nullHandle)); } else if (PreserveAllAnnotations && method_attribute_name == vmSymbols::tag_runtime_invisible_annotations()) { runtime_invisible_annotations_length = method_attribute_length; @@ -2357,8 +2367,8 @@ // from the methods back up to the containing klass. These flag values // are added to klass's access_flags. 
-objArrayHandle ClassFileParser::parse_methods(constantPoolHandle cp, bool is_interface, - AccessFlags* promoted_flags, +objArrayHandle ClassFileParser::parse_methods(Handle class_loader, constantPoolHandle cp, + bool is_interface, AccessFlags* promoted_flags, bool* has_final_method, objArrayOop* methods_annotations_oop, objArrayOop* methods_parameter_annotations_oop, @@ -2381,7 +2391,8 @@ objArrayHandle methods_parameter_annotations; objArrayHandle methods_default_annotations; for (int index = 0; index < length; index++) { - methodHandle method = parse_method(cp, is_interface, + methodHandle method = parse_method(class_loader, cp, + is_interface, promoted_flags, &method_annotations, &method_parameter_annotations, @@ -2490,6 +2501,17 @@ } } +// Parse generic_signature attribute for methods and fields +u2 ClassFileParser::parse_generic_signature_attribute(constantPoolHandle cp, TRAPS) { + ClassFileStream* cfs = stream(); + cfs->guarantee_more(2, CHECK_0); // generic_signature_index + u2 generic_signature_index = cfs->get_u2_fast(); + check_property( + valid_symbol_at(cp, generic_signature_index), + "Invalid Signature attribute at constant pool index %u in class file %s", + generic_signature_index, CHECK_0); + return generic_signature_index; +} void ClassFileParser::parse_classfile_sourcefile_attribute(constantPoolHandle cp, TRAPS) { ClassFileStream* cfs = stream(); @@ -2654,18 +2676,19 @@ ClassFileStream* cfs = stream(); u1* current_start = cfs->current(); - cfs->guarantee_more(2, CHECK); // length + guarantee_property(attribute_byte_length >= sizeof(u2), + "Invalid BootstrapMethods attribute length %u in class file %s", + attribute_byte_length, + CHECK); + + cfs->guarantee_more(attribute_byte_length, CHECK); + int attribute_array_length = cfs->get_u2_fast(); guarantee_property(_max_bootstrap_specifier_index < attribute_array_length, "Short length on BootstrapMethods in class file %s", CHECK); - guarantee_property(attribute_byte_length > sizeof(u2), - "Invalid BootstrapMethods attribute length %u in class file %s", - attribute_byte_length, - CHECK); - // The attribute contains a counted array of counted tuples of shorts, // represending bootstrap specifiers: // length*{bootstrap_method_index, argument_count*{argument_index}} @@ -2698,6 +2721,11 @@ "bootstrap_method_index %u has bad constant type in class file %s", bootstrap_method_index, CHECK); + + guarantee_property((operand_fill_index + 1 + argument_count) < operands->length(), + "Invalid BootstrapMethods num_bootstrap_methods or num_bootstrap_arguments value in class file %s", + CHECK); + operands->short_at_put(operand_fill_index++, bootstrap_method_index); operands->short_at_put(operand_fill_index++, argument_count); @@ -2715,7 +2743,6 @@ } assert(operand_fill_index == operands()->length(), "exact fill"); - assert(constantPoolOopDesc::operand_array_length(operands()) == attribute_array_length, "correct decode"); u1* current_end = cfs->current(); guarantee_property(current_end == current_start + attribute_byte_length, @@ -2726,7 +2753,8 @@ } -void ClassFileParser::parse_classfile_attributes(constantPoolHandle cp, +void ClassFileParser::parse_classfile_attributes(Handle class_loader, + constantPoolHandle cp, ClassFileParser::ClassAnnotationCollector* parsed_annotations, TRAPS) { ClassFileStream* cfs = stream(); @@ -2809,7 +2837,8 @@ runtime_visible_annotations_length = attribute_length; runtime_visible_annotations = cfs->get_u1_buffer(); assert(runtime_visible_annotations != NULL, "null visible annotations"); - 
parse_annotations(runtime_visible_annotations, + parse_annotations(class_loader, + runtime_visible_annotations, runtime_visible_annotations_length, cp, parsed_annotations, @@ -3172,7 +3201,8 @@ objArrayOop methods_annotations_oop = NULL; objArrayOop methods_parameter_annotations_oop = NULL; objArrayOop methods_default_annotations_oop = NULL; - objArrayHandle methods = parse_methods(cp, access_flags.is_interface(), + objArrayHandle methods = parse_methods(class_loader, cp, + access_flags.is_interface(), &promoted_flags, &has_final_method, &methods_annotations_oop, @@ -3186,7 +3216,7 @@ // Additional attributes ClassAnnotationCollector parsed_annotations; - parse_classfile_attributes(cp, &parsed_annotations, CHECK_(nullHandle)); + parse_classfile_attributes(class_loader, cp, &parsed_annotations, CHECK_(nullHandle)); // Make sure this is the end of class file stream guarantee_property(cfs->at_eos(), "Extra bytes at the end of class file %s", CHECK_(nullHandle)); @@ -3717,7 +3747,7 @@ } // Allocate mirror and initialize static fields - java_lang_Class::create_mirror(this_klass, CHECK_(nullHandle)); + java_lang_Class::create_mirror(this_klass, class_loader, CHECK_(nullHandle)); ClassLoadingService::notify_class_loaded(instanceKlass::cast(this_klass()), false /* not shared class */); diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/classfile/classFileParser.hpp --- a/src/share/vm/classfile/classFileParser.hpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/classfile/classFileParser.hpp Thu Nov 27 11:27:10 2014 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -102,7 +102,7 @@ assert((int)_annotation_LIMIT <= (int)sizeof(_annotations_present) * BitsPerByte, ""); } // If this annotation name has an ID, report it (or _none). 
- ID annotation_index(Symbol* name); + ID annotation_index(Handle class_loader, bool is_anonymous, Symbol* name); // Set the annotation name: void set_annotation(ID id) { assert((int)id >= 0 && (int)id < (int)_annotation_LIMIT, "oob"); @@ -169,14 +169,14 @@ u2* java_fields_count_ptr, TRAPS); // Method parsing - methodHandle parse_method(constantPoolHandle cp, bool is_interface, - AccessFlags* promoted_flags, + methodHandle parse_method(Handle class_loader, constantPoolHandle cp, + bool is_interface, AccessFlags* promoted_flags, typeArrayHandle* method_annotations, typeArrayHandle* method_parameter_annotations, typeArrayHandle* method_default_annotations, TRAPS); - objArrayHandle parse_methods (constantPoolHandle cp, bool is_interface, - AccessFlags* promoted_flags, + objArrayHandle parse_methods (Handle class_loader, constantPoolHandle cp, + bool is_interface, AccessFlags* promoted_flags, bool* has_final_method, objArrayOop* methods_annotations_oop, objArrayOop* methods_parameter_annotations_oop, @@ -202,6 +202,7 @@ typeArrayOop parse_stackmap_table(u4 code_attribute_length, TRAPS); // Classfile attribute parsing + u2 parse_generic_signature_attribute(constantPoolHandle cp, TRAPS); void parse_classfile_sourcefile_attribute(constantPoolHandle cp, TRAPS); void parse_classfile_source_debug_extension_attribute(constantPoolHandle cp, int length, TRAPS); u2 parse_classfile_inner_classes_attribute(u1* inner_classes_attribute_start, @@ -210,7 +211,8 @@ u2 enclosing_method_method_index, constantPoolHandle cp, TRAPS); - void parse_classfile_attributes(constantPoolHandle cp, + void parse_classfile_attributes(Handle class_loader, + constantPoolHandle cp, ClassAnnotationCollector* parsed_annotations, TRAPS); void parse_classfile_synthetic_attribute(constantPoolHandle cp, TRAPS); @@ -224,7 +226,7 @@ int runtime_invisible_annotations_length, TRAPS); int skip_annotation(u1* buffer, int limit, int index); int skip_annotation_value(u1* buffer, int limit, int index); - void parse_annotations(u1* buffer, int limit, constantPoolHandle cp, + void parse_annotations(Handle class_loader, u1* buffer, int limit, constantPoolHandle cp, /* Results (currently, only one result is supported): */ AnnotationCollector* result, TRAPS); @@ -335,6 +337,12 @@ : cp->tag_at(index).is_klass_reference()); } + // Checks that the cpool index is in range and is a utf8 + bool valid_symbol_at(constantPoolHandle cp, int cpool_index) { + return (cp->is_within_bounds(cpool_index) && + cp->tag_at(cpool_index).is_utf8()); + } + public: // Constructor ClassFileParser(ClassFileStream* st) { set_stream(st); } diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/classfile/javaClasses.cpp --- a/src/share/vm/classfile/javaClasses.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/classfile/javaClasses.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -533,10 +533,10 @@ } } } - create_mirror(k, CHECK); + create_mirror(k, Handle(NULL), CHECK); } -oop java_lang_Class::create_mirror(KlassHandle k, TRAPS) { +oop java_lang_Class::create_mirror(KlassHandle k, Handle class_loader, TRAPS) { assert(k->java_mirror() == NULL, "should only assign mirror once"); // Use this moment of initialization to cache modifier_flags also, // to support Class.getModifiers(). 
Instance classes recalculate @@ -577,6 +577,8 @@ // Initialize static fields instanceKlass::cast(k())->do_local_static_fields(&initialize_static_field, CHECK_NULL); } + // set the classLoader field in the java_lang_Class instance + set_class_loader(mirror(), class_loader()); return mirror(); } else { return NULL; @@ -602,6 +604,18 @@ java_class->int_field_put(_static_oop_field_count_offset, size); } +void java_lang_Class::set_class_loader(oop java_class, oop loader) { + // jdk7 runs Queens in bootstrapping and jdk8-9 has no coordinated pushes yet. + if (_class_loader_offset != 0) { + java_class->obj_field_put(_class_loader_offset, loader); + } +} + +oop java_lang_Class::class_loader(oop java_class) { + assert(_class_loader_offset != 0, "must be set"); + return java_class->obj_field(_class_loader_offset); +} + oop java_lang_Class::create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS) { // This should be improved by adding a field at the Java level or by // introducing a new VM klass (see comment in ClassFileParser) @@ -765,6 +779,12 @@ compute_optional_offset(classRedefinedCount_offset, klass_oop, vmSymbols::classRedefinedCount_name(), vmSymbols::int_signature()); + // Needs to be optional because the old build runs Queens during bootstrapping + // and jdk8-9 doesn't have coordinated pushes yet. + compute_optional_offset(_class_loader_offset, + klass_oop, vmSymbols::classClassLoader_name(), + vmSymbols::classloader_signature()); + CLASS_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET); } @@ -2833,6 +2853,7 @@ int java_lang_Class::_resolved_constructor_offset; int java_lang_Class::_oop_size_offset; int java_lang_Class::_static_oop_field_count_offset; +int java_lang_Class::_class_loader_offset; int java_lang_Throwable::backtrace_offset; int java_lang_Throwable::detailMessage_offset; int java_lang_Throwable::cause_offset; @@ -3280,3 +3301,4 @@ JavaClasses::check_offsets(); FilteredFieldsMap::initialize(); // must be done after computing offsets. 
} + diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/classfile/javaClasses.hpp --- a/src/share/vm/classfile/javaClasses.hpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/classfile/javaClasses.hpp Thu Nov 27 11:27:10 2014 +0000 @@ -224,14 +224,21 @@ static int _oop_size_offset; static int _static_oop_field_count_offset; + static int _class_loader_offset; + static bool offsets_computed; static int classRedefinedCount_offset; + static void set_class_loader(oop java_class, oop class_loader); public: static void compute_offsets(); // Instance creation - static oop create_mirror(KlassHandle k, TRAPS); + static oop create_mirror(KlassHandle k, Handle class_loader, TRAPS); + static oop create_mirror(KlassHandle k, TRAPS) { + return create_mirror(k, Handle(), THREAD); + } + static void fixup_mirror(KlassHandle k, TRAPS); static oop create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS); // Conversion @@ -267,6 +274,8 @@ static int classRedefinedCount(oop the_class_mirror); static void set_classRedefinedCount(oop the_class_mirror, int value); + static oop class_loader(oop java_class); + static int oop_size(oop java_class); static void set_oop_size(oop java_class, int size); static int static_oop_field_count(oop java_class); diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/classfile/stackMapFrame.cpp --- a/src/share/vm/classfile/stackMapFrame.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/classfile/stackMapFrame.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -54,15 +54,19 @@ return frame; } -bool StackMapFrame::has_new_object() const { +// Return true if frame has an uninitialized (new) object that differs +// from the target frame's object. +bool StackMapFrame::has_nonmatching_new_object(const StackMapFrame *target_frame) const { int32_t i; for (i = 0; i < _max_locals; i++) { - if (_locals[i].is_uninitialized()) { + if (_locals[i].is_uninitialized() && + !_locals[i].equals(target_frame->_locals[i])) { return true; } } for (i = 0; i < _stack_size; i++) { - if (_stack[i].is_uninitialized()) { + if (_stack[i].is_uninitialized() && + !_stack[i].equals(target_frame->_stack[i])) { return true; } } diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/classfile/stackMapFrame.hpp --- a/src/share/vm/classfile/stackMapFrame.hpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/classfile/stackMapFrame.hpp Thu Nov 27 11:27:10 2014 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -155,8 +155,9 @@ const methodHandle m, VerificationType thisKlass, TRAPS); // Search local variable type array and stack type array. - // Return true if an uninitialized object is found. - bool has_new_object() const; + // Return true if an uninitialized object is found that is + // not equal to the corresponding object on the target frame. + bool has_nonmatching_new_object(const StackMapFrame *target_frame) const; // Search local variable type array and stack type array. // Set every element with type of old_object to new_object. 
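The stackMapFrame hunks above relax the old rule that any uninitialized object on a backward branch is an error: an uninitialized entry now fails verification only when the target stackmap frame does not record the same uninitialized type. A standalone C++ model of that comparison, with an invented VType struct in place of HotSpot's VerificationType:

#include <cstdint>
#include <vector>

// Simplified verification type: either initialized, or uninitialized with the
// bci of the 'new' bytecode that created it (invented for this sketch).
struct VType {
  bool     uninitialized;
  uint32_t new_bci;   // meaningful only when uninitialized
  bool equals(const VType& o) const {
    return uninitialized == o.uninitialized &&
           (!uninitialized || new_bci == o.new_bci);
  }
};

// Same idea as StackMapFrame::has_nonmatching_new_object(): flag an
// uninitialized slot only if the target frame carries a different type there.
bool has_nonmatching_new_object(const std::vector<VType>& current,
                                const std::vector<VType>& target) {
  for (size_t i = 0; i < current.size() && i < target.size(); i++) {
    if (current[i].uninitialized && !current[i].equals(target[i])) {
      return true;
    }
  }
  return false;
}

int main() {
  std::vector<VType> cur    = { { true, 10 } };
  std::vector<VType> same   = { { true, 10 } };
  std::vector<VType> differ = { { true, 42 } };
  return (has_nonmatching_new_object(cur, same) == false &&
          has_nonmatching_new_object(cur, differ) == true) ? 0 : 1;
}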
diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/classfile/stackMapTable.cpp --- a/src/share/vm/classfile/stackMapTable.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/classfile/stackMapTable.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -70,24 +70,26 @@ bool StackMapTable::match_stackmap( StackMapFrame* frame, int32_t target, - bool match, bool update, ErrorContext* ctx, TRAPS) const { + bool match, bool update, bool handler, ErrorContext* ctx, TRAPS) const { int index = get_index_from_offset(target); - return match_stackmap(frame, target, index, match, update, ctx, THREAD); + return match_stackmap(frame, target, index, match, update, handler, ctx, THREAD); } // Match and/or update current_frame to the frame in stackmap table with // specified offset and frame index. Return true if the two frames match. +// handler is true if the frame in stackmap_table is for an exception handler. // -// The values of match and update are: _match__update_ +// The values of match and update are: _match__update__handler // -// checking a branch target/exception handler: true false +// checking a branch target: true false false +// checking an exception handler: true false true // linear bytecode verification following an -// unconditional branch: false true +// unconditional branch: false true false // linear bytecode verification not following an -// unconditional branch: true true +// unconditional branch: true true false bool StackMapTable::match_stackmap( StackMapFrame* frame, int32_t target, int32_t frame_index, - bool match, bool update, ErrorContext* ctx, TRAPS) const { + bool match, bool update, bool handler, ErrorContext* ctx, TRAPS) const { if (frame_index < 0 || frame_index >= _frame_count) { *ctx = ErrorContext::missing_stackmap(frame->offset()); frame->verifier()->verify_error( @@ -98,11 +100,9 @@ StackMapFrame *stackmap_frame = _frame_array[frame_index]; bool result = true; if (match) { - // when checking handler target, match == true && update == false - bool is_exception_handler = !update; // Has direct control flow from last instruction, need to match the two // frames. 
- result = frame->is_assignable_to(stackmap_frame, is_exception_handler, + result = frame->is_assignable_to(stackmap_frame, handler, ctx, CHECK_VERIFY_(frame->verifier(), result)); } if (update) { @@ -126,7 +126,7 @@ StackMapFrame* frame, int32_t target, TRAPS) const { ErrorContext ctx; bool match = match_stackmap( - frame, target, true, false, &ctx, CHECK_VERIFY(frame->verifier())); + frame, target, true, false, false, &ctx, CHECK_VERIFY(frame->verifier())); if (!match || (target < 0 || target >= _code_length)) { frame->verifier()->verify_error(ctx, "Inconsistent stackmap frames at branch target %d", target); @@ -138,7 +138,10 @@ void StackMapTable::check_new_object( const StackMapFrame* frame, int32_t target, TRAPS) const { - if (frame->offset() > target && frame->has_new_object()) { + int frame_index = get_index_from_offset(target); + assert(frame_index >= 0 && frame_index < _frame_count, "bad frame index"); + if (frame->offset() > target && + frame->has_nonmatching_new_object(_frame_array[frame_index])) { frame->verifier()->verify_error( ErrorContext::bad_code(frame->offset()), "Uninitialized object exists on backward branch %d", target); diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/classfile/stackMapTable.hpp --- a/src/share/vm/classfile/stackMapTable.hpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/classfile/stackMapTable.hpp Thu Nov 27 11:27:10 2014 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -77,12 +77,12 @@ // specified offset. Return true if the two frames match. bool match_stackmap( StackMapFrame* current_frame, int32_t offset, - bool match, bool update, ErrorContext* ctx, TRAPS) const; + bool match, bool update, bool handler, ErrorContext* ctx, TRAPS) const; // Match and/or update current_frame to the frame in stackmap table with // specified offset and frame index. Return true if the two frames match. bool match_stackmap( StackMapFrame* current_frame, int32_t offset, int32_t frame_index, - bool match, bool update, ErrorContext* ctx, TRAPS) const; + bool match, bool update, bool handler, ErrorContext* ctx, TRAPS) const; // Check jump instructions. Make sure there are no uninitialized // instances on backward branch. diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/classfile/verifier.cpp --- a/src/share/vm/classfile/verifier.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/classfile/verifier.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -632,6 +632,7 @@ bool no_control_flow = false; // Set to true when there is no direct control // flow from current instruction to the next // instruction in sequence + Bytecodes::Code opcode; while (!bcs.is_last_bytecode()) { // Check for recursive re-verification before each bytecode. @@ -1788,7 +1789,7 @@ // If matched, current_frame will be updated by this method. 
bool matches = stackmap_table->match_stackmap( current_frame, this_offset, stackmap_index, - !no_control_flow, true, &ctx, CHECK_VERIFY_(this, 0)); + !no_control_flow, true, false, &ctx, CHECK_VERIFY_(this, 0)); if (!matches) { // report type error verify_error(ctx, "Instruction type does not match stack map"); @@ -1835,7 +1836,7 @@ } ErrorContext ctx; bool matches = stackmap_table->match_stackmap( - new_frame, handler_pc, true, false, &ctx, CHECK_VERIFY(this)); + new_frame, handler_pc, true, false, true, &ctx, CHECK_VERIFY(this)); if (!matches) { verify_error(ctx, "Stack map does not match the one at " "exception handler %d", handler_pc); @@ -2224,6 +2225,181 @@ } } +// Look at the method's handlers. If the bci is in the handler's try block +// then check if the handler_pc is already on the stack. If not, push it. +void ClassVerifier::push_handlers(ExceptionTable* exhandlers, + GrowableArray* handler_stack, + u4 bci) { + int exlength = exhandlers->length(); + for(int x = 0; x < exlength; x++) { + if (bci >= exhandlers->start_pc(x) && bci < exhandlers->end_pc(x)) { + handler_stack->append_if_missing(exhandlers->handler_pc(x)); + } + } +} + +// Return TRUE if all code paths starting with start_bc_offset end in +// bytecode athrow or loop. +bool ClassVerifier::ends_in_athrow(u4 start_bc_offset) { + ResourceMark rm; + // Create bytecode stream. + RawBytecodeStream bcs(method()); + u4 code_length = method()->code_size(); + bcs.set_start(start_bc_offset); + u4 target; + // Create stack for storing bytecode start offsets for if* and *switch. + GrowableArray* bci_stack = new GrowableArray(30); + // Create stack for handlers for try blocks containing this handler. + GrowableArray* handler_stack = new GrowableArray(30); + // Create list of visited branch opcodes (goto* and if*). + GrowableArray* visited_branches = new GrowableArray(30); + ExceptionTable exhandlers(_method()); + + while (true) { + if (bcs.is_last_bytecode()) { + // if no more starting offsets to parse or if at the end of the + // method then return false. + if ((bci_stack->is_empty()) || ((u4)bcs.end_bci() == code_length)) + return false; + // Pop a bytecode starting offset and scan from there. + bcs.set_start(bci_stack->pop()); + } + Bytecodes::Code opcode = bcs.raw_next(); + u4 bci = bcs.bci(); + + // If the bytecode is in a TRY block, push its handlers so they + // will get parsed. + push_handlers(&exhandlers, handler_stack, bci); + + switch (opcode) { + case Bytecodes::_if_icmpeq: + case Bytecodes::_if_icmpne: + case Bytecodes::_if_icmplt: + case Bytecodes::_if_icmpge: + case Bytecodes::_if_icmpgt: + case Bytecodes::_if_icmple: + case Bytecodes::_ifeq: + case Bytecodes::_ifne: + case Bytecodes::_iflt: + case Bytecodes::_ifge: + case Bytecodes::_ifgt: + case Bytecodes::_ifle: + case Bytecodes::_if_acmpeq: + case Bytecodes::_if_acmpne: + case Bytecodes::_ifnull: + case Bytecodes::_ifnonnull: + target = bcs.dest(); + if (visited_branches->contains(bci)) { + if (bci_stack->is_empty()) return true; + // Pop a bytecode starting offset and scan from there. + bcs.set_start(bci_stack->pop()); + } else { + if (target > bci) { // forward branch + if (target >= code_length) return false; + // Push the branch target onto the stack. + bci_stack->push(target); + // then, scan bytecodes starting with next. + bcs.set_start(bcs.next_bci()); + } else { // backward branch + // Push bytecode offset following backward branch onto the stack. + bci_stack->push(bcs.next_bci()); + // Check bytecodes starting with branch target. 
+ bcs.set_start(target); + } + // Record target so we don't branch here again. + visited_branches->append(bci); + } + break; + + case Bytecodes::_goto: + case Bytecodes::_goto_w: + target = (opcode == Bytecodes::_goto ? bcs.dest() : bcs.dest_w()); + if (visited_branches->contains(bci)) { + if (bci_stack->is_empty()) return true; + // Been here before, pop new starting offset from stack. + bcs.set_start(bci_stack->pop()); + } else { + if (target >= code_length) return false; + // Continue scanning from the target onward. + bcs.set_start(target); + // Record target so we don't branch here again. + visited_branches->append(bci); + } + break; + + // Check that all switch alternatives end in 'athrow' bytecodes. Since it + // is difficult to determine where each switch alternative ends, parse + // each switch alternative until either hit a 'return', 'athrow', or reach + // the end of the method's bytecodes. This is gross but should be okay + // because: + // 1. tableswitch and lookupswitch byte codes in handlers for ctor explicit + // constructor invocations should be rare. + // 2. if each switch alternative ends in an athrow then the parsing should be + // short. If there is no athrow then it is bogus code, anyway. + case Bytecodes::_lookupswitch: + case Bytecodes::_tableswitch: + { + address aligned_bcp = (address) round_to((intptr_t)(bcs.bcp() + 1), jintSize); + u4 default_offset = Bytes::get_Java_u4(aligned_bcp) + bci; + int keys, delta; + if (opcode == Bytecodes::_tableswitch) { + jint low = (jint)Bytes::get_Java_u4(aligned_bcp + jintSize); + jint high = (jint)Bytes::get_Java_u4(aligned_bcp + 2*jintSize); + // This is invalid, but let the regular bytecode verifier + // report this because the user will get a better error message. + if (low > high) return true; + keys = high - low + 1; + delta = 1; + } else { + keys = (int)Bytes::get_Java_u4(aligned_bcp + jintSize); + delta = 2; + } + // Invalid, let the regular bytecode verifier deal with it. + if (keys < 0) return true; + + // Push the offset of the next bytecode onto the stack. + bci_stack->push(bcs.next_bci()); + + // Push the switch alternatives onto the stack. + for (int i = 0; i < keys; i++) { + u4 target = bci + (jint)Bytes::get_Java_u4(aligned_bcp+(3+i*delta)*jintSize); + if (target > code_length) return false; + bci_stack->push(target); + } + + // Start bytecode parsing for the switch at the default alternative. + if (default_offset > code_length) return false; + bcs.set_start(default_offset); + break; + } + + case Bytecodes::_return: + return false; + + case Bytecodes::_athrow: + { + if (bci_stack->is_empty()) { + if (handler_stack->is_empty()) { + return true; + } else { + // Parse the catch handlers for try blocks containing athrow. + bcs.set_start(handler_stack->pop()); + } + } else { + // Pop a bytecode offset and starting scanning from there. + bcs.set_start(bci_stack->pop()); + } + } + break; + + default: + ; + } // end switch + } // end while loop + + return false; +} + void ClassVerifier::verify_invoke_init( RawBytecodeStream* bcs, u2 ref_class_index, VerificationType ref_class_type, StackMapFrame* current_frame, u4 code_length, bool *this_uninit, @@ -2242,6 +2418,30 @@ "Bad method call"); return; } + + // Check if this call is done from inside of a TRY block. If so, make + // sure that all catch clause paths end in a throw. Otherwise, this + // can result in returning an incomplete object. 
+ ExceptionTable exhandlers(_method()); + int exlength = exhandlers.length(); + for(int i = 0; i < exlength; i++) { + u2 start_pc = exhandlers.start_pc(i); + u2 end_pc = exhandlers.end_pc(i); + + if (bci >= start_pc && bci < end_pc) { + if (!ends_in_athrow(exhandlers.handler_pc(i))) { + verify_error(ErrorContext::bad_code(bci), + "Bad method call from after the start of a try block"); + return; + } else if (VerboseVerification) { + ResourceMark rm; + tty->print_cr( + "Survived call to ends_in_athrow(): %s", + current_class()->name()->as_C_string()); + } + } + } + current_frame->initialize_object(type, current_type()); *this_uninit = true; } else if (type.is_uninitialized()) { @@ -2278,6 +2478,11 @@ methodOop m = instanceKlass::cast(ref_klass)->uncached_lookup_method( vmSymbols::object_initializer_name(), cp->signature_ref_at(bcs->get_index_u2())); + if (m == NULL) { + verify_error(ErrorContext::bad_code(bci), + "Call to missing method"); + return; + } instanceKlassHandle mh(THREAD, m->method_holder()); if (m->is_protected() && !mh->is_same_class_package(_klass())) { bool assignable = current_type().is_assignable_from( diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/classfile/verifier.hpp --- a/src/share/vm/classfile/verifier.hpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/classfile/verifier.hpp Thu Nov 27 11:27:10 2014 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,6 +30,7 @@ #include "oops/klass.hpp" #include "oops/methodOop.hpp" #include "runtime/handles.hpp" +#include "utilities/growableArray.hpp" #include "utilities/exceptions.hpp" // The verifier class @@ -301,6 +302,16 @@ StackMapFrame* current_frame, u4 code_length, bool* this_uninit, constantPoolHandle cp, TRAPS); + // Used by ends_in_athrow() to push all handlers that contain bci onto + // the handler_stack, if the handler is not already on the stack. + void push_handlers(ExceptionTable* exhandlers, + GrowableArray* handler_stack, + u4 bci); + + // Returns true if all paths starting with start_bc_offset end in athrow + // bytecode or loop. 
+ bool ends_in_athrow(u4 start_bc_offset); + void verify_invoke_instructions( RawBytecodeStream* bcs, u4 code_length, StackMapFrame* current_frame, bool* this_uninit, VerificationType return_type, @@ -398,6 +409,7 @@ Symbol* create_temporary_symbol(const char *s, int length, TRAPS); TypeOrigin ref_ctx(const char* str, TRAPS); + }; inline int ClassVerifier::change_sig_to_verificationType( diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/classfile/vmSymbols.hpp --- a/src/share/vm/classfile/vmSymbols.hpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/classfile/vmSymbols.hpp Thu Nov 27 11:27:10 2014 +0000 @@ -546,6 +546,7 @@ template(serializePropertiesToByteArray_signature, "()[B") \ template(serializeAgentPropertiesToByteArray_name, "serializeAgentPropertiesToByteArray") \ template(classRedefinedCount_name, "classRedefinedCount") \ + template(classClassLoader_name, "classLoader") \ \ /* trace signatures */ \ TRACE_TEMPLATES(template) \ diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/compiler/compileBroker.cpp --- a/src/share/vm/compiler/compileBroker.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/compiler/compileBroker.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1921,6 +1921,7 @@ ResourceMark rm; char* method_name = method->name()->as_C_string(); strncpy(_last_method_compiled, method_name, CompileBroker::name_buffer_length); + _last_method_compiled[CompileBroker::name_buffer_length - 1] = '\0'; // ensure null terminated char current_method[CompilerCounters::cmname_buffer_length]; size_t maxLen = CompilerCounters::cmname_buffer_length; diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/compiler/oopMap.cpp --- a/src/share/vm/compiler/oopMap.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/compiler/oopMap.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -33,9 +33,13 @@ #include "memory/resourceArea.hpp" #include "runtime/frame.inline.hpp" #include "runtime/signature.hpp" +#include "utilities/dtrace.hpp" #ifdef COMPILER1 #include "c1/c1_Defs.hpp" #endif +#ifndef USDT2 + HS_DTRACE_PROBE_DECL1(provider, gc__collection__delete, *uintptr_t); +#endif /* !USDT2 */ // OopMapStream @@ -677,6 +681,9 @@ " - Derived: " INTPTR_FORMAT " Base: " INTPTR_FORMAT " (Offset: %d)", derived_loc, (address)*derived_loc, (address)base, offset); } +#ifndef USDT2 + HS_DTRACE_PROBE1(hotspot, gc__collection__delete, entry); +#endif /* !USDT2 */ // Delete entry delete entry; diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -59,6 +59,12 @@ #include "runtime/vmThread.hpp" #include "services/memoryService.hpp" #include "services/runtimeService.hpp" +#include "utilities/dtrace.hpp" + +#ifndef USDT2 + HS_DTRACE_PROBE_DECL4(provider, gc__collection__contig__begin, bool, bool, size_t, bool); + HS_DTRACE_PROBE_DECL4(provider, gc__collection__contig__end, bool, bool, size_t, bool); +#endif /* !USDT2 */ // statics CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL; @@ -1648,7 +1654,13 @@ size_t 
size, bool tlab) { +#ifndef USDT2 + HS_DTRACE_PROBE4(hotspot, gc__collection__contig__begin, full, clear_all_soft_refs, size, tlab); +#endif /* !USDT2 */ collector()->collect(full, clear_all_soft_refs, size, tlab); +#ifndef USDT2 + HS_DTRACE_PROBE4(hotspot, gc__collection__contig__end, full, clear_all_soft_refs, size, tlab); +#endif /* !USDT2 */ } void CMSCollector::collect(bool full, diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/gc_implementation/g1/g1AllocRegion.hpp --- a/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp Thu Nov 27 11:27:10 2014 +0000 @@ -177,7 +177,7 @@ // Should be called when we want to release the active region which // is returned after it's been retired. - HeapRegion* release(); + virtual HeapRegion* release(); #if G1_ALLOC_REGION_TRACING void trace(const char* str, size_t word_size = 0, HeapWord* result = NULL); diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -6628,6 +6628,35 @@ _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes, GCAllocForTenured); } + +HeapRegion* OldGCAllocRegion::release() { + HeapRegion* cur = get(); + if (cur != NULL) { + // Determine how far we are from the next card boundary. If it is smaller than + // the minimum object size we can allocate into, expand into the next card. + HeapWord* top = cur->top(); + HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, G1BlockOffsetSharedArray::N_bytes); + + size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize); + + if (to_allocate_words != 0) { + // We are not at a card boundary. Fill up, possibly into the next, taking the + // end of the region and the minimum object size into account. + to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize), + MAX2(to_allocate_words, G1CollectedHeap::min_fill_size())); + + // Skip allocation if there is not enough space to allocate even the smallest + // possible object. In this case this region will not be retained, so the + // original problem cannot occur. + if (to_allocate_words >= G1CollectedHeap::min_fill_size()) { + HeapWord* dummy = attempt_allocation(to_allocate_words, true /* bot_updates */); + CollectedHeap::fill_with_object(dummy, to_allocate_words); + } + } + } + return G1AllocRegion::release(); +} + // Heap region set verification class VerifyRegionListsClosure : public HeapRegionClosure { diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Thu Nov 27 11:27:10 2014 +0000 @@ -183,6 +183,13 @@ public: OldGCAllocRegion() : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { } + + // This specialization of release() makes sure that the last card that has been + // allocated into has been completely filled by a dummy object. + // This avoids races when remembered set scanning wants to update the BOT of the + // last card in the retained old gc alloc region, and allocation threads + // allocating into that card at the same time. + virtual HeapRegion* release(); }; // The G1 STW is alive closure. 
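The OldGCAllocRegion::release() hunk above pads the retained region up to the next card boundary with a dummy object, so that remembered-set scanning and mutator allocation never update the block offset table for the same card concurrently. A standalone C++ sketch of the padding arithmetic follows; the card size and minimum fill size are assumptions chosen for the example, not values read from G1.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// How many words must a dummy object cover so that the last card touched by
// the current allocation top is completely filled?
static const size_t WORD_BYTES     = 8;    // assumed heap word size
static const size_t CARD_BYTES     = 512;  // assumed card size
static const size_t MIN_FILL_WORDS = 2;    // assumed smallest fillable object

size_t words_to_pad(uintptr_t top, uintptr_t region_end) {
  uintptr_t aligned_top = (top + CARD_BYTES - 1) & ~(uintptr_t)(CARD_BYTES - 1);
  size_t to_alloc = (aligned_top - top) / WORD_BYTES;
  if (to_alloc == 0) return 0;                      // already on a card boundary
  size_t room = (region_end - top) / WORD_BYTES;    // words left in the region
  to_alloc = std::min(room, std::max(to_alloc, MIN_FILL_WORDS));
  return (to_alloc >= MIN_FILL_WORDS) ? to_alloc : 0;  // too small to fill: skip
}

int main() {
  // 40 bytes into a card: pad (512 - 40) / 8 = 59 words before releasing.
  std::printf("%zu\n", words_to_pad(0x10000 + 40, 0x10000 + 4096));
}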
diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/gc_implementation/g1/g1MarkSweep.cpp --- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -50,8 +50,13 @@ #include "runtime/thread.hpp" #include "runtime/vmThread.hpp" #include "utilities/copy.hpp" +#include "utilities/dtrace.hpp" #include "utilities/events.hpp" +#ifndef USDT2 + HS_DTRACE_PROBE_DECL2(provider, gc__collection__G1__begin, *uintptr_t, *uintptr_t); + HS_DTRACE_PROBE_DECL2(provider, gc__collection__G1__end, *uintptr_t, *uintptr_t); + #endif /* !USDT2 */ class HeapRegion; void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp, @@ -89,6 +94,9 @@ // The marking doesn't preserve the marks of biased objects. BiasedLocking::preserve_marks(); +#ifndef USDT2 + HS_DTRACE_PROBE2(hotspot, gc__collection__G1__begin, &sh, sh->gc_cause()); +#endif /* !USDT2 */ mark_sweep_phase1(marked_for_unloading, clear_all_softrefs); mark_sweep_phase2(); @@ -108,6 +116,9 @@ GenRemSet* rs = sh->rem_set(); rs->invalidate(sh->perm_gen()->used_region(), true /*whole_heap*/); +#ifndef USDT2 + HS_DTRACE_PROBE2(hotspot, gc__collection__G1__end, &sh, sh->gc_cause()); +#endif /* !USDT2 */ // "free at last gc" is calculated from these. // CHF: cheating for now!!! // Universe::set_heap_capacity_at_last_gc(Universe::heap()->capacity()); diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/gc_implementation/parNew/parNewGeneration.cpp --- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -54,6 +54,12 @@ #include "utilities/copy.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/workgroup.hpp" +#include "utilities/dtrace.hpp" + +#ifndef USDT2 + HS_DTRACE_PROBE_DECL4(provider, gc__collection__parnew__begin, bool, bool, size_t, bool); + HS_DTRACE_PROBE_DECL4(provider, gc__collection__parnew__end, bool, bool, size_t, bool); +#endif /* !USDT2 */ #ifdef _MSC_VER #pragma warning( push ) @@ -919,6 +925,9 @@ bool clear_all_soft_refs, size_t size, bool is_tlab) { +#ifndef USDT2 + HS_DTRACE_PROBE4(hotspot, gc__collection__parnew__begin, full, clear_all_soft_refs, size, is_tlab); +#endif /* !USDT2 */ assert(full || size > 0, "otherwise we don't want to collect"); GenCollectedHeap* gch = GenCollectedHeap::heap(); @@ -1070,6 +1079,10 @@ gch->print_heap_change(gch_prev_used); } +#ifndef USDT2 + HS_DTRACE_PROBE4(hotspot, gc__collection__parnew__end, full, clear_all_soft_refs, size, is_tlab); +#endif /* !USDT2 */ + if (PrintGCDetails && ParallelGCVerbose) { TASKQUEUE_STATS_ONLY(thread_state_set.print_termination_stats()); TASKQUEUE_STATS_ONLY(thread_state_set.print_taskqueue_stats()); diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -43,8 +43,14 @@ #include "runtime/java.hpp" #include "runtime/vmThread.hpp" #include "services/memTracker.hpp" +#include "utilities/dtrace.hpp" #include "utilities/vmError.hpp" +#ifndef USDT2 + HS_DTRACE_PROBE_DECL2(provider, gc__collection__parscavenge__heap__begin, *uintptr_t, *uintptr_t); + HS_DTRACE_PROBE_DECL2(provider, gc__collection__parscavenge__heap__end, *uintptr_t, *uintptr_t); +#endif /* 
!USDT2 */ + PSYoungGen* ParallelScavengeHeap::_young_gen = NULL; PSOldGen* ParallelScavengeHeap::_old_gen = NULL; PSPermGen* ParallelScavengeHeap::_perm_gen = NULL; @@ -815,7 +821,13 @@ } VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause); +#ifndef USDT2 + HS_DTRACE_PROBE2(hotspot, gc__collection__parscavenge__heap__begin, &op, cause); +#endif /* !USDT2 */ VMThread::execute(&op); +#ifndef USDT2 + HS_DTRACE_PROBE2(hotspot, gc__collection__parscavenge__heap__end, &op, cause); +#endif /* !USDT2 */ } // This interface assumes that it's being called by the diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -58,11 +58,18 @@ #include "services/management.hpp" #include "services/memoryService.hpp" #include "services/memTracker.hpp" +#include "utilities/dtrace.hpp" #include "utilities/events.hpp" #include "utilities/stack.inline.hpp" #include +#ifndef USDT2 + HS_DTRACE_PROBE_DECL2(provider, gc__collection__ParallelCompact__clear, *uintptr_t, *uintptr_t); + HS_DTRACE_PROBE_DECL2(provider, gc__collection__parallel__collect, *uintptr_t, *uintptr_t); + HS_DTRACE_PROBE_DECL4(provider, gc__collection__move, *uintptr_t, *uintptr_t, *uintptr_t, *uintptr_t); +#endif /* !USDT2 */ + // All sizes are in HeapWords. const size_t ParallelCompactData::Log2RegionSize = 16; // 64K words const size_t ParallelCompactData::RegionSize = (size_t)1 << Log2RegionSize; @@ -469,6 +476,9 @@ void ParallelCompactData::clear() { +#ifndef USDT2 + HS_DTRACE_PROBE2(hotspot, gc__collection__ParallelCompact__clear, &_region_data, _region_data->data_location()); +#endif /* !USDT2 */ memset(_region_data, 0, _region_vspace->committed_size()); memset(_block_data, 0, _block_vspace->committed_size()); } @@ -2011,6 +2021,9 @@ "should be in vm thread"); ParallelScavengeHeap* heap = gc_heap(); +#ifndef USDT2 + HS_DTRACE_PROBE2(hotspot, gc__collection__parallel__collect, heap, heap->gc_cause()); +#endif /* !USDT2 */ GCCause::Cause gc_cause = heap->gc_cause(); assert(!heap->is_gc_active(), "not reentrant"); @@ -3509,6 +3522,9 @@ // past the end of the partial object entering the region (if any). 
HeapWord* const dest_addr = sd.partial_obj_end(dp_region); HeapWord* const new_top = _space_info[space_id].new_top(); +#ifndef USDT2 + HS_DTRACE_PROBE4(hotspot, gc__collection__move, &beg_addr, &end_addr, &dest_addr, &new_top); +#endif /* !USDT2 */ assert(new_top >= dest_addr, "bad new_top value"); const size_t words = pointer_delta(new_top, dest_addr); diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -55,8 +55,17 @@ #include "runtime/vmThread.hpp" #include "runtime/vm_operations.hpp" #include "services/memoryService.hpp" +#include "utilities/dtrace.hpp" #include "utilities/stack.inline.hpp" +#ifndef USDT2 + HS_DTRACE_PROBE_DECL2(provider, gc__collection__PSScavenge__begin, *uintptr_t, *uintptr_t); + HS_DTRACE_PROBE_DECL2(provider, gc__collection__PSScavenge__end, *uintptr_t, *uintptr_t); + HS_DTRACE_PROBE_DECL2(provider, gc__collection__PSParallelCompact__begin, *uintptr_t, *uintptr_t); + HS_DTRACE_PROBE_DECL2(provider, gc__collection__PSParallelCompact__end, *uintptr_t, *uintptr_t); + HS_DTRACE_PROBE_DECL2(provider, gc__collection__PSMarkSweep__begin, *uintptr_t, *uintptr_t); + HS_DTRACE_PROBE_DECL2(provider, gc__collection__PSMarkSweep__end, *uintptr_t, *uintptr_t); +#endif /* !USDT2 */ HeapWord* PSScavenge::_to_space_top_before_gc = NULL; int PSScavenge::_consecutive_skipped_scavenges = 0; @@ -231,7 +240,13 @@ PSAdaptiveSizePolicy* policy = heap->size_policy(); IsGCActiveMark mark; +#ifndef USDT2 + HS_DTRACE_PROBE2(hotspot, gc__collection__PSScavenge__begin, &heap, heap->gc_cause()); +#endif /* !USDT2 */ const bool scavenge_done = PSScavenge::invoke_no_policy(); +#ifndef USDT2 + HS_DTRACE_PROBE2(hotspot, gc__collection__PSScavenge__end, &heap, heap->gc_cause()); +#endif /* !USDT2 */ const bool need_full_gc = !scavenge_done || policy->should_full_GC(heap->old_gen()->free_in_bytes()); bool full_gc_done = false; @@ -248,9 +263,21 @@ const bool clear_all_softrefs = cp->should_clear_all_soft_refs(); if (UseParallelOldGC) { +#ifndef USDT2 + HS_DTRACE_PROBE2(hotspot, gc__collection__PSParallelCompact__begin, &heap, heap->gc_cause()); +#endif /* !USDT2 */ full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs); +#ifndef USDT2 + HS_DTRACE_PROBE2(hotspot, gc__collection__PSParallelCompact__end, &heap, heap->gc_cause()); +#endif /* !USDT2 */ } else { +#ifndef USDT2 + HS_DTRACE_PROBE2(hotspot, gc__collection__PSMarkSweep__begin, &heap, heap->gc_cause()); +#endif /* !USDT2 */ full_gc_done = PSMarkSweep::invoke_no_policy(clear_all_softrefs); +#ifndef USDT2 + HS_DTRACE_PROBE2(hotspot, gc__collection__PSMarkSweep__end, &heap, heap->gc_cause()); +#endif /* !USDT2 */ } } diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/interpreter/bytecodeInterpreter.cpp --- a/src/share/vm/interpreter/bytecodeInterpreter.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/interpreter/bytecodeInterpreter.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -2103,8 +2103,7 @@ if ( *count_addr > 0 ) { if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) { obj = (oop)NULL; - } - else { + } else { if (cache->is_long() || cache->is_double()) { obj = (oop) STACK_OBJECT(-3); } else { @@ -2125,7 +2124,7 @@ // QQQ Need to make this as inlined as possible. 
Probably need to split all the bytecode cases // out so c++ compiler has a chance for constant prop to fold everything possible away. - oop obj; + oop obj,too; int count; TosState tos_type = cache->flag_state(); @@ -2149,8 +2148,9 @@ if (tos_type == itos) { obj->release_int_field_put(field_offset, STACK_INT(-1)); } else if (tos_type == atos) { - VERIFY_OOP(STACK_OBJECT(-1)); - obj->release_obj_field_put(field_offset, STACK_OBJECT(-1)); + too = (oop) STACK_OBJECT(-1); + VERIFY_OOP(too); + obj->release_obj_field_put(field_offset, too); } else if (tos_type == btos) { obj->release_byte_field_put(field_offset, STACK_INT(-1)); } else if (tos_type == ltos) { @@ -2169,7 +2169,8 @@ if (tos_type == itos) { obj->int_field_put(field_offset, STACK_INT(-1)); } else if (tos_type == atos) { - VERIFY_OOP(STACK_OBJECT(-1)); + too = (oop) STACK_OBJECT(-1); + VERIFY_OOP(too); // On IA64 we perform all stores of references with release semantics. // This guarantees that everybody using this reference sees a fully // initialized object. On PPC64 we emit a storestore barrier after @@ -2295,7 +2296,8 @@ } CASE(_checkcast): if (STACK_OBJECT(-1) != NULL) { - VERIFY_OOP(STACK_OBJECT(-1)); + oop too = (oop) STACK_OBJECT(-1); + VERIFY_OOP(too); u2 index = Bytes::get_Java_u2(pc+1); // Constant pool may have actual klass or unresolved klass. If it is // unresolved we must resolve it. @@ -2332,7 +2334,8 @@ // Profile instanceof with null_seen and receiver. BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/true, NULL); } else { - VERIFY_OOP(STACK_OBJECT(-1)); + oop too = (oop) STACK_OBJECT(-1); + VERIFY_OOP(too); u2 index = Bytes::get_Java_u2(pc+1); // Constant pool may have actual klass or unresolved klass. If it is // unresolved we must resolve it. @@ -2561,7 +2564,8 @@ // another compliant java compiler. if (cache->is_forced_virtual()) { methodOop callee; - CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); + oop too = (oop) STACK_OBJECT(-(cache->parameter_size())); + CHECK_NULL(too); if (cache->is_vfinal()) { callee = cache->f2_as_vfinal_method(); // Profile 'special case of invokeinterface' final call. @@ -2648,7 +2652,8 @@ { methodOop callee; if ((Bytecodes::Code)opcode == Bytecodes::_invokevirtual) { - CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); + oop too = (oop) STACK_OBJECT(-(cache->parameter_size())); + CHECK_NULL(too); if (cache->is_vfinal()) { callee = cache->f2_as_vfinal_method(); // Profile final call. @@ -2687,7 +2692,8 @@ } } else { if ((Bytecodes::Code)opcode == Bytecodes::_invokespecial) { - CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); + oop too = (oop) STACK_OBJECT(-(cache->parameter_size())); + CHECK_NULL(too); } callee = cache->f1_as_method(); diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/interpreter/linkResolver.cpp --- a/src/share/vm/interpreter/linkResolver.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/interpreter/linkResolver.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -190,6 +190,14 @@ void LinkResolver::lookup_method_in_klasses(methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS) { methodOop result_oop = klass->uncached_lookup_method(name, signature); + + //JDK 7 does not support default methods, but this code ported from JDK8 to keep code consistent for all JDK. + if (klass->oop_is_array()) { + // Only consider klass and super klass for arrays + result = methodHandle(THREAD, result_oop); + return; + } + if (EnableInvokeDynamic && result_oop != NULL) { vmIntrinsics::ID iid = result_oop->intrinsic_id(); if (MethodHandles::is_signature_polymorphic(iid)) { @@ -424,7 +432,7 @@ // 2. lookup method in resolved klass and its super klasses lookup_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, CHECK); - if (resolved_method.is_null()) { // not found in the class hierarchy + if (resolved_method.is_null() && !resolved_klass->oop_is_array()) { // not found in the class hierarchy // 3. lookup method in all the interfaces implemented by the resolved klass lookup_method_in_interfaces(resolved_method, resolved_klass, method_name, method_signature, CHECK); @@ -437,16 +445,16 @@ CLEAR_PENDING_EXCEPTION; } } + } - if (resolved_method.is_null()) { - // 4. method lookup failed - ResourceMark rm(THREAD); - THROW_MSG_CAUSE(vmSymbols::java_lang_NoSuchMethodError(), - methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()), - method_name, - method_signature), - nested_exception); - } + if (resolved_method.is_null()) { + // 4. method lookup failed + ResourceMark rm(THREAD); + THROW_MSG_CAUSE(vmSymbols::java_lang_NoSuchMethodError(), + methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()), + method_name, + method_signature), + nested_exception); } // 5. 
check if method is concrete @@ -517,17 +525,18 @@ // lookup method in this interface or its super, java.lang.Object lookup_instance_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, CHECK); - if (resolved_method.is_null()) { + if (resolved_method.is_null() && !resolved_klass->oop_is_array()) { // lookup method in all the super-interfaces lookup_method_in_interfaces(resolved_method, resolved_klass, method_name, method_signature, CHECK); - if (resolved_method.is_null()) { - // no method found - ResourceMark rm(THREAD); - THROW_MSG(vmSymbols::java_lang_NoSuchMethodError(), - methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()), - method_name, - method_signature)); - } + } + + if (resolved_method.is_null()) { + // no method found + ResourceMark rm(THREAD); + THROW_MSG(vmSymbols::java_lang_NoSuchMethodError(), + methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()), + method_name, + method_signature)); } if (check_access) { @@ -617,7 +626,7 @@ // Resolve instance field fieldDescriptor fd; // find_field initializes fd if found - KlassHandle sel_klass(THREAD, instanceKlass::cast(resolved_klass())->find_field(field, sig, &fd)); + KlassHandle sel_klass(THREAD, resolved_klass->find_field(field, sig, &fd)); // check if field exists; i.e., if a klass containing the field def has been selected if (sel_klass.is_null()){ ResourceMark rm(THREAD); diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/memory/defNewGeneration.cpp --- a/src/share/vm/memory/defNewGeneration.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/memory/defNewGeneration.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -42,6 +42,7 @@ #include "oops/oop.inline.hpp" #include "runtime/java.hpp" #include "utilities/copy.hpp" +#include "utilities/dtrace.hpp" #include "utilities/stack.inline.hpp" #ifdef TARGET_OS_FAMILY_linux # include "thread_linux.inline.hpp" @@ -58,7 +59,10 @@ #ifdef TARGET_OS_FAMILY_bsd # include "thread_bsd.inline.hpp" #endif - +#ifndef USDT2 + HS_DTRACE_PROBE_DECL4(provider, gc__collection__defnew__begin, bool, bool, size_t, bool); + HS_DTRACE_PROBE_DECL4(provider, gc__collection__defnew__end, bool, bool, size_t, bool); +#endif /* !USDT2 */ // // DefNewGeneration functions. 
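The DTrace hunks that recur through the GC files above (guarded by #ifndef USDT2) all follow the same shape: declare paired begin/end probes, then bracket the collection entry point with probe firings that pass identical arguments. A standalone C++ sketch of that shape; the PROBE* macros below are stand-ins for illustration, whereas the patch itself uses HS_DTRACE_PROBE_DECL4 and HS_DTRACE_PROBE4 from utilities/dtrace.hpp.

#include <cstdio>

// Stand-in probe macros for this sketch only.
#define PROBE_DECL4(name)
#define PROBE4(name, a, b, c, d) \
  std::printf("probe %-32s full=%d clear=%d size=%zu tlab=%d\n", \
              #name, (int)(a), (int)(b), (size_t)(c), (int)(d))

PROBE_DECL4(gc__collection__defnew__begin)
PROBE_DECL4(gc__collection__defnew__end)

static void do_collect(bool full, bool clear_all_soft_refs, size_t size, bool is_tlab) {
  // ... the actual collection work would run here ...
  (void)full; (void)clear_all_soft_refs; (void)size; (void)is_tlab;
}

// Same bracketing as the patched collect() methods: begin probe, do the work,
// end probe with the same arguments.
void collect(bool full, bool clear_all_soft_refs, size_t size, bool is_tlab) {
  PROBE4(gc__collection__defnew__begin, full, clear_all_soft_refs, size, is_tlab);
  do_collect(full, clear_all_soft_refs, size, is_tlab);
  PROBE4(gc__collection__defnew__end, full, clear_all_soft_refs, size, is_tlab);
}

int main() { collect(false, false, 1024, true); }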
@@ -537,6 +541,9 @@ bool clear_all_soft_refs, size_t size, bool is_tlab) { +#ifndef USDT2 + HS_DTRACE_PROBE4(hotspot, gc__collection__defnew__begin, full, clear_all_soft_refs, size, is_tlab); +#endif /* !USDT2 */ assert(full || size > 0, "otherwise we don't want to collect"); GenCollectedHeap* gch = GenCollectedHeap::heap(); @@ -682,6 +689,10 @@ jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; update_time_of_last_gc(now); +#ifndef USDT2 + HS_DTRACE_PROBE4(hotspot, gc__collection__defnew__end, full, clear_all_soft_refs, size, is_tlab); +#endif /* !USDT2 */ + gch->trace_heap_after_gc(&gc_tracer); gc_tracer.report_tenuring_threshold(tenuring_threshold()); diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/memory/generation.cpp --- a/src/share/vm/memory/generation.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/memory/generation.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -41,8 +41,14 @@ #include "oops/oop.inline.hpp" #include "runtime/java.hpp" #include "utilities/copy.hpp" +#include "utilities/dtrace.hpp" #include "utilities/events.hpp" +#ifndef USDT2 + HS_DTRACE_PROBE_DECL4(provider, gc__collection__contig__begin, bool, bool, size_t, bool); + HS_DTRACE_PROBE_DECL4(provider, gc__collection__contig__end, bool, bool, size_t, bool); +#endif /* !USDT2 */ + Generation::Generation(ReservedSpace rs, size_t initial_size, int level) : _level(level), _ref_processor(NULL) { @@ -481,7 +487,13 @@ SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer(); gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start()); +#ifndef USDT2 + HS_DTRACE_PROBE4(hotspot, gc__collection__contig__begin, full, clear_all_soft_refs, size, is_tlab); +#endif /* !USDT2 */ GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs); +#ifndef USDT2 + HS_DTRACE_PROBE4(hotspot, gc__collection__contig__end, full, clear_all_soft_refs, size, is_tlab); +#endif /* !USDT2 */ gc_timer->register_gc_end(); diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/memory/tenuredGeneration.cpp --- a/src/share/vm/memory/tenuredGeneration.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/memory/tenuredGeneration.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -33,6 +33,12 @@ #include "memory/tenuredGeneration.hpp" #include "oops/oop.inline.hpp" #include "runtime/java.hpp" +#include "utilities/dtrace.hpp" + +#ifndef USDT2 + HS_DTRACE_PROBE_DECL4(provider, gc__collection__tenured__begin, bool, bool, size_t, bool); + HS_DTRACE_PROBE_DECL4(provider, gc__collection__tenured__end, bool, bool, size_t, bool); +#endif /* !USDT2 */ TenuredGeneration::TenuredGeneration(ReservedSpace rs, size_t initial_byte_size, int level, @@ -307,8 +313,14 @@ size_t size, bool is_tlab) { retire_alloc_buffers_before_full_gc(); +#ifndef USDT2 + HS_DTRACE_PROBE4(hotspot, gc__collection__tenured__begin, full, clear_all_soft_refs, size, is_tlab); +#endif /* !USDT2 */ OneContigSpaceCardGeneration::collect(full, clear_all_soft_refs, size, is_tlab); +#ifndef USDT2 + HS_DTRACE_PROBE4(hotspot, gc__collection__tenured__end, full, clear_all_soft_refs, size, is_tlab); +#endif /* !USDT2 */ } void TenuredGeneration::update_gc_stats(int current_level, diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/oops/arrayKlass.cpp --- a/src/share/vm/oops/arrayKlass.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/oops/arrayKlass.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -64,6 +64,13 @@ return NULL; } +// find field according to JVM spec 5.4.3.2, returns the klass in which the field is defined +klassOop arrayKlass::find_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const { + // There are no fields in an array klass but look to the super class (Object) + assert(super(), "super klass must be present"); + return Klass::cast(super())->find_field(name, sig, fd); +} + methodOop arrayKlass::uncached_lookup_method(Symbol* name, Symbol* signature) const { // There are no methods in an array klass but the super class (Object) has some assert(super(), "super klass must be present"); diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/oops/arrayKlass.hpp --- a/src/share/vm/oops/arrayKlass.hpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/oops/arrayKlass.hpp Thu Nov 27 11:27:10 2014 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,6 +30,8 @@ #include "oops/klassOop.hpp" #include "oops/klassVtable.hpp" +class fieldDescriptor; + // arrayKlass is the abstract baseclass for all array classes class arrayKlass: public Klass { @@ -83,6 +85,9 @@ virtual oop multi_allocate(int rank, jint* sizes, TRAPS); objArrayOop allocate_arrayArray(int n, int length, TRAPS); + // find field according to JVM spec 5.4.3.2, returns the klass in which the field is defined + klassOop find_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const; + // Lookup operations methodOop uncached_lookup_method(Symbol* name, Symbol* signature) const; diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/oops/cpCacheOop.hpp --- a/src/share/vm/oops/cpCacheOop.hpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/oops/cpCacheOop.hpp Thu Nov 27 11:27:10 2014 +0000 @@ -140,14 +140,15 @@ oop_store(&_f1, f1); } void release_set_f1(oop f1); - void set_f2(intx f2) { - assert(_f2 == 0 || _f2 == f2, "illegal field change"); - _f2 = f2; + void set_f2(intx f2) { + intx existing_f2 = _f2; // read once + assert(existing_f2 == 0 || existing_f2 == f2, "illegal field change"); + _f2 = f2; } - void set_f2_as_vfinal_method(methodOop f2) { - assert(_f2 == 0 || _f2 == (intptr_t) f2, "illegal field change"); - assert(is_vfinal(), "flags must be set"); - _f2 = (intptr_t) f2; } + void set_f2_as_vfinal_method(methodOop f2) { + assert(is_vfinal(), "flags must be set"); + set_f2((intx)f2); + } int make_flags(TosState state, int option_bits, int field_index_or_method_params); void set_flags(intx flags) { _flags = flags; } bool init_flags_atomic(intx flags); diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/oops/klass.cpp --- a/src/share/vm/oops/klass.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/oops/klass.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -119,6 +119,15 @@ return is_subclass_of(k); } +klassOop Klass::find_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const { +#ifdef ASSERT + tty->print_cr("Error: find_field called on a klass oop." + " Likely error: reflection method does not correctly" + " wrap return value in a mirror object."); +#endif + ShouldNotReachHere(); + return NULL; +} methodOop Klass::uncached_lookup_method(Symbol* name, Symbol* signature) const { #ifdef ASSERT diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/oops/klass.hpp --- a/src/share/vm/oops/klass.hpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/oops/klass.hpp Thu Nov 27 11:27:10 2014 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -88,6 +88,7 @@ class klassVtable; class KlassHandle; class OrderAccess; +class fieldDescriptor; // Holder (or cage) for the C++ vtable of each kind of Klass. // We want to tightly constrain the location of the C++ vtable in the overall layout. @@ -514,6 +515,7 @@ virtual void initialize(TRAPS); // lookup operation for MethodLookupCache friend class MethodLookupCache; + virtual klassOop find_field(Symbol* name, Symbol* signature, fieldDescriptor* fd) const; virtual methodOop uncached_lookup_method(Symbol* name, Symbol* signature) const; public: methodOop lookup_method(Symbol* name, Symbol* signature) const { diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/oops/klassVtable.cpp --- a/src/share/vm/oops/klassVtable.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/oops/klassVtable.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -209,6 +209,17 @@ // For bytecodes not produced by javac together it is possible that a method does not override // the superclass's method, but might indirectly override a super-super class's vtable entry // If none found, return a null superk, else return the superk of the method this does override +// For public and protected methods: if they override a superclass, they will +// also be overridden themselves appropriately. +// Private methods do not override and are not overridden. +// Package Private methods are trickier: +// e.g. 
P1.A, pub m +// P2.B extends A, package private m +// P1.C extends B, public m +// P1.C.m needs to override P1.A.m and can not override P2.B.m +// Therefore: all package private methods need their own vtable entries for +// them to be the root of an inheritance overriding decision +// Package private methods may also override other vtable entries instanceKlass* klassVtable::find_transitive_override(instanceKlass* initialsuper, methodHandle target_method, int vtable_index, Handle target_loader, Symbol* target_classname, Thread * THREAD) { instanceKlass* superk = initialsuper; @@ -310,8 +321,12 @@ ((klass->major_version() >= VTABLE_TRANSITIVE_OVERRIDE_VERSION) && ((super_klass = find_transitive_override(super_klass, target_method, i, target_loader, target_classname, THREAD)) != (instanceKlass*)NULL))) { - // overriding, so no new entry - allocate_new = false; + + // Package private methods always need a new entry to root their own + // overriding. They may also override other methods. + if (!target_method()->is_package_private()) { + allocate_new = false; + } if (checkconstraints) { // Override vtable entry if passes loader constraint check @@ -433,6 +448,12 @@ return true; } + // Package private methods always need a new entry to root their own + // overriding. This allows transitive overriding to work. + if (target_method()->is_package_private()) { + return true; + } + // search through the super class hierarchy to see if we need // a new entry ResourceMark rm; diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/oops/objArrayOop.hpp --- a/src/share/vm/oops/objArrayOop.hpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/oops/objArrayOop.hpp Thu Nov 27 11:27:10 2014 +0000 @@ -45,9 +45,10 @@ private: // Give size of objArrayOop in HeapWords minus the header static int array_size(int length) { - const int OopsPerHeapWord = HeapWordSize/heapOopSize; + const uint OopsPerHeapWord = HeapWordSize/heapOopSize; assert(OopsPerHeapWord >= 1 && (HeapWordSize % heapOopSize == 0), "Else the following (new) computation would be in error"); + uint res = ((uint)length + OopsPerHeapWord - 1)/OopsPerHeapWord; #ifdef ASSERT // The old code is left in for sanity-checking; it'll // go away pretty soon. XXX @@ -55,16 +56,15 @@ // oop->length() * HeapWordsPerOop; // With narrowOops, HeapWordsPerOop is 1/2 or equal 0 as an integer. 
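Returning to the klassVtable.cpp comment above about package-private methods needing their own vtable entries: the exact P1/P2 case in that comment requires bytecode not produced by javac (javac rejects reducing visibility), but the underlying rule, that a package-private method neither overrides nor is overridden across packages, is visible in plain Java. A sketch, not part of this changeset, with hypothetical packages p1 and p2 (two source files shown together):

// --- p1/A.java ---
package p1;
public class A {
    void m() { System.out.println("p1.A.m"); }   // package-private
    public void call() { m(); }                   // invokevirtual of p1.A.m
}

// --- p2/B.java ---
package p2;
public class B extends p1.A {
    // Same name and descriptor, but p1.A.m is not accessible from p2, so this
    // does not override it; it occupies its own vtable entry instead.
    void m() { System.out.println("p2.B.m"); }
}

// new p2.B().call() prints "p1.A.m": B.m never replaced A.m's vtable slot,
// which is exactly why the change above always roots package-private methods
// in a fresh entry.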
// The oop elements are aligned up to wordSize - const int HeapWordsPerOop = heapOopSize/HeapWordSize; - int old_res; + const uint HeapWordsPerOop = heapOopSize/HeapWordSize; + uint old_res; if (HeapWordsPerOop > 0) { old_res = length * HeapWordsPerOop; } else { - old_res = align_size_up(length, OopsPerHeapWord)/OopsPerHeapWord; + old_res = align_size_up((uint)length, OopsPerHeapWord)/OopsPerHeapWord; } + assert(res == old_res, "Inconsistency between old and new."); #endif // ASSERT - int res = ((uint)length + OopsPerHeapWord - 1)/OopsPerHeapWord; - assert(res == old_res, "Inconsistency between old and new."); return res; } diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/oops/typeArrayOop.hpp --- a/src/share/vm/oops/typeArrayOop.hpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/oops/typeArrayOop.hpp Thu Nov 27 11:27:10 2014 +0000 @@ -168,7 +168,7 @@ DEBUG_ONLY(BasicType etype = Klass::layout_helper_element_type(lh)); assert(length <= arrayOopDesc::max_array_length(etype), "no overflow"); - julong size_in_bytes = length; + julong size_in_bytes = (juint)length; size_in_bytes <<= element_shift; size_in_bytes += instance_header_size; julong size_in_words = ((size_in_bytes + (HeapWordSize-1)) >> LogHeapWordSize); diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/opto/bytecodeInfo.cpp --- a/src/share/vm/opto/bytecodeInfo.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/opto/bytecodeInfo.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -364,6 +364,13 @@ set_msg("not an accessor"); return false; } + + // Limit inlining depth in case inlining is forced or + // _max_inline_level was increased to compensate for lambda forms. + if (inline_level() > MaxForceInlineLevel) { + set_msg("MaxForceInlineLevel"); + return false; + } if (inline_level() > _max_inline_level) { if (!callee_method->force_inline() || !IncrementalInline) { set_msg("inlining too deep"); diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/opto/callGenerator.cpp --- a/src/share/vm/opto/callGenerator.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/opto/callGenerator.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -742,7 +742,7 @@ guarantee(!target->is_method_handle_intrinsic(), "should not happen"); // XXX remove const int vtable_index = methodOopDesc::invalid_vtable_index; CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, true, true); - assert(!cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here"); + assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here"); if (cg != NULL && cg->is_inline()) return cg; } @@ -808,7 +808,7 @@ } CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, true, PROB_ALWAYS, true, true); - assert(!cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here"); + assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here"); if (cg != NULL && cg->is_inline()) return cg; } diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/opto/graphKit.cpp --- a/src/share/vm/opto/graphKit.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/opto/graphKit.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -2750,17 +2750,23 @@ } Node* cast_obj = NULL; - if (data != NULL && - // Counter has never been decremented (due to cast failure). - // ...This is a reasonable thing to expect. It is true of - // all casts inserted by javac to implement generic types. 
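The array_size() rewrite in objArrayOop.hpp above (and the (juint) cast in typeArrayOop.hpp) moves the rounding arithmetic to unsigned types so that lengths close to the maximum array length cannot overflow a signed intermediate. A quick Java analogue of the hazard, not part of this changeset (the constants are only illustrative):

// Illustration only: round-up division wraps when done in signed 32-bit
// arithmetic for a near-maximal length, but not when widened first.
public class RoundUpOverflow {
    public static void main(String[] args) {
        int length = Integer.MAX_VALUE;   // near the maximum array length
        int oopsPerHeapWord = 2;          // e.g. 8-byte heap words, 4-byte narrow oops
        int signedRes = (length + oopsPerHeapWord - 1) / oopsPerHeapWord;          // wraps negative
        long widenedRes = ((long) length + oopsPerHeapWord - 1) / oopsPerHeapWord; // unsigned-style result
        System.out.println(signedRes);    // -1073741824  (wrong)
        System.out.println(widenedRes);   //  1073741824  (expected)
    }
}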
- data->as_CounterData()->count() >= 0) { - cast_obj = maybe_cast_profiled_receiver(not_null_obj, data, tk->klass()); - if (cast_obj != NULL) { - if (failure_control != NULL) // failure is now impossible - (*failure_control) = top(); - // adjust the type of the phi to the exact klass: - phi->raise_bottom_type(_gvn.type(cast_obj)->meet(TypePtr::NULL_PTR)); + if (tk->klass_is_exact()) { + // The following optimization tries to statically cast the speculative type of the object + // (for example obtained during profiling) to the type of the superklass and then do a + // dynamic check that the type of the object is what we expect. To work correctly + // for checkcast and aastore the type of superklass should be exact. + if (data != NULL && + // Counter has never been decremented (due to cast failure). + // ...This is a reasonable thing to expect. It is true of + // all casts inserted by javac to implement generic types. + data->as_CounterData()->count() >= 0) { + cast_obj = maybe_cast_profiled_receiver(not_null_obj, data, tk->klass()); + if (cast_obj != NULL) { + if (failure_control != NULL) // failure is now impossible + (*failure_control) = top(); + // adjust the type of the phi to the exact klass: + phi->raise_bottom_type(_gvn.type(cast_obj)->meet(TypePtr::NULL_PTR)); + } } } diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/opto/ifnode.cpp --- a/src/share/vm/opto/ifnode.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/opto/ifnode.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -503,7 +503,7 @@ jint off = 0; if (l->is_top()) { return 0; - } else if (l->is_Add()) { + } else if (l->Opcode() == Op_AddI) { if ((off = l->in(1)->find_int_con(0)) != 0) { ind = l->in(2); } else if ((off = l->in(2)->find_int_con(0)) != 0) { diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/opto/library_call.cpp --- a/src/share/vm/opto/library_call.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/opto/library_call.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -201,7 +201,7 @@ bool inline_math(vmIntrinsics::ID id); bool inline_exp(); bool inline_pow(); - void finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName); + Node* finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName); bool inline_min_max(vmIntrinsics::ID id); Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y); // This returns Type::AnyPtr, RawPtr, or OopPtr. @@ -1580,7 +1580,7 @@ return true; } -void LibraryCallKit::finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName) { +Node* LibraryCallKit::finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName) { //------------------- //result=(result.isNaN())? funcAddr():result; // Check: If isNaN() by checking result!=result? 
then either trap @@ -1596,7 +1596,7 @@ uncommon_trap(Deoptimization::Reason_intrinsic, Deoptimization::Action_make_not_entrant); } - set_result(result); + return result; } else { // If this inlining ever returned NaN in the past, we compile a call // to the runtime to properly handle corner cases @@ -1626,9 +1626,10 @@ result_region->init_req(2, control()); result_val->init_req(2, value); - set_result(result_region, result_val); + set_control(_gvn.transform(result_region)); + return _gvn.transform(result_val); } else { - set_result(result); + return result; } } } @@ -1640,7 +1641,8 @@ Node* arg = round_double_node(argument(0)); Node* n = _gvn.transform(new (C) ExpDNode(C, control(), arg)); - finish_pow_exp(n, arg, NULL, OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dexp), "EXP"); + n = finish_pow_exp(n, arg, NULL, OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dexp), "EXP"); + set_result(n); C->set_has_split_ifs(true); // Has chance for split-if optimization return true; @@ -1650,27 +1652,48 @@ // Inline power instructions, if possible. bool LibraryCallKit::inline_pow() { // Pseudocode for pow - // if (x <= 0.0) { - // long longy = (long)y; - // if ((double)longy == y) { // if y is long - // if (y + 1 == y) longy = 0; // huge number: even - // result = ((1&longy) == 0)?-DPow(abs(x), y):DPow(abs(x), y); + // if (y == 2) { + // return x * x; + // } else { + // if (x <= 0.0) { + // long longy = (long)y; + // if ((double)longy == y) { // if y is long + // if (y + 1 == y) longy = 0; // huge number: even + // result = ((1&longy) == 0)?-DPow(abs(x), y):DPow(abs(x), y); + // } else { + // result = NaN; + // } // } else { - // result = NaN; + // result = DPow(x,y); // } - // } else { - // result = DPow(x,y); + // if (result != result)? { + // result = uncommon_trap() or runtime_call(); + // } + // return result; // } - // if (result != result)? { - // result = uncommon_trap() or runtime_call(); - // } - // return result; Node* x = round_double_node(argument(0)); Node* y = round_double_node(argument(2)); Node* result = NULL; + Node* const_two_node = makecon(TypeD::make(2.0)); + Node* cmp_node = _gvn.transform(new (C) CmpDNode(y, const_two_node)); + Node* bool_node = _gvn.transform(new (C) BoolNode(cmp_node, BoolTest::eq)); + IfNode* if_node = create_and_xform_if(control(), bool_node, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN); + Node* if_true = _gvn.transform(new (C) IfTrueNode(if_node)); + Node* if_false = _gvn.transform(new (C) IfFalseNode(if_node)); + + RegionNode* region_node = new (C) RegionNode(3); + region_node->init_req(1, if_true); + + Node* phi_node = new (C) PhiNode(region_node, Type::DOUBLE); + // special case for x^y where y == 2, we can convert it to x * x + phi_node->init_req(1, _gvn.transform(new (C) MulDNode(x, x))); + + // set control to if_false since we will now process the false branch + set_control(if_false); + if (!too_many_traps(Deoptimization::Reason_intrinsic)) { // Short form: skip the fancy tests and just check for NaN result. 
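The inline_pow() change above adds a runtime test for y == 2 and strength-reduces that case to x * x before falling into the existing NaN handling. From the Java side, the shape it targets is simply Math.pow with an exponent of 2; a tiny sketch of such a hot loop, not part of this changeset (loop bounds are arbitrary):

// Illustration only: after this change, the C2 intrinsic for Math.pow first
// checks y == 2.0 at runtime and computes x * x on that branch instead of
// taking the general pow path.
public class PowTwoLoop {
    static double sink;
    public static void main(String[] args) {
        double acc = 0.0;
        for (int i = 0; i < 5_000_000; i++) {
            acc += Math.pow(i * 0.5, 2.0);   // exponent is 2, so the new fast branch applies
        }
        sink = acc;   // keep the result live so the loop is not eliminated
        System.out.println(sink);
    }
}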
result = _gvn.transform(new (C) PowDNode(C, control(), x, y)); @@ -1794,7 +1817,15 @@ result = _gvn.transform(phi); } - finish_pow_exp(result, x, y, OptoRuntime::Math_DD_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dpow), "POW"); + result = finish_pow_exp(result, x, y, OptoRuntime::Math_DD_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dpow), "POW"); + + // control from finish_pow_exp is now input to the region node + region_node->set_req(2, control()); + // the result from finish_pow_exp is now input to the phi node + phi_node->init_req(2, result); + set_control(_gvn.transform(region_node)); + record_for_igvn(region_node); + set_result(_gvn.transform(phi_node)); C->set_has_split_ifs(true); // Has chance for split-if optimization return true; @@ -3715,8 +3746,11 @@ } -//------------------------------inline_native_hashcode-------------------- -// Build special case code for calls to hashCode on an object. +/** + * Build special case code for calls to hashCode on an object. This call may + * be virtual (invokevirtual) or bound (invokespecial). For each case we generate + * slightly different code. + */ bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) { assert(is_static == callee()->is_static(), "correct intrinsic selection"); assert(!(is_virtual && is_static), "either virtual, special, or static"); @@ -3724,11 +3758,9 @@ enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT }; RegionNode* result_reg = new(C) RegionNode(PATH_LIMIT); - PhiNode* result_val = new(C) PhiNode(result_reg, - TypeInt::INT); + PhiNode* result_val = new(C) PhiNode(result_reg, TypeInt::INT); PhiNode* result_io = new(C) PhiNode(result_reg, Type::ABIO); - PhiNode* result_mem = new(C) PhiNode(result_reg, Type::MEMORY, - TypePtr::BOTTOM); + PhiNode* result_mem = new(C) PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM); Node* obj = NULL; if (!is_static) { // Check for hashing null object @@ -3754,12 +3786,6 @@ return true; } - // After null check, get the object's klass. - Node* obj_klass = load_object_klass(obj); - - // This call may be virtual (invokevirtual) or bound (invokespecial). - // For each case we generate slightly different code. - // We only go to the fast case code if we pass a number of guards. The // paths which do not pass are accumulated in the slow_region. RegionNode* slow_region = new (C) RegionNode(1); @@ -3772,19 +3798,24 @@ // guard for non-virtual calls -- the caller is known to be the native // Object hashCode(). if (is_virtual) { + // After null check, get the object's klass. + Node* obj_klass = load_object_klass(obj); generate_virtual_guard(obj_klass, slow_region); } // Get the header out of the object, use LoadMarkNode when available Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes()); - Node* header = make_load(control(), header_addr, TypeX_X, TypeX_X->basic_type()); + // The control of the load must be NULL. Otherwise, the load can move before + // the null check after castPP removal. + Node* no_ctrl = NULL; + Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type()); // Test the header to see if it is unlocked. 
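The inline_native_hashcode() changes above keep the object-klass load on the virtual-call path only and, crucially, load the mark word with a NULL control edge so the load cannot float above the null check after castPP removal. At the Java level the fast path being generated is the usual identity-hash read-back; a minimal sketch, not part of this changeset:

// Illustration only: in HotSpot the first identity-hash request typically
// installs the hash in the object's mark word; later requests can be answered
// by the header read this intrinsic generates.
public class IdentityHashDemo {
    public static void main(String[] args) {
        Object o = new Object();
        int first = System.identityHashCode(o);   // may take the slow path and install the hash
        int second = System.identityHashCode(o);  // can be satisfied from the mark word
        System.out.println(first == second);      // always true: the identity hash is stable
    }
}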
- Node *lock_mask = _gvn.MakeConX(markOopDesc::biased_lock_mask_in_place); - Node *lmasked_header = _gvn.transform( new (C) AndXNode(header, lock_mask) ); - Node *unlocked_val = _gvn.MakeConX(markOopDesc::unlocked_value); - Node *chk_unlocked = _gvn.transform( new (C) CmpXNode( lmasked_header, unlocked_val)); - Node *test_unlocked = _gvn.transform( new (C) BoolNode( chk_unlocked, BoolTest::ne) ); + Node* lock_mask = _gvn.MakeConX(markOopDesc::biased_lock_mask_in_place); + Node* lmasked_header = _gvn.transform(new (C) AndXNode(header, lock_mask)); + Node* unlocked_val = _gvn.MakeConX(markOopDesc::unlocked_value); + Node* chk_unlocked = _gvn.transform(new (C) CmpXNode( lmasked_header, unlocked_val)); + Node* test_unlocked = _gvn.transform(new (C) BoolNode( chk_unlocked, BoolTest::ne)); generate_slow_guard(test_unlocked, slow_region); @@ -3792,19 +3823,19 @@ // We depend on hash_mask being at most 32 bits and avoid the use of // hash_mask_in_place because it could be larger than 32 bits in a 64-bit // vm: see markOop.hpp. - Node *hash_mask = _gvn.intcon(markOopDesc::hash_mask); - Node *hash_shift = _gvn.intcon(markOopDesc::hash_shift); - Node *hshifted_header= _gvn.transform( new (C) URShiftXNode(header, hash_shift) ); + Node* hash_mask = _gvn.intcon(markOopDesc::hash_mask); + Node* hash_shift = _gvn.intcon(markOopDesc::hash_shift); + Node* hshifted_header= _gvn.transform(new (C) URShiftXNode(header, hash_shift)); // This hack lets the hash bits live anywhere in the mark object now, as long // as the shift drops the relevant bits into the low 32 bits. Note that // Java spec says that HashCode is an int so there's no point in capturing // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build). hshifted_header = ConvX2I(hshifted_header); - Node *hash_val = _gvn.transform( new (C) AndINode(hshifted_header, hash_mask) ); - - Node *no_hash_val = _gvn.intcon(markOopDesc::no_hash); - Node *chk_assigned = _gvn.transform( new (C) CmpINode( hash_val, no_hash_val)); - Node *test_assigned = _gvn.transform( new (C) BoolNode( chk_assigned, BoolTest::eq) ); + Node* hash_val = _gvn.transform(new (C) AndINode(hshifted_header, hash_mask)); + + Node* no_hash_val = _gvn.intcon(markOopDesc::no_hash); + Node* chk_assigned = _gvn.transform(new (C) CmpINode( hash_val, no_hash_val)); + Node* test_assigned = _gvn.transform(new (C) BoolNode( chk_assigned, BoolTest::eq)); generate_slow_guard(test_assigned, slow_region); diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/opto/loopopts.cpp --- a/src/share/vm/opto/loopopts.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/opto/loopopts.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -2754,11 +2754,11 @@ // Hit! Refactor use to use the post-incremented tripcounter. // Compute a post-increment tripcounter. 
Node *opaq = new (C) Opaque2Node( C, cle->incr() ); - register_new_node( opaq, u_ctrl ); + register_new_node(opaq, exit); Node *neg_stride = _igvn.intcon(-cle->stride_con()); set_ctrl(neg_stride, C->root()); Node *post = new (C) AddINode( opaq, neg_stride); - register_new_node( post, u_ctrl ); + register_new_node(post, exit); _igvn.rehash_node_delayed(use); for (uint j = 1; j < use->req(); j++) { if (use->in(j) == phi) diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/opto/reg_split.cpp --- a/src/share/vm/opto/reg_split.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/opto/reg_split.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -51,15 +51,6 @@ static const char out_of_nodes[] = "out of nodes during split"; -static bool contains_no_live_range_input(const Node* def) { - for (uint i = 1; i < def->req(); ++i) { - if (def->in(i) != NULL && def->in_RegMask(i).is_NotEmpty()) { - return false; - } - } - return true; -} - //------------------------------get_spillcopy_wide----------------------------- // Get a SpillCopy node with wide-enough masks. Use the 'wide-mask', the // wide ideal-register spill-mask if possible. If the 'wide-mask' does @@ -326,7 +317,6 @@ if( def->req() > 1 ) { for( uint i = 1; i < def->req(); i++ ) { Node *in = def->in(i); - // Check for single-def (LRG cannot redefined) uint lidx = n2lidx(in); // On PPC we see rematerialized nodes that have a live-range @@ -337,10 +327,13 @@ // count. #if defined(PPC64) if( lidx >= _maxlrg ) return def; -#else - if( lidx >= _maxlrg ) continue; // Value is a recent spill-copy #endif - if (lrgs(lidx).is_singledef()) continue; + // We do not need this for live ranges that are only defined once. + // However, this is not true for spill copies that are added in this + // Split() pass, since they might get coalesced later on in this pass. + if (lidx < _maxlrg && lrgs(lidx).is_singledef()) { + continue; + } Block *b_def = _cfg._bbs[def->_idx]; int idx_def = b_def->find_node(def); @@ -1314,7 +1307,7 @@ Node *def = Reaches[pidx][slidx]; assert( def, "must have reaching def" ); // If input up/down sense and reg-pressure DISagree - if (def->rematerialize() && contains_no_live_range_input(def)) { + if (def->rematerialize()) { // Place the rematerialized node above any MSCs created during // phi node splitting. end_idx points at the insertion point // so look at the node before it. diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/prims/jvm.cpp --- a/src/share/vm/prims/jvm.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/prims/jvm.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -755,6 +755,7 @@ return (jclass) JNIHandles::make_local(env, Klass::cast(k)->java_mirror()); JVM_END +// Not used; JVM_FindClassFromCaller replaces this. JVM_ENTRY(jclass, JVM_FindClassFromClassLoader(JNIEnv* env, const char* name, jboolean init, jobject loader, jboolean throwError)) @@ -781,6 +782,42 @@ return result; JVM_END +// Find a class with this name in this loader, using the caller's protection domain. +JVM_ENTRY(jclass, JVM_FindClassFromCaller(JNIEnv* env, const char* name, + jboolean init, jobject loader, + jclass caller)) + JVMWrapper2("JVM_FindClassFromCaller %s throws ClassNotFoundException", name); + // Java libraries should ensure that name is never null... + if (name == NULL || (int)strlen(name) > Symbol::max_length()) { + // It's impossible to create this class; the name cannot fit + // into the constant pool. 
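The JVM_FindClassFromCaller entry point being added here resolves a class in the given loader while using the caller's protection domain for the package-access check. The caller-facing API this is designed for is the three-argument Class.forName (the library-side wiring is outside this changeset, so treat that mapping as an assumption); a caller-side sketch, not part of this changeset, with an arbitrary class name:

// Illustration only: a three-argument Class.forName, with this class acting as
// the caller whose protection domain the VM would consult.
public class ForNameCaller {
    public static void main(String[] args) throws Exception {
        ClassLoader loader = ForNameCaller.class.getClassLoader();
        Class<?> c = Class.forName("java.util.ArrayList", false, loader);
        System.out.println(c.getName());
    }
}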
+ THROW_MSG_0(vmSymbols::java_lang_ClassNotFoundException(), name); + } + + TempNewSymbol h_name = SymbolTable::new_symbol(name, CHECK_NULL); + + oop loader_oop = JNIHandles::resolve(loader); + oop from_class = JNIHandles::resolve(caller); + oop protection_domain = NULL; + // If loader is null, shouldn't call ClassLoader.checkPackageAccess; otherwise get + // NPE. Put it in another way, the bootstrap class loader has all permission and + // thus no checkPackageAccess equivalence in the VM class loader. + // The caller is also passed as NULL by the java code if there is no security + // manager to avoid the performance cost of getting the calling class. + if (from_class != NULL && loader_oop != NULL) { + protection_domain = instanceKlass::cast(java_lang_Class::as_klassOop(from_class))->protection_domain(); + } + + Handle h_loader(THREAD, loader_oop); + Handle h_prot(THREAD, protection_domain); + jclass result = find_class_from_class_loader(env, h_name, init, h_loader, + h_prot, false, THREAD); + + if (TraceClassResolution && result != NULL) { + trace_class_resolution(java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(result))); + } + return result; +JVM_END JVM_ENTRY(jclass, JVM_FindClassFromClass(JNIEnv *env, const char *name, jboolean init, jclass from)) @@ -4107,10 +4144,15 @@ // Shared JNI/JVM entry points ////////////////////////////////////////////////////////////// -jclass find_class_from_class_loader(JNIEnv* env, Symbol* name, jboolean init, Handle loader, Handle protection_domain, jboolean throwError, TRAPS) { +jclass find_class_from_class_loader(JNIEnv* env, Symbol* name, jboolean init, + Handle loader, Handle protection_domain, + jboolean throwError, TRAPS) { // Security Note: // The Java level wrapper will perform the necessary security check allowing - // us to pass the NULL as the initiating class loader. + // us to pass the NULL as the initiating class loader. The VM is responsible for + // the checkPackageAccess relative to the initiating class loader via the + // protection_domain. The protection_domain is passed as NULL by the java code + // if there is no security manager in 3-arg Class.forName(). klassOop klass = SystemDictionary::resolve_or_fail(name, loader, protection_domain, throwError != 0, CHECK_NULL); KlassHandle klass_handle(THREAD, klass); diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/prims/jvm.h --- a/src/share/vm/prims/jvm.h Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/prims/jvm.h Thu Nov 27 11:27:10 2014 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -418,6 +418,19 @@ JVM_FindClassFromBootLoader(JNIEnv *env, const char *name); /* + * Find a class from a given class loader. Throws ClassNotFoundException. + * name: name of class + * init: whether initialization is done + * loader: class loader to look up the class. This may not be the same as the caller's + * class loader. + * caller: initiating class. The initiating class may be null when a security + * manager is not installed. + */ +JNIEXPORT jclass JNICALL +JVM_FindClassFromCaller(JNIEnv *env, const char *name, jboolean init, + jobject loader, jclass caller); + +/* * Find a class from a given class. 
*/ JNIEXPORT jclass JNICALL diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/runtime/arguments.cpp --- a/src/share/vm/runtime/arguments.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/runtime/arguments.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -2084,19 +2084,9 @@ 1, 100, "TLABWasteTargetPercent"); status = status && verify_object_alignment(); - -#ifdef SPARC - if (UseConcMarkSweepGC || UseG1GC) { - // Issue a stern warning if the user has explicitly set - // UseMemSetInBOT (it is known to cause issues), but allow - // use for experimentation and debugging. - if (VM_Version::is_sun4v() && UseMemSetInBOT) { - assert(!FLAG_IS_DEFAULT(UseMemSetInBOT), "Error"); - warning("Experimental flag -XX:+UseMemSetInBOT is known to cause instability" - " on sun4v; please understand that you are using at your own risk!"); - } - } -#endif // SPARC +#ifdef COMPILER1 + status = status && verify_min_value(ValueMapInitialSize, 1, "ValueMapInitialSize"); +#endif // check native memory tracking flags if (PrintNMTStatistics && MemTracker::tracking_level() == MemTracker::NMT_off) { @@ -2104,6 +2094,10 @@ PrintNMTStatistics = false; } +#ifdef COMPILER1 + status &= verify_interval(SafepointPollOffset, 0, os::vm_page_size() - BytesPerWord, "SafepointPollOffset"); +#endif + return status; } diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/runtime/globals.hpp --- a/src/share/vm/runtime/globals.hpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/runtime/globals.hpp Thu Nov 27 11:27:10 2014 +0000 @@ -64,6 +64,9 @@ #ifdef TARGET_OS_FAMILY_windows # include "globals_windows.hpp" #endif +#ifdef TARGET_OS_FAMILY_aix +# include "globals_aix.hpp" +#endif #ifdef TARGET_OS_FAMILY_bsd # include "globals_bsd.hpp" #endif @@ -94,6 +97,9 @@ #ifdef TARGET_OS_ARCH_linux_ppc # include "globals_linux_ppc.hpp" #endif +#ifdef TARGET_OS_ARCH_aix_ppc +# include "globals_aix_ppc.hpp" +#endif #ifdef TARGET_OS_ARCH_bsd_x86 # include "globals_bsd_x86.hpp" #endif @@ -125,6 +131,9 @@ #ifdef TARGET_OS_FAMILY_windows # include "c1_globals_windows.hpp" #endif +#ifdef TARGET_OS_FAMILY_aix +# include "c1_globals_aix.hpp" +#endif #ifdef TARGET_OS_FAMILY_bsd # include "c1_globals_bsd.hpp" #endif @@ -142,6 +151,9 @@ #ifdef TARGET_ARCH_arm # include "c2_globals_arm.hpp" #endif +#ifdef TARGET_ARCH_ppc +# include "c2_globals_ppc.hpp" +#endif #ifdef TARGET_OS_FAMILY_linux # include "c2_globals_linux.hpp" #endif @@ -151,6 +163,9 @@ #ifdef TARGET_OS_FAMILY_windows # include "c2_globals_windows.hpp" #endif +#ifdef TARGET_OS_FAMILY_aix +# include "c2_globals_aix.hpp" +#endif #ifdef TARGET_OS_FAMILY_bsd # include "c2_globals_bsd.hpp" #endif @@ -179,7 +194,6 @@ define_pd_global(intx, OnStackReplacePercentage, 0); define_pd_global(bool, ResizeTLAB, false); define_pd_global(intx, FreqInlineSize, 0); -define_pd_global(intx, InlineSmallCode, 0); define_pd_global(intx, NewSizeThreadIncrease, 4*K); define_pd_global(intx, InlineClassNatives, true); define_pd_global(intx, InlineUnsafeOps, true); @@ -1112,9 +1126,11 @@ "Prevent spurious or premature wakeups from object.wait " \ "(Solaris only)") \ \ - product(intx, NativeMonitorTimeout, -1, "(Unstable)" ) \ - product(intx, NativeMonitorFlags, 0, "(Unstable)" ) \ - product(intx, NativeMonitorSpinLimit, 20, "(Unstable)" ) \ + experimental(intx, NativeMonitorTimeout, -1, "(Unstable)") \ + \ + experimental(intx, NativeMonitorFlags, 0, "(Unstable)") \ + \ + experimental(intx, NativeMonitorSpinLimit, 20, "(Unstable)") \ \ develop(bool, UsePthreads, false, \ "Use pthread-based instead of libthread-based synchronization " \ 
@@ -1220,7 +1236,7 @@ "Decay time (in milliseconds) to re-enable bulk rebiasing of a " \ "type after previous bulk rebias") \ \ - diagnostic(bool, JavaObjectsInPerm, false, \ + develop(bool, JavaObjectsInPerm, false, \ "controls whether Classes and interned Strings are allocated" \ "in perm. This purely intended to allow debugging issues" \ "in production.") \ @@ -2727,6 +2743,11 @@ product_pd(bool, ProfileInterpreter, \ "Profile at the bytecode level during interpretation") \ \ + develop(bool, TraceProfileInterpreter, false, \ + "Trace profiling at the bytecode level during interpretation. " \ + "This outputs the profiling information collected to improve " \ + "jit compilation.") \ + \ develop_pd(bool, ProfileTraps, \ "Profile deoptimization traps at the bytecode level") \ \ @@ -2882,6 +2903,9 @@ product(intx, MaxRecursiveInlineLevel, 1, \ "maximum number of nested recursive calls that are inlined") \ \ + develop(intx, MaxForceInlineLevel, 100, \ + "maximum number of nested @ForceInline calls that are inlined") \ + \ product_pd(intx, InlineSmallCode, \ "Only inline already compiled methods if their code size is " \ "less than this") \ @@ -2998,7 +3022,7 @@ product(uintx, InitialHeapSize, 0, \ "Initial heap size (in bytes); zero means OldSize + NewSize") \ \ - product(uintx, MaxHeapSize, ScaleForWordSize(96*M), \ + product(uintx, MaxHeapSize, ScaleForWordSize(512*M), \ "Maximum heap size (in bytes)") \ \ product(uintx, OldSize, ScaleForWordSize(4*M), \ @@ -3164,7 +3188,8 @@ "disable this feature") \ \ /* code cache parameters */ \ - develop(uintx, CodeCacheSegmentSize, 64, \ + /* ppc64 has large code-entry alignment. */ \ + develop(uintx, CodeCacheSegmentSize, 64 PPC64_ONLY(+64), \ "Code cache segment size (in bytes) - smallest unit of " \ "allocation") \ \ diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/runtime/objectMonitor.cpp --- a/src/share/vm/runtime/objectMonitor.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/runtime/objectMonitor.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -417,6 +417,15 @@ jt->java_suspend_self(); } Self->set_current_pending_monitor(NULL); + + // We cleared the pending monitor info since we've just gotten past + // the enter-check-for-suspend dance and we now own the monitor free + // and clear, i.e., it is no longer pending. The ThreadBlockInVM + // destructor can go to a safepoint at the end of this block. If we + // do a thread dump during that safepoint, then this thread will show + // as having "-locked" the monitor, but the OS and java.lang.Thread + // states will still report that the thread is blocked trying to + // acquire it. } Atomic::dec_ptr(&_count); @@ -1599,33 +1608,25 @@ // post monitor waited event. Note that this is past-tense, we are done waiting. if (JvmtiExport::should_post_monitor_waited()) { JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT); - } - // Without the fix for 8028280, it is possible for the above call: - // - // Thread::SpinAcquire (&_WaitSetLock, "WaitSet - unlink") ; - // - // to consume the unpark() that was done when the successor was set. - // The solution for this very rare possibility is to redo the unpark() - // outside of the JvmtiExport::should_post_monitor_waited() check. - // - if (node._notified != 0 && _succ == Self) { - // In this part of the monitor wait-notify-reenter protocol it - // is possible (and normal) for another thread to do a fastpath - // monitor enter-exit while this thread is still trying to get - // to the reenter portion of the protocol. 
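Among the globals.hpp changes above, the NativeMonitor* switches move from product to experimental (so setting them now requires -XX:+UnlockExperimentalVMOptions on the command line) and the MaxHeapSize default scaling base grows from 96M to 512M. One way to observe the resulting default from Java, not part of this changeset, using the com.sun.management extension shipped with HotSpot JDKs:

// Illustration only: query the effective MaxHeapSize setting of the running VM.
import java.lang.management.ManagementFactory;
import com.sun.management.HotSpotDiagnosticMXBean;

public class ShowMaxHeap {
    public static void main(String[] args) {
        HotSpotDiagnosticMXBean bean =
            ManagementFactory.getPlatformMXBean(HotSpotDiagnosticMXBean.class);
        // Prints the VMOption for MaxHeapSize, including its value and origin.
        System.out.println(bean.getVMOption("MaxHeapSize"));
    }
}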
- // - // The ObjectMonitor was notified and the current thread is - // the successor which also means that an unpark() has already - // been done. The JVMTI_EVENT_MONITOR_WAITED event handler can - // consume the unpark() that was done when the successor was - // set because the same ParkEvent is shared between Java - // monitors and JVM/TI RawMonitors (for now). - // - // We redo the unpark() to ensure forward progress, i.e., we - // don't want all pending threads hanging (parked) with none - // entering the unlocked monitor. - node._event->unpark(); + if (node._notified != 0 && _succ == Self) { + // In this part of the monitor wait-notify-reenter protocol it + // is possible (and normal) for another thread to do a fastpath + // monitor enter-exit while this thread is still trying to get + // to the reenter portion of the protocol. + // + // The ObjectMonitor was notified and the current thread is + // the successor which also means that an unpark() has already + // been done. The JVMTI_EVENT_MONITOR_WAITED event handler can + // consume the unpark() that was done when the successor was + // set because the same ParkEvent is shared between Java + // monitors and JVM/TI RawMonitors (for now). + // + // We redo the unpark() to ensure forward progress, i.e., we + // don't want all pending threads hanging (parked) with none + // entering the unlocked monitor. + node._event->unpark(); + } } if (event.should_commit()) { diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/runtime/os.hpp --- a/src/share/vm/runtime/os.hpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/runtime/os.hpp Thu Nov 27 11:27:10 2014 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -424,7 +424,10 @@ static intx current_thread_id(); static int current_process_id(); static int sleep(Thread* thread, jlong ms, bool interruptable); - static int naked_sleep(); + // Short standalone OS sleep suitable for slow path spin loop. + // Ignores Thread.interrupt() (so keep it short). + // ms = 0, will sleep for the least amount of time allowed by the OS. + static void naked_short_sleep(jlong ms); static void infinite_sleep(); // never returns, use with CAUTION static void yield(); // Yields to all threads with same priority enum YieldResult { @@ -464,9 +467,6 @@ // run cmd in a separate process and return its exit code; or -1 on failures static int fork_and_exec(char *cmd); - // Set file to send error reports. - static void set_error_file(const char *logfile); - // os::exit() is merged with vm_exit() // static void exit(int num); diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/runtime/park.cpp --- a/src/share/vm/runtime/park.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/runtime/park.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -59,58 +59,22 @@ // Start by trying to recycle an existing but unassociated // ParkEvent from the global free list. - for (;;) { - ev = FreeList ; - if (ev == NULL) break ; - // 1: Detach - sequester or privatize the list - // Tantamount to ev = Swap (&FreeList, NULL) - if (Atomic::cmpxchg_ptr (NULL, &FreeList, ev) != ev) { - continue ; + // Using a spin lock since we are part of the mutex impl. + // 8028280: using concurrent free list without memory management can leak + // pretty badly it turns out. 
+ Thread::SpinAcquire(&ListLock, "ParkEventFreeListAllocate"); + { + ev = FreeList; + if (ev != NULL) { + FreeList = ev->FreeNext; } - - // We've detached the list. The list in-hand is now - // local to this thread. This thread can operate on the - // list without risk of interference from other threads. - // 2: Extract -- pop the 1st element from the list. - ParkEvent * List = ev->FreeNext ; - if (List == NULL) break ; - for (;;) { - // 3: Try to reattach the residual list - guarantee (List != NULL, "invariant") ; - ParkEvent * Arv = (ParkEvent *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ; - if (Arv == NULL) break ; - - // New nodes arrived. Try to detach the recent arrivals. - if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) { - continue ; - } - guarantee (Arv != NULL, "invariant") ; - // 4: Merge Arv into List - ParkEvent * Tail = List ; - while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ; - Tail->FreeNext = Arv ; - } - break ; } + Thread::SpinRelease(&ListLock); if (ev != NULL) { guarantee (ev->AssociatedWith == NULL, "invariant") ; } else { // Do this the hard way -- materialize a new ParkEvent. - // In rare cases an allocating thread might detach a long list -- - // installing null into FreeList -- and then stall or be obstructed. - // A 2nd thread calling Allocate() would see FreeList == null. - // The list held privately by the 1st thread is unavailable to the 2nd thread. - // In that case the 2nd thread would have to materialize a new ParkEvent, - // even though free ParkEvents existed in the system. In this case we end up - // with more ParkEvents in circulation than we need, but the race is - // rare and the outcome is benign. Ideally, the # of extant ParkEvents - // is equal to the maximum # of threads that existed at any one time. - // Because of the race mentioned above, segments of the freelist - // can be transiently inaccessible. At worst we may end up with the - // # of ParkEvents in circulation slightly above the ideal. - // Note that if we didn't have the TSM/immortal constraint, then - // when reattaching, above, we could trim the list. ev = new ParkEvent () ; guarantee ((intptr_t(ev) & 0xFF) == 0, "invariant") ; } @@ -124,13 +88,14 @@ if (ev == NULL) return ; guarantee (ev->FreeNext == NULL , "invariant") ; ev->AssociatedWith = NULL ; - for (;;) { - // Push ev onto FreeList - // The mechanism is "half" lock-free. - ParkEvent * List = FreeList ; - ev->FreeNext = List ; - if (Atomic::cmpxchg_ptr (ev, &FreeList, List) == List) break ; + // Note that if we didn't have the TSM/immortal constraint, then + // when reattaching we could trim the list. + Thread::SpinAcquire(&ListLock, "ParkEventFreeListRelease"); + { + ev->FreeNext = FreeList; + FreeList = ev; } + Thread::SpinRelease(&ListLock); } // Override operator new and delete so we can ensure that the @@ -164,56 +129,21 @@ // Start by trying to recycle an existing but unassociated // Parker from the global free list. - for (;;) { - p = FreeList ; - if (p == NULL) break ; - // 1: Detach - // Tantamount to p = Swap (&FreeList, NULL) - if (Atomic::cmpxchg_ptr (NULL, &FreeList, p) != p) { - continue ; + // 8028280: using concurrent free list without memory management can leak + // pretty badly it turns out. + Thread::SpinAcquire(&ListLock, "ParkerFreeListAllocate"); + { + p = FreeList; + if (p != NULL) { + FreeList = p->FreeNext; } - - // We've detached the list. The list in-hand is now - // local to this thread. This thread can operate on the - // list without risk of interference from other threads. 
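The park.cpp rework above (8028280) drops the lock-free detach/reattach free lists for ParkEvent and Parker and instead guards a plain singly linked free list with Thread::SpinAcquire/SpinRelease. A compact Java analogue of that design choice, not part of this changeset (class and method names are made up):

// Illustration only: a free list protected by a tiny spin lock, in the spirit
// of the ListLock now used in park.cpp, instead of a lock-free Treiber list.
import java.util.concurrent.atomic.AtomicBoolean;

public class SpinLockedFreeList<T> {
    private static final class Node<T> {
        final T item;
        Node<T> next;
        Node(T item) { this.item = item; }
    }

    private final AtomicBoolean lock = new AtomicBoolean(false);
    private Node<T> head;   // only read or written while holding the spin lock

    private void acquire() { while (!lock.compareAndSet(false, true)) Thread.yield(); }
    private void release() { lock.set(false); }

    T allocate() {          // pop a recycled item, or null if the list is empty
        acquire();
        try {
            Node<T> n = head;
            if (n == null) return null;
            head = n.next;
            return n.item;
        } finally {
            release();
        }
    }

    void recycle(T item) {  // push an item back onto the free list
        Node<T> n = new Node<T>(item);
        acquire();
        try {
            n.next = head;
            head = n;
        } finally {
            release();
        }
    }

    public static void main(String[] args) {
        SpinLockedFreeList<String> fl = new SpinLockedFreeList<String>();
        fl.recycle("event-1");
        System.out.println(fl.allocate());   // event-1
        System.out.println(fl.allocate());   // null
    }
}

The trade-off mirrors the patch's own comment: the critical sections are tiny and this code sits underneath the mutex implementation, so a simple spin lock is acceptable and avoids the leak the old half-lock-free scheme allowed.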
- // 2: Extract -- pop the 1st element from the list. - Parker * List = p->FreeNext ; - if (List == NULL) break ; - for (;;) { - // 3: Try to reattach the residual list - guarantee (List != NULL, "invariant") ; - Parker * Arv = (Parker *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ; - if (Arv == NULL) break ; - - // New nodes arrived. Try to detach the recent arrivals. - if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) { - continue ; - } - guarantee (Arv != NULL, "invariant") ; - // 4: Merge Arv into List - Parker * Tail = List ; - while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ; - Tail->FreeNext = Arv ; - } - break ; } + Thread::SpinRelease(&ListLock); if (p != NULL) { guarantee (p->AssociatedWith == NULL, "invariant") ; } else { // Do this the hard way -- materialize a new Parker.. - // In rare cases an allocating thread might detach - // a long list -- installing null into FreeList --and - // then stall. Another thread calling Allocate() would see - // FreeList == null and then invoke the ctor. In this case we - // end up with more Parkers in circulation than we need, but - // the race is rare and the outcome is benign. - // Ideally, the # of extant Parkers is equal to the - // maximum # of threads that existed at any one time. - // Because of the race mentioned above, segments of the - // freelist can be transiently inaccessible. At worst - // we may end up with the # of Parkers in circulation - // slightly above the ideal. p = new Parker() ; } p->AssociatedWith = t ; // Associate p with t @@ -227,11 +157,12 @@ guarantee (p->AssociatedWith != NULL, "invariant") ; guarantee (p->FreeNext == NULL , "invariant") ; p->AssociatedWith = NULL ; - for (;;) { - // Push p onto FreeList - Parker * List = FreeList ; - p->FreeNext = List ; - if (Atomic::cmpxchg_ptr (p, &FreeList, List) == List) break ; + + Thread::SpinAcquire(&ListLock, "ParkerFreeListRelease"); + { + p->FreeNext = FreeList; + FreeList = p; } + Thread::SpinRelease(&ListLock); } diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/runtime/thread.cpp --- a/src/share/vm/runtime/thread.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/runtime/thread.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -4419,9 +4419,7 @@ ++ctr ; if ((ctr & 0xFFF) == 0 || !os::is_MP()) { if (Yields > 5) { - // Consider using a simple NakedSleep() instead. - // Then SpinAcquire could be called by non-JVM threads - Thread::current()->_ParkEvent->park(1) ; + os::naked_short_sleep(1); } else { os::NakedYield() ; ++Yields ; diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/runtime/vframe.cpp --- a/src/share/vm/runtime/vframe.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/runtime/vframe.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -197,6 +197,7 @@ continue; } if (monitor->owner() != NULL) { + // the monitor is associated with an object, i.e., it is locked // First, assume we have the monitor locked. 
If we haven't found an // owned monitor before and this is the first frame, then we need to @@ -207,7 +208,11 @@ if (!found_first_monitor && frame_count == 0) { markOop mark = monitor->owner()->mark(); if (mark->has_monitor() && - mark->monitor() == thread()->current_pending_monitor()) { + ( // we have marked ourself as pending on this monitor + mark->monitor() == thread()->current_pending_monitor() || + // we are not the owner of this monitor + !mark->monitor()->is_entered(thread()) + )) { lock_state = "waiting to lock"; } } diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/utilities/events.cpp --- a/src/share/vm/utilities/events.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/utilities/events.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -96,7 +96,7 @@ va_start(ap, format); // Save a copy of begin message and log it. _buffer.printv(format, ap); - Events::log(NULL, _buffer); + Events::log(NULL, "%s", (const char*)_buffer); va_end(ap); } } @@ -105,6 +105,6 @@ if (LogEvents) { // Append " done" to the begin message and log it _buffer.append(" done"); - Events::log(NULL, _buffer); + Events::log(NULL, "%s", (const char*)_buffer); } } diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/utilities/macros.hpp --- a/src/share/vm/utilities/macros.hpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/utilities/macros.hpp Thu Nov 27 11:27:10 2014 +0000 @@ -242,7 +242,11 @@ #define PPC_ONLY(code) code #define NOT_PPC(code) #else -#undef PPC + +#ifdef PPC +#error "PPC is either 32- or 64-bit." +#endif + #define PPC_ONLY(code) #define NOT_PPC(code) code #endif diff -r 80e04c4cd4b2 -r 205e1ae8868b src/share/vm/utilities/vmError.cpp --- a/src/share/vm/utilities/vmError.cpp Tue Nov 25 17:36:55 2014 +0000 +++ b/src/share/vm/utilities/vmError.cpp Thu Nov 27 11:27:10 2014 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
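The events.cpp fix above stops feeding the event buffer to Events::log() as the format string and passes it as a "%s" argument instead, so a stray '%' in a logged message can no longer be misinterpreted. The same class of bug is easy to reproduce with java.util.Formatter; a sketch, not part of this changeset:

// Illustration only: treating arbitrary message text as a format string breaks
// as soon as the text contains '%'; passing it as data via "%s" is always safe.
public class FormatStringDemo {
    public static void main(String[] args) {
        String msg = "resolved 100%s of constant pool entries";   // message that happens to contain '%'
        System.out.println(String.format("%s", msg));             // safe: message is data
        try {
            System.out.println(String.format(msg));               // unsafe: message is the format
        } catch (java.util.MissingFormatArgumentException e) {
            System.out.println("format string misuse: " + e);
        }
    }
}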
* * This code is free software; you can redistribute it and/or modify it @@ -972,7 +972,6 @@ if (fd != -1) { out.print_raw("# An error report file with more information is saved as:\n# "); out.print_raw_cr(buffer); - os::set_error_file(buffer); log.set_fd(fd); } else { diff -r 80e04c4cd4b2 -r 205e1ae8868b test/Makefile --- a/test/Makefile Tue Nov 25 17:36:55 2014 +0000 +++ b/test/Makefile Thu Nov 27 11:27:10 2014 +0000 @@ -173,8 +173,8 @@ JTREG_TESTDIRS = $(TESTDIRS) endif -# Default JTREG to run (win32 script works for everybody) -JTREG = $(JT_HOME)/win32/bin/jtreg +# Default JTREG to run +JTREG = $(JT_HOME)/bin/jtreg # Option to tell jtreg to not run tests marked with "ignore" ifeq ($(PLATFORM), windows) diff -r 80e04c4cd4b2 -r 205e1ae8868b test/compiler/7141637/SpreadNullArg.java --- a/test/compiler/7141637/SpreadNullArg.java Tue Nov 25 17:36:55 2014 +0000 +++ b/test/compiler/7141637/SpreadNullArg.java Thu Nov 27 11:27:10 2014 +0000 @@ -46,13 +46,17 @@ mh_spread_target = MethodHandles.lookup().findStatic(SpreadNullArg.class, "target_spread_arg", mt_ref_arg); result = (int) mh_spreadInvoker.invokeExact(mh_spread_target, (Object[]) null); - } catch(NullPointerException e) { - // Expected exception - do nothing! - } catch(Throwable e) { + throw new Error("Expected IllegalArgumentException was not thrown"); + } catch (IllegalArgumentException e) { + System.out.println("Expected exception : " + e); + } catch (Throwable e) { throw new Error(e); } - if (result != 42) throw new Error("Expected NullPointerException was not thrown"); + if (result != 42) { + throw new Error("result [" + result + + "] != 42 : Expected IllegalArgumentException was not thrown?"); + } } public static int target_spread_arg(Integer i1) { diff -r 80e04c4cd4b2 -r 205e1ae8868b test/compiler/EscapeAnalysis/TestAllocatedEscapesPtrComparison.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/EscapeAnalysis/TestAllocatedEscapesPtrComparison.java Thu Nov 27 11:27:10 2014 +0000 @@ -0,0 +1,107 @@ +/* + * Copyright 2014 Google, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8043354 + * @summary bcEscapeAnalyzer allocated_escapes not conservative enough + * @run main/othervm -XX:CompileOnly=.visitAndPop TestAllocatedEscapesPtrComparison + * @author Chuck Rasbold rasbold@google.com + */ + +/* + * Test always passes with -XX:-OptmimizePtrCompare + */ + +import java.util.ArrayList; +import java.util.List; + +public class TestAllocatedEscapesPtrComparison { + + static TestAllocatedEscapesPtrComparison dummy; + + class Marker { + } + + List markerList = new ArrayList<>(); + + // Suppress compilation of this method, it must be processed + // by the bytecode escape analyzer. + + // Make a new marker and put it on the List + Marker getMarker() { + // result escapes through markerList + final Marker result = new Marker(); + markerList.add(result); + return result; + } + + void visit(int depth) { + // Make a new marker + getMarker(); + + // Call visitAndPop every once in a while + // Cap the depth of our recursive visits + if (depth % 10 == 2) { + visitAndPop(depth + 1); + } else if (depth < 15) { + visit(depth + 1); + } + } + + void visitAndPop(int depth) { + // Random dummy allocation to force EscapeAnalysis to process this method + dummy = new TestAllocatedEscapesPtrComparison(); + + // Make a new marker + Marker marker = getMarker(); + + visit(depth + 1); + + // Walk and pop the marker list up to the current marker + boolean found = false; + for (int i = markerList.size() - 1; i >= 0; i--) { + Marker removed = markerList.remove(i); + + // In the failure, EA mistakenly converts this comparison to false + if (removed == marker) { + found = true; + break; + } + } + + if (!found) { + throw new RuntimeException("test fails"); + } + } + + + public static void main(String args[]) { + TestAllocatedEscapesPtrComparison tc = new TestAllocatedEscapesPtrComparison(); + + // Warmup and run enough times + for (int i = 0; i < 20000; i++) { + tc.visit(0); + } + } +} diff -r 80e04c4cd4b2 -r 205e1ae8868b test/compiler/intrinsics/hashcode/TestHashCode.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/hashcode/TestHashCode.java Thu Nov 27 11:27:10 2014 +0000 @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8011646 + * @summary SEGV in compiled code with loop predication + * @run main/othervm -XX:-TieredCompilation -XX:CompileOnly=TestHashCode.m1,Object.hashCode TestHashCode + * + */ + +public class TestHashCode { + static class A { + int i; + } + + static class B extends A { + } + + static boolean crash = false; + + static A m2() { + if (crash) { + return null; + } + return new A(); + } + + static int m1(A aa) { + int res = 0; + for (int i = 0; i < 10; i++) { + A a = m2(); + int j = a.i; + if (aa instanceof B) { + } + res += a.hashCode(); + } + return res; + } + + public static void main(String[] args) { + A a = new A(); + for (int i = 0; i < 20000; i++) { + m1(a); + } + crash = true; + try { + m1(a); + } catch (NullPointerException e) { + System.out.println("Test passed"); + } + } +} diff -r 80e04c4cd4b2 -r 205e1ae8868b test/compiler/loopopts/TestLogSum.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/loopopts/TestLogSum.java Thu Nov 27 11:27:10 2014 +0000 @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8046516 + * @summary Segmentation fault in JVM (easily reproducible) + * @run main/othervm -XX:-TieredCompilation -Xbatch TestLogSum + * @author jackkamm@gmail.com + */ + +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +public class TestLogSum { + public static void main(String[] args) { + double sum; + + for (int i = 0; i < 6; i++) { + for (int n = 2; n < 30; n++) { + for (int j = 1; j <= n; j++) { + for (int k = 1; k <= j; k++) { + // System.out.println(computeSum(k, j)); + sum = computeSum(k, j); + } + } + } + } + } + + private static Map, Double> cache = new HashMap, Double>(); + public static double computeSum(int x, int y) { + List key = Arrays.asList(new Integer[] {x, y}); + + if (!cache.containsKey(key)) { + + // explicitly creating/updating a double[] array, instead of using the LogSumArray wrapper object, will prevent the error + LogSumArray toReturn = new LogSumArray(x); + + // changing loop indices will prevent the error + // in particular, for(z=0; z (a TestThreadDumpMonitorContention$1) + final static Pattern LOCK_PATTERN = Pattern.compile( + ".* locked \\<.*\\(a TestThreadDumpMonitorContention.*"); + + // sanity checking header and thread state lines associated + // with this pattern: + // - waiting to lock <0x000000076ac59e20> (a TestThreadDumpMonitorContention$1) + final static Pattern WAITING_PATTERN = Pattern.compile( + ".* waiting to lock \\<.*\\(a TestThreadDumpMonitorContention.*"); + + final static Object barrier = new Object(); + volatile static boolean done = false; + + static int barrier_cnt = 0; + static int blank_line_match_cnt = 0; + static int error_cnt = 0; + static boolean have_header_line = false; + static boolean have_thread_state_line = false; + static String header_line = null; + static int header_prefix_match_cnt = 0; + static int locked_line_match_cnt = 0; + static String[] locked_match_list = new String[2]; + static int n_samples = 15; + static int sum_both_running_cnt = 0; + static int sum_both_waiting_cnt = 0; + static int sum_contended_cnt = 0; + static int sum_locked_hdr_runnable_cnt = 0; + static int sum_locked_hdr_waiting1_cnt = 0; + static int sum_locked_hdr_waiting2_cnt = 0; + static int sum_locked_thr_state_blocked_cnt = 0; + static int sum_locked_thr_state_runnable_cnt = 0; + static int sum_one_waiting_cnt = 0; + static int sum_uncontended_cnt = 0; + static int sum_waiting_hdr_waiting1_cnt = 0; + static int sum_waiting_thr_state_blocked_cnt = 0; + static String thread_state_line = null; + static boolean verbose = false; + static int waiting_line_match_cnt = 0; + + public static void main(String[] args) throws Exception { + if (args.length != 0) { + int arg_i = 0; + if (args[arg_i].equals("-v")) { + verbose = true; + arg_i++; + } + + try { + n_samples = Integer.parseInt(args[arg_i]); + } catch (NumberFormatException nfe) { + System.err.println(nfe); + usage(); + } + } + + Runnable runnable = new Runnable() { + public void run() { + synchronized (barrier) { + // let the main thread know we're running + barrier_cnt++; + barrier.notify(); + } + while (!done) { + synchronized (this) { } + } + } + }; + Thread[] thread_list = new Thread[2]; + thread_list[0] = new Thread(runnable, "ContendingThread-1"); + thread_list[1] = new Thread(runnable, "ContendingThread-2"); + synchronized (barrier) { + thread_list[0].start(); + thread_list[1].start(); + + // Wait until the contending threads are running so that + // we don't sample any thread init states. 
+ while (barrier_cnt < 2) { + barrier.wait(); + } + } + + doSamples(); + + done = true; + + thread_list[0].join(); + thread_list[1].join(); + + if (error_cnt == 0) { + System.out.println("Test PASSED."); + } else { + System.out.println("Test FAILED."); + throw new AssertionError("error_cnt=" + error_cnt); + } + } + + // Reached a blank line which is the end of the + // stack trace without matching either LOCK_PATTERN + // or WAITING_PATTERN. Rare, but it's not an error. + // + // Example: + // "ContendingThread-1" #21 prio=5 os_prio=64 tid=0x00000000007b9000 nid=0x2f runnable [0xfffffd7fc1111000] + // java.lang.Thread.State: RUNNABLE + // at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:140) + // at java.lang.Thread.run(Thread.java:745) + // + static boolean checkBlankLine(String line) { + if (line.length() == 0) { + blank_line_match_cnt++; + have_header_line = false; + have_thread_state_line = false; + return true; + } + + return false; + } + + // Process the locked line here if we found one. + // + // Example 1: + // "ContendingThread-1" #21 prio=5 os_prio=64 tid=0x00000000007b9000 nid=0x2f runnable [0xfffffd7fc1111000] + // java.lang.Thread.State: RUNNABLE + // at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:140) + // - locked <0xfffffd7e6a2912f8> (a TestThreadDumpMonitorContention$1) + // at java.lang.Thread.run(Thread.java:745) + // + // Example 2: + // "ContendingThread-1" #21 prio=5 os_prio=64 tid=0x00000000007b9000 nid=0x2f waiting for monitor entry [0xfffffd7fc1111000] + // java.lang.Thread.State: BLOCKED (on object monitor) + // at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:140) + // - locked <0xfffffd7e6a2912f8> (a TestThreadDumpMonitorContention$1) + // at java.lang.Thread.run(Thread.java:745) + // + // Example 3: + // "ContendingThread-2" #24 prio=5 os_prio=64 tid=0x0000000000ec8800 nid=0x31 waiting on condition [0xfffffd7bbfffe000] + // java.lang.Thread.State: RUNNABLE + // JavaThread state: _thread_blocked + // Thread: 0x0000000000ec8800 [0x31] State: _at_safepoint _has_called_back 0 _at_poll_safepoint 0 + // JavaThread state: _thread_blocked + // at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:140) + // - locked <0xfffffd7e6d03eb28> (a TestThreadDumpMonitorContention$1) + // at java.lang.Thread.run(Thread.java:745) + // + static boolean checkLockedLine(String line) { + Matcher matcher = LOCK_PATTERN.matcher(line); + if (matcher.matches()) { + if (verbose) { + System.out.println("locked_line='" + line + "'"); + } + locked_match_list[locked_line_match_cnt] = new String(line); + locked_line_match_cnt++; + + matcher = HEADER_RUNNABLE_PATTERN.matcher(header_line); + if (matcher.matches()) { + sum_locked_hdr_runnable_cnt++; + } else { + // It's strange, but a locked line can also + // match the HEADER_WAITING_PATTERN{1,2}. 
+ matcher = HEADER_WAITING_PATTERN1.matcher(header_line); + if (matcher.matches()) { + sum_locked_hdr_waiting1_cnt++; + } else { + matcher = HEADER_WAITING_PATTERN2.matcher(header_line); + if (matcher.matches()) { + sum_locked_hdr_waiting2_cnt++; + } else { + System.err.println(); + System.err.println("ERROR: header line does " + + "not match runnable or waiting patterns."); + System.err.println("ERROR: header_line='" + + header_line + "'"); + System.err.println("ERROR: locked_line='" + line + + "'"); + error_cnt++; + } + } + } + + matcher = THREAD_STATE_RUNNABLE_PATTERN.matcher(thread_state_line); + if (matcher.matches()) { + sum_locked_thr_state_runnable_cnt++; + } else { + // It's strange, but a locked line can also + // match the THREAD_STATE_BLOCKED_PATTERN. + matcher = THREAD_STATE_BLOCKED_PATTERN.matcher( + thread_state_line); + if (matcher.matches()) { + sum_locked_thr_state_blocked_cnt++; + } else { + System.err.println(); + System.err.println("ERROR: thread state line does not " + + "match runnable or waiting patterns."); + System.err.println("ERROR: " + "thread_state_line='" + + thread_state_line + "'"); + System.err.println("ERROR: locked_line='" + line + "'"); + error_cnt++; + } + } + + // Have everything we need from this thread stack + // that matches the LOCK_PATTERN. + have_header_line = false; + have_thread_state_line = false; + return true; + } + + return false; + } + + // Process the waiting line here if we found one. + // + // Example: + // "ContendingThread-2" #22 prio=5 os_prio=64 tid=0x00000000007b9800 nid=0x30 waiting for monitor entry [0xfffffd7fc1010000] + // java.lang.Thread.State: BLOCKED (on object monitor) + // at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:140) + // - waiting to lock <0xfffffd7e6a2912f8> (a TestThreadDumpMonitorContention$1) + // at java.lang.Thread.run(Thread.java:745) + // + static boolean checkWaitingLine(String line) { + Matcher matcher = WAITING_PATTERN.matcher(line); + if (matcher.matches()) { + waiting_line_match_cnt++; + if (verbose) { + System.out.println("waiting_line='" + line + "'"); + } + + matcher = HEADER_WAITING_PATTERN1.matcher(header_line); + if (matcher.matches()) { + sum_waiting_hdr_waiting1_cnt++; + } else { + System.err.println(); + System.err.println("ERROR: header line does " + + "not match a waiting pattern."); + System.err.println("ERROR: header_line='" + header_line + "'"); + System.err.println("ERROR: waiting_line='" + line + "'"); + error_cnt++; + } + + matcher = THREAD_STATE_BLOCKED_PATTERN.matcher(thread_state_line); + if (matcher.matches()) { + sum_waiting_thr_state_blocked_cnt++; + } else { + System.err.println(); + System.err.println("ERROR: thread state line " + + "does not match a waiting pattern."); + System.err.println("ERROR: thread_state_line='" + + thread_state_line + "'"); + System.err.println("ERROR: waiting_line='" + line + "'"); + error_cnt++; + } + + // Have everything we need from this thread stack + // that matches the WAITING_PATTERN. 
+ have_header_line = false; + have_thread_state_line = false; + return true; + } + + return false; + } + + static void doSamples() throws Exception { + for (int count = 0; count < n_samples; count++) { + blank_line_match_cnt = 0; + header_prefix_match_cnt = 0; + locked_line_match_cnt = 0; + waiting_line_match_cnt = 0; + // verbose mode or an error has a lot of output so add more space + if (verbose || error_cnt > 0) System.out.println(); + System.out.println("Sample #" + count); + + // We don't use the ProcessTools, OutputBuffer or + // OutputAnalyzer classes from the testlibrary because + // we have a complicated multi-line parse to perform + // on a narrow subset of the JSTACK output. + // + // - we only care about stack traces that match + // HEADER_PREFIX_PATTERN; only two should match + // - we care about at most three lines from each stack trace + // - if both stack traces match LOCKED_PATTERN, then that's + // a failure and we report it + // - for a stack trace that matches LOCKED_PATTERN, we verify: + // - the header line matches HEADER_RUNNABLE_PATTERN + // or HEADER_WAITING_PATTERN{1,2} + // - the thread state line matches THREAD_STATE_BLOCKED_PATTERN + // or THREAD_STATE_RUNNABLE_PATTERN + // - we report any mismatches as failures + // - for a stack trace that matches WAITING_PATTERN, we verify: + // - the header line matches HEADER_WAITING_PATTERN1 + // - the thread state line matches THREAD_STATE_BLOCKED_PATTERN + // - we report any mismatches as failures + // - the stack traces that match HEADER_PREFIX_PATTERN may + // not match either LOCKED_PATTERN or WAITING_PATTERN + // because we might observe the thread outside of + // monitor operations; this is not considered a failure + // + // When we do observe LOCKED_PATTERN or WAITING_PATTERN, + // then we are checking the header and thread state patterns + // that occurred earlier in the current stack trace that + // matched HEADER_PREFIX_PATTERN. We don't use data from + // stack traces that don't match HEADER_PREFIX_PATTERN and + // we don't mix data between the two stack traces that do + // match HEADER_PREFIX_PATTERN. 
+ // + Process process = new ProcessBuilder(JSTACK, PID) + .redirectErrorStream(true).start(); + + BufferedReader reader = new BufferedReader(new InputStreamReader( + process.getInputStream())); + String line; + while ((line = reader.readLine()) != null) { + Matcher matcher = null; + + // process the header line here + if (!have_header_line) { + matcher = HEADER_PREFIX_PATTERN.matcher(line); + if (matcher.matches()) { + header_prefix_match_cnt++; + if (verbose) { + System.out.println(); + System.out.println("header='" + line + "'"); + } + header_line = new String(line); + have_header_line = true; + continue; + } + continue; // skip until have a header line + } + + // process the thread state line here + if (!have_thread_state_line) { + matcher = THREAD_STATE_PREFIX_PATTERN.matcher(line); + if (matcher.matches()) { + if (verbose) { + System.out.println("thread_state='" + line + "'"); + } + thread_state_line = new String(line); + have_thread_state_line = true; + continue; + } + continue; // skip until we have a thread state line + } + + // process the locked line here if we find one + if (checkLockedLine(line)) { + continue; + } + + // process the waiting line here if we find one + if (checkWaitingLine(line)) { + continue; + } + + // process the blank line here if we find one + if (checkBlankLine(line)) { + continue; + } + } + process.waitFor(); + + if (header_prefix_match_cnt != 2) { + System.err.println(); + System.err.println("ERROR: should match exactly two headers."); + System.err.println("ERROR: header_prefix_match_cnt=" + + header_prefix_match_cnt); + error_cnt++; + } + + if (locked_line_match_cnt == 2) { + if (locked_match_list[0].equals(locked_match_list[1])) { + System.err.println(); + System.err.println("ERROR: matching lock lines:"); + System.err.println("ERROR: line[0]'" + + locked_match_list[0] + "'"); + System.err.println("ERROR: line[1]'" + + locked_match_list[1] + "'"); + error_cnt++; + } + } + + if (locked_line_match_cnt == 1) { + // one thread has the lock + if (waiting_line_match_cnt == 1) { + // and the other contended for it + sum_contended_cnt++; + } else { + // and the other is just running + sum_uncontended_cnt++; + } + } else if (waiting_line_match_cnt == 1) { + // one thread is waiting + sum_one_waiting_cnt++; + } else if (waiting_line_match_cnt == 2) { + // both threads are waiting + sum_both_waiting_cnt++; + } else { + // both threads are running + sum_both_running_cnt++; + } + + // slight delay between jstack launches + Thread.sleep(500); + } + + if (error_cnt != 0) { + // skip summary info since there were errors + return; + } + + System.out.println("INFO: Summary for all samples:"); + System.out.println("INFO: both_running_cnt=" + sum_both_running_cnt); + System.out.println("INFO: both_waiting_cnt=" + sum_both_waiting_cnt); + System.out.println("INFO: contended_cnt=" + sum_contended_cnt); + System.out.println("INFO: one_waiting_cnt=" + sum_one_waiting_cnt); + System.out.println("INFO: uncontended_cnt=" + sum_uncontended_cnt); + System.out.println("INFO: locked_hdr_runnable_cnt=" + + sum_locked_hdr_runnable_cnt); + System.out.println("INFO: locked_hdr_waiting1_cnt=" + + sum_locked_hdr_waiting1_cnt); + System.out.println("INFO: locked_hdr_waiting2_cnt=" + + sum_locked_hdr_waiting2_cnt); + System.out.println("INFO: locked_thr_state_blocked_cnt=" + + sum_locked_thr_state_blocked_cnt); + System.out.println("INFO: locked_thr_state_runnable_cnt=" + + sum_locked_thr_state_runnable_cnt); + System.out.println("INFO: waiting_hdr_waiting1_cnt=" + + 
sum_waiting_hdr_waiting1_cnt); + System.out.println("INFO: waiting_thr_state_blocked_cnt=" + + sum_waiting_thr_state_blocked_cnt); + + if (sum_contended_cnt == 0) { + System.err.println("WARNING: the primary scenario for 8036823" + + " has not been exercised by this test run."); + } + } + + // This helper relies on RuntimeMXBean.getName() returning a string + // that looks like this: 5436@mt-haku + // + // The testlibrary has tryFindJvmPid(), but that uses a separate + // process which is much more expensive for finding out your own PID. + // + static String getPid() { + RuntimeMXBean runtimebean = ManagementFactory.getRuntimeMXBean(); + String vmname = runtimebean.getName(); + int i = vmname.indexOf('@'); + if (i != -1) { + vmname = vmname.substring(0, i); + } + return vmname; + } + + static void usage() { + System.err.println("Usage: " + + "java TestThreadDumpMonitorContention [-v] [n_samples]"); + System.exit(1); + } +} diff -r 80e04c4cd4b2 -r 205e1ae8868b test/runtime/classFileParserBug/TestEmptyBootstrapMethodsAttr.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/runtime/classFileParserBug/TestEmptyBootstrapMethodsAttr.java Thu Nov 27 11:27:10 2014 +0000 @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test TestEmptyBootstrapMethodsAttr + * @bug 8041918 + * @library /testlibrary + * @summary Test empty bootstrap_methods table within BootstrapMethods attribute + * @compile TestEmptyBootstrapMethodsAttr.java + * @run main TestEmptyBootstrapMethodsAttr + */ + +import java.io.File; +import com.oracle.java.testlibrary.*; + +public class TestEmptyBootstrapMethodsAttr { + + public static void main(String args[]) throws Throwable { + System.out.println("Regression test for bug 8041918"); + String jarFile = System.getProperty("test.src") + File.separator + "emptynumbootstrapmethods.jar"; + + // ====== extract the test case + ProcessBuilder pb = new ProcessBuilder(new String[] { JDKToolFinder.getJDKTool("jar"), "xvf", jarFile } ); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + + // Test case #1: + // Try loading class with empty bootstrap_methods table where no + // other attributes are following BootstrapMethods in attribute table. + String className = "emptynumbootstrapmethods1"; + + // ======= execute test case #1 + // Expect a lack of main method, this implies that the class loaded correctly + // with an empty bootstrap_methods and did not generate a ClassFormatError. 
+ pb = ProcessTools.createJavaProcessBuilder("-cp", ".", className); + output = new OutputAnalyzer(pb.start()); + output.shouldNotContain("java.lang.ClassFormatError"); + output.shouldContain("Main method not found in class " + className); + output.shouldHaveExitValue(1); + + // Test case #2: + // Try loading class with empty bootstrap_methods table where an + // AnnotationDefault attribute follows the BootstrapMethods in the attribute table. + className = "emptynumbootstrapmethods2"; + + // ======= execute test case #2 + // Expect a lack of main method, this implies that the class loaded correctly + // with an empty bootstrap_methods and did not generate ClassFormatError. + pb = ProcessTools.createJavaProcessBuilder("-cp", ".", className); + output = new OutputAnalyzer(pb.start()); + output.shouldNotContain("java.lang.ClassFormatError"); + output.shouldContain("Main method not found in class " + className); + output.shouldHaveExitValue(1); + } +} diff -r 80e04c4cd4b2 -r 205e1ae8868b test/runtime/classFileParserBug/emptynumbootstrapmethods.jar Binary file test/runtime/classFileParserBug/emptynumbootstrapmethods.jar has changed diff -r 80e04c4cd4b2 -r 205e1ae8868b test/runtime/classFileParserBug/emptynumbootstrapmethods1.jcod --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/runtime/classFileParserBug/emptynumbootstrapmethods1.jcod Thu Nov 27 11:27:10 2014 +0000 @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * This test contains a BootstrapMethods attribute with an empty + * bootstrap_methods table. This yields a BootstrapMethods + * attribute length of 2 and should not cause a + * java.lang.ClassFormatError to be thrown. 
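+ * (The 2-byte attribute length is exactly the u2 num_bootstrap_methods
+ * field of the attribute, which is zero here.)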
+ */ +class emptynumbootstrapmethods1 { + 0xCAFEBABE; + 0; // minor version + 51; // version + [12] { // Constant Pool + ; // first element is empty + class #2; // #1 at 0x0A + Utf8 "emptynumbootstrapmethods1"; // #2 at 0x0D + class #4; // #3 at 0x1F + Utf8 "java/lang/Object"; // #4 at 0x22 + MethodHandle 5b #9; // #5 at 0x35 + NameAndType #7 #8; // #6 at 0x39 + Utf8 "equals"; // #7 at 0x3E + Utf8 "(Ljava/lang/Object;)Z"; // #8 at 0x47 + Method #3 #6; // #9 at 0x5F + Utf8 "equalsx"; // #10 at 0x3E + Utf8 "BootstrapMethods"; // #11 at 0x69 + } // Constant Pool + + 0x0001; // access + #1;// this_cpx + #3;// super_cpx + + [0] { // Interfaces + } // Interfaces + + [0] { // fields + } // fields + + [0] { // methods + } // methods + + [1] { // Attributes + Attr(#11, 2) { // BootstrapMethods at 0x8A + [0] { // bootstrap_methods + } + } // end BootstrapMethods + } // Attributes +} // end class atrbsm00101m10p diff -r 80e04c4cd4b2 -r 205e1ae8868b test/runtime/classFileParserBug/emptynumbootstrapmethods2.jcod --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/runtime/classFileParserBug/emptynumbootstrapmethods2.jcod Thu Nov 27 11:27:10 2014 +0000 @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * This test contains a BootstrapMethods attribute with an empty + * bootstrap_methods table. This yields a BootstrapMethods + * attribute length of 2 and should not cause a + * java.lang.ClassFormatError to be thrown. To ensure that an empty + * bootstrap_methods table is parsed correctly, another attribute, + * AnnotationDefault, follows the BootstrapMethods attribute in + * the attribute table. 
+ */ + +class emptynumbootstrapmethods2 { + 0xCAFEBABE; + 0; // minor version + 51; // version + [14] { // Constant Pool + ; // first element is empty + class #2; // #1 at 0x0A + Utf8 "emptynumbootstrapmethods2"; // #2 at 0x0D + class #4; // #3 at 0x1F + Utf8 "java/lang/Object"; // #4 at 0x22 + MethodHandle 5b #9; // #5 at 0x35 + NameAndType #7 #8; // #6 at 0x39 + Utf8 "equals"; // #7 at 0x3E + Utf8 "(Ljava/lang/Object;)Z"; // #8 at 0x47 + Method #3 #6; // #9 at 0x5F + Utf8 "equalsx"; // #10 at 0x3E + Utf8 "BootstrapMethods"; // #11 at 0x69 + Utf8 "AnnotationDefault"; // #12 + Utf8 "LAnnotationDefaultI;"; // #13 + } // Constant Pool + + 0x0001; // access + #1;// this_cpx + #3;// super_cpx + + [0] { // Interfaces + } // Interfaces + + [0] { // fields + } // fields + + [0] { // methods + } // methods + + [2] { // Attributes + Attr(#11, 2) { // BootstrapMethods at 0x8A + [0] { // bootstrap_methods + } + } // end BootstrapMethods + ; + Attr(#12) { // AnnotationDefault + [] { // type annotations + { // type annotation + 0x00; // target_type + 0x00; // type_parameter_index + []b { // type_path + } + + #13; // type_index + [] { // element_value_pairs + } // element_value_pairs + } // type annotation + } // type annotations + } // end AnnotationDefault + } // Attributes +} // end class atrbsm00101m10p
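As a companion illustration, and not part of the changeset above: the jcod comments state that an empty bootstrap_methods table yields a BootstrapMethods attribute_length of 2, which is just the u2 num_bootstrap_methods field. The sketch below reads a class file with the simplified layout produced by these jcod sources (no interfaces, no fields, no methods) and prints that length and count. The class name PrintNumBootstrapMethods and the zero-fields/zero-methods assumption are illustrative only.

    import java.io.DataInputStream;
    import java.io.FileInputStream;
    import java.io.IOException;

    // Illustrative sketch only: dumps the BootstrapMethods attribute of a class
    // file shaped like the jcod classes above (no interfaces, fields or methods).
    public class PrintNumBootstrapMethods {
        public static void main(String[] args) throws IOException {
            try (DataInputStream in = new DataInputStream(new FileInputStream(args[0]))) {
                in.readInt();                                  // 0xCAFEBABE magic
                in.readUnsignedShort();                        // minor_version
                in.readUnsignedShort();                        // major_version
                int cpCount = in.readUnsignedShort();
                String[] utf8 = new String[cpCount];           // keep only the Utf8 entries
                for (int i = 1; i < cpCount; i++) {
                    int tag = in.readUnsignedByte();
                    switch (tag) {
                        case 1:  utf8[i] = in.readUTF(); break;          // Utf8: u2 length + bytes
                        case 7: case 8: case 16: in.skipBytes(2); break; // Class, String, MethodType
                        case 15: in.skipBytes(3); break;                 // MethodHandle: u1 kind + u2 ref
                        case 3: case 4: case 9: case 10: case 11:
                        case 12: case 18: in.skipBytes(4); break;        // 4-byte constant pool entries
                        case 5: case 6: in.skipBytes(8); i++; break;     // Long/Double take two slots
                        default: throw new IOException("unexpected constant pool tag " + tag);
                    }
                }
                in.skipBytes(6);                               // access_flags, this_class, super_class
                in.skipBytes(2 * in.readUnsignedShort());      // interfaces (none in these tests)
                int fields = in.readUnsignedShort();
                int methods = in.readUnsignedShort();
                if (fields != 0 || methods != 0) {
                    throw new IOException("sketch assumes a class with no fields and no methods");
                }
                int attrCount = in.readUnsignedShort();
                for (int a = 0; a < attrCount; a++) {
                    String name = utf8[in.readUnsignedShort()];
                    int length = in.readInt();
                    if ("BootstrapMethods".equals(name)) {
                        System.out.println("attribute_length=" + length
                                + " num_bootstrap_methods=" + in.readUnsignedShort());
                        in.skipBytes(length - 2);
                    } else {
                        in.skipBytes(length);
                    }
                }
            }
        }
    }

Run against emptynumbootstrapmethods1.class or emptynumbootstrapmethods2.class, extracted from emptynumbootstrapmethods.jar the same way the jtreg test above does with jar xvf, this would be expected to print attribute_length=2 num_bootstrap_methods=0.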