Mercurial > hg > openjdk > lambda > jdk
changeset 10515:733713d7517c
Merge
author | jlaskey |
---|---|
date | Wed, 05 Jun 2013 13:10:11 -0300 |
parents | e857b2a3ecee (diff) 193652dff077 (current diff) |
children | 3b464e13a776 |
files | makefiles/CompileLaunchers.gmk |
diffstat | 220 files changed, 15741 insertions(+), 4458 deletions(-) [+] |
line wrap: on
line diff
--- a/.hgtags Wed May 29 13:22:58 2013 -0300 +++ b/.hgtags Wed Jun 05 13:10:11 2013 -0300 @@ -212,3 +212,5 @@ 8dbb4b159e04de3c447c9242c70505e71f8624c7 jdk8-b88 845025546e35519fbb8970e79fc2a834063a5e19 jdk8-b89 c63eda8f63008a4398d2c22ac8d72f7fef6f9238 jdk8-b90 +169451cf0cc53bde5af24f9820ea3f35ec4b4df4 jdk8-b91 +a2a2a91075ad85becbe10a39d7fd04ef9bea8df5 jdk8-b92
--- a/make/java/management/Exportedfiles.gmk Wed May 29 13:22:58 2013 -0300 +++ b/make/java/management/Exportedfiles.gmk Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ # -# Copyright (c) 2003, 2005, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,7 @@ FILES_export = \ sun/management/ClassLoadingImpl.java \ + sun/management/DiagnosticCommandImpl.java \ sun/management/FileSystemImpl.java \ sun/management/Flag.java \ sun/management/GarbageCollectorImpl.java \
--- a/make/java/management/FILES_c.gmk Wed May 29 13:22:58 2013 -0300 +++ b/make/java/management/FILES_c.gmk Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ # -# Copyright (c) 2003, 2005, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ FILES_c = \ ClassLoadingImpl.c \ + DiagnosticCommandImpl.c \ FileSystemImpl.c \ Flag.c \ GarbageCollectorImpl.c \
--- a/make/java/management/mapfile-vers Wed May 29 13:22:58 2013 -0300 +++ b/make/java/management/mapfile-vers Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ # -# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -39,6 +39,10 @@ Java_com_sun_management_UnixOperatingSystem_getTotalSwapSpaceSize; Java_com_sun_management_UnixOperatingSystem_initialize; Java_sun_management_ClassLoadingImpl_setVerboseClass; + Java_sun_management_DiagnosticCommandImpl_executeDiagnosticCommand; + Java_sun_management_DiagnosticCommandImpl_getDiagnosticCommands; + Java_sun_management_DiagnosticCommandImpl_getDiagnosticCommandInfo; + Java_sun_management_DiagnosticCommandImpl_setNotificationEnabled; Java_sun_management_FileSystemImpl_isAccessUserOnly0; Java_sun_management_Flag_getAllFlagNames; Java_sun_management_Flag_getFlags;
--- a/make/sun/awt/FILES_c_unix.gmk Wed May 29 13:22:58 2013 -0300 +++ b/make/sun/awt/FILES_c_unix.gmk Wed Jun 05 13:10:11 2013 -0300 @@ -171,3 +171,13 @@ GLXSurfaceData.c \ AccelGlyphCache.c \ CUPSfuncs.c + +ifeq ($(PLATFORM), macosx) +FILES_NO_MOTIF_objc = \ + AWTFont.m \ + AWTStrike.m \ + CCharToGlyphMapper.m \ + CGGlyphImages.m \ + CGGlyphOutlines.m \ + CoreTextSupport.m +endif # PLATFORM
--- a/make/sun/awt/FILES_export_unix.gmk Wed May 29 13:22:58 2013 -0300 +++ b/make/sun/awt/FILES_export_unix.gmk Wed Jun 05 13:10:11 2013 -0300 @@ -187,3 +187,14 @@ java/awt/dnd/DnDConstants.java \ sun/awt/CausedFocusEvent.java +ifeq ($(PLATFORM), macosx) +ifeq ($(HEADLESS), true) +FILES_export += \ + sun/awt/SunHints.java \ + sun/font/CCharToGlyphMapper.java \ + sun/font/CFont.java \ + sun/font/CFontManager.java \ + sun/font/CStrike.java \ + sun/font/CStrikeDisposer.java +endif # HEADLESS +endif # PLATFORM
--- a/make/sun/awt/mawt.gmk Wed May 29 13:22:58 2013 -0300 +++ b/make/sun/awt/mawt.gmk Wed Jun 05 13:10:11 2013 -0300 @@ -43,6 +43,10 @@ # compiled based on the motif version. FILES_c = $(FILES_NO_MOTIF_c) +ifeq ($(PLATFORM), macosx) +FILES_objc = $(FILES_NO_MOTIF_objc) +endif # PLATFORM + ifeq ($(PLATFORM), solaris) ifneq ($(ARCH), amd64) FILES_reorder += reorder-$(ARCH) @@ -97,6 +101,10 @@ vpath %.cpp $(SHARE_SRC)/native/$(PKGDIR)/image vpath %.c $(PLATFORM_SRC)/native/$(PKGDIR)/robot_child +ifeq ($(PLATFORM), macosx) +vpath %.m $(call NativeSrcDirList,,native/sun/font) +endif # PLATFORM + # # Libraries to link in. # @@ -192,13 +200,21 @@ $(EVENT_MODEL) ifeq ($(PLATFORM), macosx) -CPPFLAGS += -I$(CUPS_HEADERS_PATH) +CPPFLAGS += -I$(CUPS_HEADERS_PATH) \ + $(call NativeSrcDirList,-I,native/sun/awt) \ + $(call NativeSrcDirList,-I,native/sun/font) ifndef HEADLESS CPPFLAGS += -I$(MOTIF_DIR)/include \ -I$(OPENWIN_HOME)/include LDFLAGS += -L$(MOTIF_LIB) -L$(OPENWIN_LIB) - +else +LDFLAGS += -framework Accelerate \ + -framework ApplicationServices \ + -framework Cocoa \ + -F/System/Library/Frameworks/JavaVM.framework/Frameworks \ + -framework JavaNativeFoundation \ + -framework JavaRuntimeSupport endif # !HEADLESS endif # PLATFORM
--- a/make/tools/CharsetMapping/EUC_KR.map Wed May 29 13:22:58 2013 -0300 +++ b/make/tools/CharsetMapping/EUC_KR.map Wed Jun 05 13:10:11 2013 -0300 @@ -5,6 +5,8 @@ # (2)Added 2 new codepoints (KS X 1001:1998) # 0xA2E6 0x20AC # EURO Sign # 0xA2E7 0x00AE # Registered Sign +# (3) KS X 1001:2002 +# 0xA2E8 0x327E # CIRCLED KOREAN CHARACTER JUEUI (Postal Code Mark) # 0x00 0x0000 0x01 0x0001 @@ -295,6 +297,7 @@ # 0xA2E6 0x20AC # EURO Sign 0xA2E7 0x00AE # Registered Sign +0xA2E8 0x327E # CIRCLED KOREAN CHARACTER JUEUI # 0xA2E0 0x2116 # NUMERO SIGN 0xA2E1 0x33C7 # SQUARE CO
--- a/make/tools/src/build/tools/generatebreakiteratordata/CharSet.java Wed May 29 13:22:58 2013 -0300 +++ b/make/tools/src/build/tools/generatebreakiteratordata/CharSet.java Wed Jun 05 13:10:11 2013 -0300 @@ -39,6 +39,7 @@ package build.tools.generatebreakiteratordata; +import java.util.Arrays; import java.util.Hashtable; /** @@ -701,7 +702,14 @@ * the exact same characters as this one */ public boolean equals(Object that) { - return (that instanceof CharSet) && chars.equals(((CharSet)that).chars); + return (that instanceof CharSet) && Arrays.equals(chars, ((CharSet)that).chars); + } + + /** + * Returns the hash code for this set of characters + */ + public int hashCode() { + return Arrays.hashCode(chars); } /**
--- a/makefiles/CompileJavaClasses.gmk Wed May 29 13:22:58 2013 -0300 +++ b/makefiles/CompileJavaClasses.gmk Wed Jun 05 13:10:11 2013 -0300 @@ -342,7 +342,7 @@ DISABLE_SJAVAC:=true,\ SRC:=$(JDK_TOPDIR)/src/macosx/native/jobjc/src/core/java \ $(JDK_TOPDIR)/src/macosx/native/jobjc/src/runtime-additions/java \ - $(JDK_OUTPUTDIR)/gensrc, \ + $(JDK_OUTPUTDIR)/gensrc_jobjc/src, \ INCLUDES := com/apple/jobjc,\ EXCLUDES := tests/java/com/apple/jobjc,\ BIN:=$(JDK_OUTPUTDIR)/jobjc_classes,\ @@ -355,7 +355,7 @@ SETUP:=GENERATE_JDKBYTECODE,\ SRC:=$(JDK_TOPDIR)/src/macosx/native/jobjc/src/core/java \ $(JDK_TOPDIR)/src/macosx/native/jobjc/src/runtime-additions/java \ - $(JDK_OUTPUTDIR)/gensrc, \ + $(JDK_OUTPUTDIR)/gensrc_jobjc/src, \ INCLUDES := com/apple/jobjc,\ EXCLUDES := tests/java/com/apple/jobjc,\ BIN:=$(JDK_OUTPUTDIR)/jobjc_classes_headers,\
--- a/makefiles/CompileLaunchers.gmk Wed May 29 13:22:58 2013 -0300 +++ b/makefiles/CompileLaunchers.gmk Wed Jun 05 13:10:11 2013 -0300 @@ -472,6 +472,7 @@ -D "JDK_FNAME=unpack200.exe" \ -D "JDK_INTERNAL_NAME=unpack200" \ -D "JDK_FTYPE=0x1L",\ + DEBUG_SYMBOLS:=true,\ MANIFEST:=$(JDK_TOPDIR)/src/windows/resource/unpack200_proto.exe.manifest)) ifeq ($(OPENJDK_TARGET_OS),windows) @@ -555,6 +556,7 @@ $(call SET_SHARED_LIBRARY_NAME,$(LIBRARY_PREFIX)$(SHARED_LIBRARY_SUFFIX)), \ OBJECT_DIR:=$(JDK_OUTPUTDIR)/objs/jexec_obj,\ OUTPUT_DIR:=$(BUILD_JEXEC_DST_DIR),\ + DEBUG_SYMBOLS:=true,\ PROGRAM:=jexec)) BUILD_LAUNCHERS += $(BUILD_JEXEC)
--- a/makefiles/CompileNativeLibraries.gmk Wed May 29 13:22:58 2013 -0300 +++ b/makefiles/CompileNativeLibraries.gmk Wed Jun 05 13:10:11 2013 -0300 @@ -2314,6 +2314,10 @@ $(JDK_TOPDIR)/src/solaris/native/sun/java2d/opengl \ $(JDK_TOPDIR)/src/solaris/native/sun/java2d/x11 +ifeq ($(OPENJDK_TARGET_OS),macosx) + LIBAWT_HEADLESS_DIRS+=$(JDK_TOPDIR)/src/macosx/native/sun/font +endif + LIBAWT_HEADLESS_CFLAGS:=-DHEADLESS=true \ -DX11_PATH=\"$(X11_PATH)\" -DPACKAGE_PATH=\"$(PACKAGE_PATH)\" \ $(CUPS_CFLAGS) \ @@ -2328,6 +2332,12 @@ -I$(JDK_TOPDIR)/src/$(OPENJDK_TARGET_OS_API_DIR)/native/sun/jdga \ $(foreach dir,$(LIBAWT_HEADLESS_DIRS),-I$(dir)) +ifeq ($(OPENJDK_TARGET_OS),macosx) + LIBAWT_HEADLESS_CFLAGS+=\ + -F/System/Library/Frameworks/JavaVM.framework/Frameworks \ + -F/System/Library/Frameworks/ApplicationServices.framework/Frameworks +endif + LIBAWT_HEADLESS_FILES:=\ awt_Font.c \ HeadlessToolkit.c \ @@ -2356,6 +2366,16 @@ AccelGlyphCache.c \ CUPSfuncs.c +ifeq ($(OPENJDK_TARGET_OS),macosx) + LIBAWT_HEADLESS_FILES+=\ + AWTFont.m \ + AWTStrike.m \ + CCharToGlyphMapper.m \ + CGGlyphImages.m \ + CGGlyphOutlines.m \ + CoreTextSupport.m +endif + LIBAWT_HEADLESS_REORDER:= ifeq ($(OPENJDK_TARGET_OS), solaris) ifneq ($(OPENJDK_TARGET_CPU), x86_64) @@ -2382,7 +2402,13 @@ REORDER:=$(LIBAWT_HEADLESS_REORDER), \ LDFLAGS_SUFFIX_linux:=-ljvm -lawt -lm $(LIBDL) -ljava,\ LDFLAGS_SUFFIX_solaris:=$(LIBDL) -ljvm -lawt -lm -ljava $(LIBCXX) -lc,\ - LDFLAGS_SUFFIX_macosx:=-ljvm $(LIBCXX) -lawt $(LIBDL) -ljava,\ + LDFLAGS_SUFFIX_macosx:=-ljvm $(LIBCXX) -lawt $(LIBDL) -ljava \ + -framework Accelerate \ + -framework ApplicationServices \ + -framework Cocoa \ + -F/System/Library/Frameworks/JavaVM.framework/Frameworks \ + -framework JavaNativeFoundation \ + -framework JavaRuntimeSupport,\ OBJECT_DIR:=$(JDK_OUTPUTDIR)/objs/libawt_headless,\ DEBUG_SYMBOLS:=$(DEBUG_ALL_BINARIES)))
--- a/makefiles/GensrcBuffer.gmk Wed May 29 13:22:58 2013 -0300 +++ b/makefiles/GensrcBuffer.gmk Wed Jun 05 13:10:11 2013 -0300 @@ -69,6 +69,9 @@ $1_fulltype := character $1_Fulltype := Character $1_category := integralType + $1_streams := streamableType + $1_streamtype := int + $1_Streamtype := Int $1_LBPV := 1 endif @@ -97,7 +100,7 @@ $1_Type := Long $1_fulltype := long $1_Fulltype := Long - $1_category := integralType + $1_category := integralType $1_LBPV := 3 endif @@ -231,10 +234,13 @@ $(TOOL_SPP) < $$($1_SRC) > $$($1_OUT).tmp \ -K$$($1_type) \ -K$$($1_category) \ + -K$$($1_streams) \ -Dtype=$$($1_type) \ -DType=$$($1_Type) \ -Dfulltype=$$($1_fulltype) \ -DFulltype=$$($1_Fulltype) \ + -Dstreamtype=$$($1_streamtype) \ + -DStreamtype=$$($1_Streamtype) \ -Dx=$$($1_x) \ -Dmemtype=$$($1_memtype) \ -DMemtype=$$($1_Memtype) \
--- a/makefiles/GensrcSwing.gmk Wed May 29 13:22:58 2013 -0300 +++ b/makefiles/GensrcSwing.gmk Wed Jun 05 13:10:11 2013 -0300 @@ -68,10 +68,17 @@ # Dummy variable so far, in the old build system it was false by default SWINGBEAN_DEBUG_FLAG = false # GenDocletBeanInfo is compiled in Tools.gmk and picks up from $(JDK_OUTPUTDIR)/btclasses -$(JDK_OUTPUTDIR)/gensrc_no_srczip/_the.generated_beaninfo: $(BEANS_SRC) $(JDK_OUTPUTDIR)/gensrc_no_srczip/javax/swing/SwingBeanInfoBase.java $(JDK_OUTPUTDIR)/gensrc/sun/swing/BeanInfoUtils.java $(BUILD_TOOLS) +# LocaleDataMetaInfo needs to be generated before running this to avoid confusing errors +# in the build log. +$(JDK_OUTPUTDIR)/gensrc_no_srczip/_the.generated_beaninfo: $(BEANS_SRC) \ + $(JDK_OUTPUTDIR)/gensrc_no_srczip/javax/swing/SwingBeanInfoBase.java \ + $(JDK_OUTPUTDIR)/gensrc/sun/swing/BeanInfoUtils.java $(BUILD_TOOLS) \ + | $(GENSRC_LOCALEDATAMETAINFO) $(ECHO) Generating beaninfo $(MKDIR) -p $(JDK_OUTPUTDIR)/gensrc_no_srczip/javax/swing - $(JAVA) -Djava.awt.headless=true $(NEW_JAVADOC) -doclet GenDocletBeanInfo \ + $(JAVA) -Djava.awt.headless=true $(NEW_JAVADOC) \ + -sourcepath "$(JDK_TOPDIR)/src/share/classes$(PATH_SEP)$(JDK_OUTPUTDIR)/gensrc" \ + -doclet GenDocletBeanInfo \ -x $(SWINGBEAN_DEBUG_FLAG) -d $(JDK_OUTPUTDIR)/gensrc_no_srczip/javax/swing \ -t $(DOCLETSRC_DIR)/SwingBeanInfo.template -docletpath $(JDK_OUTPUTDIR)/btclasses \ -XDignore.symbol.file=true \
--- a/makefiles/Images.gmk Wed May 29 13:22:58 2013 -0300 +++ b/makefiles/Images.gmk Wed Jun 05 13:10:11 2013 -0300 @@ -352,11 +352,8 @@ JDK_MAN_PAGES += jvisualvm.1 endif - ifndef OPENJDK - MAN_SRC_BASEDIR:=$(JDK_TOPDIR)/src/closed - else - MAN_SRC_BASEDIR:=$(JDK_TOPDIR)/src - endif + # This variable is potentially overridden in the closed makefile. + MAN_SRC_BASEDIR ?= $(JDK_TOPDIR)/src ifeq ($(OPENJDK_TARGET_OS), linux) MAN_SRC_DIR:=$(MAN_SRC_BASEDIR)/linux/doc
--- a/makefiles/mapfiles/libmanagement/mapfile-vers Wed May 29 13:22:58 2013 -0300 +++ b/makefiles/mapfiles/libmanagement/mapfile-vers Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ # -# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -39,6 +39,10 @@ Java_com_sun_management_UnixOperatingSystem_getTotalSwapSpaceSize; Java_com_sun_management_UnixOperatingSystem_initialize; Java_sun_management_ClassLoadingImpl_setVerboseClass; + Java_sun_management_DiagnosticCommandImpl_executeDiagnosticCommand; + Java_sun_management_DiagnosticCommandImpl_getDiagnosticCommands; + Java_sun_management_DiagnosticCommandImpl_getDiagnosticCommandInfo; + Java_sun_management_DiagnosticCommandImpl_setNotificationEnabled; Java_sun_management_FileSystemImpl_isAccessUserOnly0; Java_sun_management_Flag_getAllFlagNames; Java_sun_management_Flag_getFlags;
--- a/src/macosx/bin/java_md_macosx.c Wed May 29 13:22:58 2013 -0300 +++ b/src/macosx/bin/java_md_macosx.c Wed Jun 05 13:10:11 2013 -0300 @@ -44,7 +44,6 @@ #include <Cocoa/Cocoa.h> #include <objc/objc-runtime.h> #include <objc/objc-auto.h> -#include <dispatch/dispatch.h> #include <errno.h> #include <spawn.h> @@ -1001,6 +1000,32 @@ setenv(envVar, "1", 1); } +/* This class is made for performSelectorOnMainThread when java main + * should be launched on main thread. + * We cannot use dispatch_sync here, because it blocks the main dispatch queue + * which is used inside Cocoa + */ +@interface JavaLaunchHelper : NSObject { + int _returnValue; +} +- (void) launchJava:(NSValue*)argsValue; +- (int) getReturnValue; +@end + +@implementation JavaLaunchHelper + +- (void) launchJava:(NSValue*)argsValue +{ + _returnValue = JavaMain([argsValue pointerValue]); +} + +- (int) getReturnValue +{ + return _returnValue; +} + +@end + // MacOSX we may continue in the same thread int JVMInit(InvocationFunctions* ifn, jlong threadStackSize, @@ -1010,16 +1035,22 @@ JLI_TraceLauncher("In same thread\n"); // need to block this thread against the main thread // so signals get caught correctly - __block int rslt; - dispatch_sync(dispatch_get_main_queue(), ^(void) { - JavaMainArgs args; - args.argc = argc; - args.argv = argv; - args.mode = mode; - args.what = what; - args.ifn = *ifn; - rslt = JavaMain((void*)&args); - }); + JavaMainArgs args; + args.argc = argc; + args.argv = argv; + args.mode = mode; + args.what = what; + args.ifn = *ifn; + int rslt; + NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init]; + { + JavaLaunchHelper* launcher = [[[JavaLaunchHelper alloc] init] autorelease]; + [launcher performSelectorOnMainThread:@selector(launchJava:) + withObject:[NSValue valueWithPointer:(void*)&args] + waitUntilDone:YES]; + rslt = [launcher getReturnValue]; + } + [pool drain]; return rslt; } else { return ContinueInNewThread(ifn, threadStackSize, argc, argv, mode, what, ret);
--- a/src/macosx/classes/sun/lwawt/macosx/CDropTargetContextPeer.java Wed May 29 13:22:58 2013 -0300 +++ b/src/macosx/classes/sun/lwawt/macosx/CDropTargetContextPeer.java Wed Jun 05 13:10:11 2013 -0300 @@ -38,7 +38,7 @@ private long fNativeDropTransfer = 0; private long fNativeDataAvailable = 0; private Object fNativeData = null; - private boolean insideTarget = false; + private boolean insideTarget = true; Object awtLockAccess = new Object();
--- a/src/macosx/classes/sun/lwawt/macosx/CPlatformWindow.java Wed May 29 13:22:58 2013 -0300 +++ b/src/macosx/classes/sun/lwawt/macosx/CPlatformWindow.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -115,6 +115,8 @@ static final int RESIZABLE = 1 << 9; // both a style bit and prop bit static final int NONACTIVATING = 1 << 24; + static final int IS_DIALOG = 1 << 25; + static final int IS_MODAL = 1 << 26; static final int _STYLE_PROP_BITMASK = DECORATED | TEXTURED | UNIFIED | UTILITY | HUD | SHEET | CLOSEABLE | MINIMIZABLE | RESIZABLE; @@ -374,6 +376,13 @@ } } + if (isDialog) { + styleBits = SET(styleBits, IS_DIALOG, true); + if (((Dialog) target).isModal()) { + styleBits = SET(styleBits, IS_MODAL, true); + } + } + peer.setTextured(IS(TEXTURED, styleBits)); return styleBits;
--- a/src/macosx/classes/sun/lwawt/macosx/CPrinterJob.java Wed May 29 13:22:58 2013 -0300 +++ b/src/macosx/classes/sun/lwawt/macosx/CPrinterJob.java Wed Jun 05 13:10:11 2013 -0300 @@ -36,6 +36,7 @@ import javax.print.*; import javax.print.attribute.PrintRequestAttributeSet; import javax.print.attribute.HashPrintRequestAttributeSet; +import javax.print.attribute.standard.PageRanges; import sun.java2d.*; import sun.print.*; @@ -173,6 +174,19 @@ if (nsPrintInfo != null) { fNSPrintInfo = nsPrintInfo.getValue(); } + + PageRanges pageRangesAttr = (PageRanges)attributes.get(PageRanges.class); + if (isSupportedValue(pageRangesAttr, attributes)) { + SunPageSelection rangeSelect = (SunPageSelection)attributes.get(SunPageSelection.class); + // If rangeSelect is not null, we are using AWT's print dialog that has + // All, Selection, and Range radio buttons + if (rangeSelect == null || rangeSelect == SunPageSelection.RANGE) { + int[][] range = pageRangesAttr.getMembers(); + // setPageRange will set firstPage and lastPage as called in getFirstPage + // and getLastPage + setPageRange(range[0][0] - 1, range[0][1] - 1); + } + } } volatile boolean onEventThread; @@ -225,7 +239,6 @@ * the end of the document. Note that firstPage * and lastPage are 0 based page indices. */ - int numPages = mDocument.getNumberOfPages(); int firstPage = getFirstPage(); int lastPage = getLastPage(); @@ -242,42 +255,53 @@ userCancelled = false; } - if (EventQueue.isDispatchThread()) { - // This is an AWT EventQueue, and this print rendering loop needs to block it. - - onEventThread = true; + //Add support for PageRange + PageRanges pr = (attributes == null) ? null + : (PageRanges)attributes.get(PageRanges.class); + int[][] prMembers = (pr == null) ? new int[0][0] : pr.getMembers(); + int loopi = 0; + do { + if (EventQueue.isDispatchThread()) { + // This is an AWT EventQueue, and this print rendering loop needs to block it. 
- printingLoop = AccessController.doPrivileged(new PrivilegedAction<SecondaryLoop>() { - @Override - public SecondaryLoop run() { - return Toolkit.getDefaultToolkit() - .getSystemEventQueue() - .createSecondaryLoop(); - } - }); + onEventThread = true; + + printingLoop = AccessController.doPrivileged(new PrivilegedAction<SecondaryLoop>() { + @Override + public SecondaryLoop run() { + return Toolkit.getDefaultToolkit() + .getSystemEventQueue() + .createSecondaryLoop(); + } + }); - try { - // Fire off the print rendering loop on the AppKit thread, and don't have - // it wait and block this thread. - if (printLoop(false, firstPage, lastPage)) { - // Start a secondary loop on EDT until printing operation is finished or cancelled - printingLoop.enter(); + try { + // Fire off the print rendering loop on the AppKit thread, and don't have + // it wait and block this thread. + if (printLoop(false, firstPage, lastPage)) { + // Start a secondary loop on EDT until printing operation is finished or cancelled + printingLoop.enter(); + } + } catch (Exception e) { + e.printStackTrace(); } - } catch (Exception e) { - e.printStackTrace(); + } else { + // Fire off the print rendering loop on the AppKit, and block this thread + // until it is done. + // But don't actually block... we need to come back here! + onEventThread = false; + + try { + printLoop(true, firstPage, lastPage); + } catch (Exception e) { + e.printStackTrace(); + } } - } else { - // Fire off the print rendering loop on the AppKit, and block this thread - // until it is done. - // But don't actually block... we need to come back here! - onEventThread = false; - - try { - printLoop(true, firstPage, lastPage); - } catch (Exception e) { - e.printStackTrace(); + if (++loopi < prMembers.length) { + firstPage = prMembers[loopi][0]-1; + lastPage = prMembers[loopi][1] -1; } - } + } while (loopi < prMembers.length); } finally { synchronized (this) { // NOTE: Native code shouldn't allow exceptions out while
--- a/src/macosx/native/sun/awt/AWTWindow.m Wed May 29 13:22:58 2013 -0300 +++ b/src/macosx/native/sun/awt/AWTWindow.m Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -536,8 +536,12 @@ - (void) windowDidBecomeKey: (NSNotification *) notification { AWT_ASSERT_APPKIT_THREAD; [AWTToolkit eventCountPlusPlus]; - [CMenuBar activate:self.javaMenuBar modallyDisabled:NO]; AWTWindow *opposite = [AWTWindow lastKeyWindow]; + if (!IS(self.styleBits, IS_DIALOG)) { + [CMenuBar activate:self.javaMenuBar modallyDisabled:NO]; + } else if (IS(self.styleBits, IS_MODAL)) { + [CMenuBar activate:opposite->javaMenuBar modallyDisabled:YES]; + } [AWTWindow setLastKeyWindow:nil]; [self _deliverWindowFocusEvent:YES oppositeWindow: opposite];
--- a/src/macosx/native/sun/font/AWTFont.m Wed May 29 13:22:58 2013 -0300 +++ b/src/macosx/native/sun/font/AWTFont.m Wed Jun 05 13:10:11 2013 -0300 @@ -395,6 +395,7 @@ #pragma mark --- Miscellaneous JNI --- +#ifndef HEADLESS /* * Class: sun_awt_PlatformFont * Method: initIDs @@ -416,3 +417,4 @@ (JNIEnv *env, jclass cls) { } +#endif
--- a/src/share/classes/com/sun/beans/finder/AbstractFinder.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/com/sun/beans/finder/AbstractFinder.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,9 @@ */ package com.sun.beans.finder; +import java.lang.reflect.Executable; +import java.lang.reflect.Modifier; + import java.util.HashMap; import java.util.Map; @@ -37,7 +40,7 @@ * * @author Sergey A. Malenkov */ -abstract class AbstractFinder<T> { +abstract class AbstractFinder<T extends Executable> { private final Class<?>[] args; /** @@ -53,27 +56,6 @@ } /** - * Returns an array of {@code Class} objects - * that represent the formal parameter types of the method. - * Returns an empty array if the method takes no parameters. - * - * @param method the object that represents method - * @return the parameter types of the method - */ - protected abstract Class<?>[] getParameters(T method); - - /** - * Returns {@code true} if and only if the method - * was declared to take a variable number of arguments. - * - * @param method the object that represents method - * @return {@code true} if the method was declared - * to take a variable number of arguments; - * {@code false} otherwise - */ - protected abstract boolean isVarArgs(T method); - - /** * Checks validness of the method. * At least the valid method should be public. * @@ -81,7 +63,9 @@ * @return {@code true} if the method is valid, * {@code false} otherwise */ - protected abstract boolean isValid(T method); + protected boolean isValid(T method) { + return Modifier.isPublic(method.getModifiers()); + } /** * Performs a search in the {@code methods} array. 
@@ -109,7 +93,7 @@ for (T newMethod : methods) { if (isValid(newMethod)) { - Class<?>[] newParams = getParameters(newMethod); + Class<?>[] newParams = newMethod.getParameterTypes(); if (newParams.length == this.args.length) { PrimitiveWrapperMap.replacePrimitivesWithWrappers(newParams); if (isAssignable(newParams, this.args)) { @@ -120,6 +104,11 @@ boolean useNew = isAssignable(oldParams, newParams); boolean useOld = isAssignable(newParams, oldParams); + if (useOld && useNew) { + // only if parameters are equal + useNew = !newMethod.isSynthetic(); + useOld = !oldMethod.isSynthetic(); + } if (useOld == useNew) { ambiguous = true; } else if (useNew) { @@ -130,7 +119,7 @@ } } } - if (isVarArgs(newMethod)) { + if (newMethod.isVarArgs()) { int length = newParams.length - 1; if (length <= this.args.length) { Class<?>[] array = new Class<?>[this.args.length]; @@ -160,6 +149,11 @@ boolean useNew = isAssignable(oldParams, newParams); boolean useOld = isAssignable(newParams, oldParams); + if (useOld && useNew) { + // only if parameters are equal + useNew = !newMethod.isSynthetic(); + useOld = !oldMethod.isSynthetic(); + } if (useOld == useNew) { if (oldParams == map.get(oldMethod)) { ambiguous = true;
--- a/src/share/classes/com/sun/beans/finder/ConstructorFinder.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/com/sun/beans/finder/ConstructorFinder.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -86,44 +86,4 @@ private ConstructorFinder(Class<?>[] args) { super(args); } - - /** - * Returns an array of {@code Class} objects - * that represent the formal parameter types of the constructor. - * Returns an empty array if the constructor takes no parameters. - * - * @param constructor the object that represents constructor - * @return the parameter types of the constructor - */ - @Override - protected Class<?>[] getParameters(Constructor<?> constructor) { - return constructor.getParameterTypes(); - } - - /** - * Returns {@code true} if and only if the constructor - * was declared to take a variable number of arguments. - * - * @param constructor the object that represents constructor - * @return {@code true} if the constructor was declared - * to take a variable number of arguments; - * {@code false} otherwise - */ - @Override - protected boolean isVarArgs(Constructor<?> constructor) { - return constructor.isVarArgs(); - } - - /** - * Checks validness of the constructor. - * The valid constructor should be public. - * - * @param constructor the object that represents constructor - * @return {@code true} if the constructor is valid, - * {@code false} otherwise - */ - @Override - protected boolean isValid(Constructor<?> constructor) { - return Modifier.isPublic(constructor.getModifiers()); - } }
--- a/src/share/classes/com/sun/beans/finder/MethodFinder.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/com/sun/beans/finder/MethodFinder.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -196,33 +196,6 @@ } /** - * Returns an array of {@code Class} objects - * that represent the formal parameter types of the method. - * Returns an empty array if the method takes no parameters. - * - * @param method the object that represents method - * @return the parameter types of the method - */ - @Override - protected Class<?>[] getParameters(Method method) { - return method.getParameterTypes(); - } - - /** - * Returns {@code true} if and only if the method - * was declared to take a variable number of arguments. - * - * @param method the object that represents method - * @return {@code true} if the method was declared - * to take a variable number of arguments; - * {@code false} otherwise - */ - @Override - protected boolean isVarArgs(Method method) { - return method.isVarArgs(); - } - - /** * Checks validness of the method. * The valid method should be public and * should have the specified name. @@ -233,6 +206,6 @@ */ @Override protected boolean isValid(Method method) { - return !method.isBridge() && Modifier.isPublic(method.getModifiers()) && method.getName().equals(this.name); + return super.isValid(method) && method.getName().equals(this.name); } }
--- a/src/share/classes/com/sun/crypto/provider/DHKeyAgreement.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/com/sun/crypto/provider/DHKeyAgreement.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -403,8 +403,9 @@ } return skey; } else if (algorithm.equals("TlsPremasterSecret")) { - // return entire secret - return new SecretKeySpec(secret, "TlsPremasterSecret"); + // remove leading zero bytes per RFC 5246 Section 8.1.2 + return new SecretKeySpec( + KeyUtil.trimZeroes(secret), "TlsPremasterSecret"); } else { throw new NoSuchAlgorithmException("Unsupported secret key " + "algorithm: "+ algorithm);
--- a/src/share/classes/com/sun/crypto/provider/HmacPKCS12PBESHA1.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/com/sun/crypto/provider/HmacPKCS12PBESHA1.java Wed Jun 05 13:10:11 2013 -0300 @@ -86,12 +86,13 @@ throw new InvalidKeyException("SecretKey of PBE type required"); } if (params == null) { - // generate default for salt and iteration count if necessary - if (salt == null) { - salt = new byte[20]; - SunJCE.getRandom().nextBytes(salt); + // should not auto-generate default values since current + // javax.crypto.Mac api does not have any method for caller to + // retrieve the generated defaults. + if ((salt == null) || (iCount == 0)) { + throw new InvalidAlgorithmParameterException + ("PBEParameterSpec required for salt and iteration count"); } - if (iCount == 0) iCount = 100; } else if (!(params instanceof PBEParameterSpec)) { throw new InvalidAlgorithmParameterException ("PBEParameterSpec type required");
--- a/src/share/classes/com/sun/crypto/provider/PBMAC1Core.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/com/sun/crypto/provider/PBMAC1Core.java Wed Jun 05 13:10:11 2013 -0300 @@ -42,12 +42,10 @@ */ abstract class PBMAC1Core extends HmacCore { - private static final int DEFAULT_SALT_LENGTH = 20; - private static final int DEFAULT_COUNT = 4096; - + // NOTE: this class inherits the Cloneable interface from HmacCore + // Need to override clone() if mutable fields are added. private final String kdfAlgo; private final String hashAlgo; - private final PBKDF2Core kdf; private final int blockLength; // in octets /** @@ -56,13 +54,15 @@ */ PBMAC1Core(String kdfAlgo, String hashAlgo, int blockLength) throws NoSuchAlgorithmException { - super(hashAlgo, blockLength); this.kdfAlgo = kdfAlgo; this.hashAlgo = hashAlgo; this.blockLength = blockLength; + } - switch(kdfAlgo) { + private static PBKDF2Core getKDFImpl(String algo) { + PBKDF2Core kdf = null; + switch(algo) { case "HmacSHA1": kdf = new PBKDF2Core.HmacSHA1(); break; @@ -79,9 +79,10 @@ kdf = new PBKDF2Core.HmacSHA512(); break; default: - throw new NoSuchAlgorithmException( - "No MAC implementation for " + kdfAlgo); + throw new ProviderException( + "No MAC implementation for " + algo); } + return kdf; } /** @@ -120,12 +121,13 @@ throw new InvalidKeyException("SecretKey of PBE type required"); } if (params == null) { - // generate default for salt and iteration count if necessary - if (salt == null) { - salt = new byte[DEFAULT_SALT_LENGTH]; - SunJCE.getRandom().nextBytes(salt); + // should not auto-generate default values since current + // javax.crypto.Mac api does not have any method for caller to + // retrieve the generated defaults. 
+ if ((salt == null) || (iCount == 0)) { + throw new InvalidAlgorithmParameterException + ("PBEParameterSpec required for salt and iteration count"); } - if (iCount == 0) iCount = DEFAULT_COUNT; } else if (!(params instanceof PBEParameterSpec)) { throw new InvalidAlgorithmParameterException ("PBEParameterSpec type required"); @@ -168,7 +170,7 @@ java.util.Arrays.fill(passwdChars, ' '); SecretKey s = null; - + PBKDF2Core kdf = getKDFImpl(kdfAlgo); try { s = kdf.engineGenerateSecret(pbeSpec);
--- a/src/share/classes/com/sun/crypto/provider/SunJCE.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/com/sun/crypto/provider/SunJCE.java Wed Jun 05 13:10:11 2013 -0300 @@ -731,10 +731,11 @@ put("Mac.HmacSHA384 SupportedKeyFormats", "RAW"); put("Mac.HmacSHA512 SupportedKeyFormats", "RAW"); put("Mac.HmacPBESHA1 SupportedKeyFormats", "RAW"); - put("Mac.HmacPBESHA224 SupportedKeyFormats", "RAW"); - put("Mac.HmacPBESHA256 SupportedKeyFormats", "RAW"); - put("Mac.HmacPBESHA384 SupportedKeyFormats", "RAW"); - put("Mac.HmacPBESHA512 SupportedKeyFormats", "RAW"); + put("Mac.PBEWithHmacSHA1 SupportedKeyFormats", "RAW"); + put("Mac.PBEWithHmacSHA224 SupportedKeyFormats", "RAW"); + put("Mac.PBEWithHmacSHA256 SupportedKeyFormats", "RAW"); + put("Mac.PBEWithHmacSHA384 SupportedKeyFormats", "RAW"); + put("Mac.PBEWithHmacSHA512 SupportedKeyFormats", "RAW"); put("Mac.SslMacMD5 SupportedKeyFormats", "RAW"); put("Mac.SslMacSHA1 SupportedKeyFormats", "RAW");
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/classes/com/sun/management/DiagnosticCommandMBean.java Wed Jun 05 13:10:11 2013 -0300 @@ -0,0 +1,220 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package com.sun.management; + +import java.lang.management.PlatformManagedObject; +import javax.management.DynamicMBean; + +/** + * Management interface for the diagnostic commands for the HotSpot Virtual Machine. + * + * <p>The {@code DiagnosticCommandMBean} is registered to the + * {@linkplain java.lang.management.ManagementFactory#getPlatformMBeanServer + * platform MBeanServer} as are other platform MBeans. 
+ * + * <p>The {@link javax.management.ObjectName ObjectName} for uniquely identifying + * the diagnostic MBean within an MBeanServer is: + * <blockquote> + * {@code com.sun.management:type=DiagnosticCommand} + * </blockquote> + * + * <p>This MBean is a {@link javax.management.DynamicMBean DynamicMBean} + * and also a {@link javax.management.NotificationEmitter}. + * The {@code DiagnosticCommandMBean} is generated at runtime and is subject to + * modifications during the lifetime of the Java virtual machine. + * + * A <em>diagnostic command</em> is represented as an operation of + * the {@code DiagnosticCommandMBean} interface. Each diagnostic command has: + * <ul> + * <li>the diagnostic command name which is the name being referenced in + * the HotSpot Virtual Machine</li> + * <li>the MBean operation name which is the + * {@linkplain javax.management.MBeanOperationInfo#getName() name} + * generated for the diagnostic command operation invocation. + * The MBean operation name is implementation dependent</li> + * </ul> + * + * The recommended way to transform a diagnostic command name into a MBean + * operation name is as follows: + * <ul> + * <li>All characters from the first one to the first dot are set to be + * lower-case characters</li> + * <li>Every dot or underline character is removed and the following + * character is set to be an upper-case character</li> + * <li>All other characters are copied without modification</li> + * </ul> + * + * <p>The diagnostic command name is always provided with the meta-data on the + * operation in a field named {@code dcmd.name} (see below). + * + * <p>A diagnostic command may or may not support options or arguments. + * All the operations return {@code String} and either take + * no parameter for operations that do not support any option or argument, + * or take a {@code String[]} parameter for operations that support at least + * one option or argument. + * Each option or argument must be stored in a single String. 
+ * Options or arguments split across several String instances are not supported. + * + * <p>The distinction between options and arguments: options are identified by + * the option name while arguments are identified by their position in the + * command line. Options and arguments are processed in the order of the array + * passed to the invocation method. + * + * <p>Like any operation of a dynamic MBean, each of these operations is + * described by {@link javax.management.MBeanOperationInfo MBeanOperationInfo} + * instance. Here's the values returned by this object: + * <ul> + * <li>{@link javax.management.MBeanOperationInfo#getName() getName()} + * returns the operation name generated from the diagnostic command name</li> + * <li>{@link javax.management.MBeanOperationInfo#getDescription() getDescription()} + * returns the diagnostic command description + * (the same as the one return in the 'help' command)</li> + * <li>{@link javax.management.MBeanOperationInfo#getImpact() getImpact()} + * returns <code>ACTION_INFO</code></li> + * <li>{@link javax.management.MBeanOperationInfo#getReturnType() getReturnType()} + * returns {@code java.lang.String}</li> + * <li>{@link javax.management.MBeanOperationInfo#getDescriptor() getDescriptor()} + * returns a Descriptor instance (see below)</li> + * </ul> + * + * <p>The {@link javax.management.Descriptor Descriptor} + * is a collection of fields containing additional + * meta-data for a JMX element. A field is a name and an associated value. 
+ * The additional meta-data provided for an operation associated with a + * diagnostic command are described in the table below: + * <p> + * + * <table border="1" cellpadding="5"> + * <tr> + * <th>Name</th><th>Type</th><th>Description</th> + * </tr> + * <tr> + * <td>dcmd.name</td><td>String</td> + * <td>The original diagnostic command name (not the operation name)</td> + * </tr> + * <tr> + * <td>dcmd.description</td><td>String</td> + * <td>The diagnostic command description</td> + * </tr> + * <tr> + * <td>dcmd.help</td><td>String</td> + * <td>The full help message for this diagnostic command (same output as + * the one produced by the 'help' command)</td> + * </tr> + * <tr> + * <td>dcmd.vmImpact</td><td>String</td> + * <td>The impact of the diagnostic command, + * this value is the same as the one printed in the 'impact' + * section of the help message of the diagnostic command, and it + * is different from the getImpact() of the MBeanOperationInfo</td> + * </tr> + * <tr> + * <td>dcmd.enabled</td><td>boolean</td> + * <td>True if the diagnostic command is enabled, false otherwise</td> + * </tr> + * <tr> + * <td>dcmd.permissionClass</td><td>String</td> + * <td>Some diagnostic command might require a specific permission to be + * executed, in addition to the MBeanPermission to invoke their + * associated MBean operation. 
This field returns the fully qualified + * name of the permission class or null if no permission is required + * </td> + * </tr> + * <tr> + * <td>dcmd.permissionName</td><td>String</td> + * <td>The first argument of the permission required to execute this + * diagnostic command or null if no permission is required</td> + * </tr> + * <tr> + * <td>dcmd.permissionAction</td><td>String</td> + * <td>The second argument of the permission required to execute this + * diagnostic command or null if the permission constructor has only + * one argument (like the ManagementPermission) or if no permission + * is required</td> + * </tr> + * <tr> + * <td>dcmd.arguments</td><td>Descriptor</td> + * <td>A Descriptor instance containing the descriptions of options and + * arguments supported by the diagnostic command (see below)</td> + * </tr> + * </table> + * <p> + * + * <p>The description of parameters (options or arguments) of a diagnostic + * command is provided within a Descriptor instance. In this Descriptor, + * each field name is a parameter name, and each field value is itself + * a Descriptor instance. The fields provided in this second Descriptor + * instance are described in the table below: + * + * <table border="1" cellpadding="5"> + * <tr> + * <th>Name</th><th>Type</th><th>Description</th> + * </tr> + * <tr> + * <td>dcmd.arg.name</td><td>String</td> + * <td>The name of the parameter</td> + * </tr> + * <tr> + * <td>dcmd.arg.type</td><td>String</td> + * <td>The type of the parameter. The returned String is the name of a type + * recognized by the diagnostic command parser. These types are not + * Java types and are implementation dependent. 
+ * </td> + * </tr> + * <tr> + * <td>dcmd.arg.description</td><td>String</td> + * <td>The parameter description</td> + * </tr> + * <tr> + * <td>dcmd.arg.isMandatory</td><td>boolean</td> + * <td>True if the parameter is mandatory, false otherwise</td> + * </tr> + * <tr> + * <td>dcmd.arg.isOption</td><td>boolean</td> + * <td>True if the parameter is an option, false if it is an argument</td> + * </tr> + * <tr> + * <td>dcmd.arg.isMultiple</td><td>boolean</td> + * <td>True if the parameter can be specified several times, false + * otherwise</td> + * </tr> + * </table> + * + * <p>When the set of diagnostic commands currently supported by the Java + * Virtual Machine is modified, the {@code DiagnosticCommandMBean} emits + * a {@link javax.management.Notification} with a + * {@linkplain javax.management.Notification#getType() type} of + * <a href="{@docRoot}/../../../../api/javax/management/MBeanInfo.html#info-changed"> + * {@code "jmx.mbean.info.changed"}</a> and a + * {@linkplain javax.management.Notification#getUserData() userData} that + * is the new {@code MBeanInfo}. + * + * @since 8 + */ +public interface DiagnosticCommandMBean extends DynamicMBean +{ + +}
--- a/src/share/classes/java/lang/Integer.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/lang/Integer.java Wed Jun 05 13:10:11 2013 -0300 @@ -26,7 +26,6 @@ package java.lang; import java.lang.annotation.Native; -import java.util.Properties; /** * The {@code Integer} class wraps a value of the primitive type @@ -185,7 +184,7 @@ * @since 1.8 */ public static String toUnsignedString(int i, int radix) { - return Long.toString(toUnsignedLong(i), radix); + return Long.toUnsignedString(toUnsignedLong(i), radix); } /** @@ -307,20 +306,39 @@ /** * Convert the integer to an unsigned number. */ - private static String toUnsignedString0(int i, int shift) { - char[] buf = new char[32]; - int charPos = 32; + private static String toUnsignedString0(int val, int shift) { + // assert shift > 0 && shift <=5 : "Illegal shift value"; + int mag = Integer.SIZE - Integer.numberOfLeadingZeros(val); + int chars = Math.max(((mag + (shift - 1)) / shift), 1); + char[] buf = new char[chars]; + + formatUnsignedInt(val, shift, buf, 0, chars); + + // Use special constructor which takes over "buf". + return new String(buf, true); + } + + /** + * Format a long (treated as unsigned) into a character buffer. 
+ * @param val the unsigned int to format + * @param shift the log2 of the base to format in (4 for hex, 3 for octal, 1 for binary) + * @param buf the character buffer to write to + * @param offset the offset in the destination buffer to start at + * @param len the number of characters to write + * @return the lowest character location used + */ + static int formatUnsignedInt(int val, int shift, char[] buf, int offset, int len) { + int charPos = len; int radix = 1 << shift; int mask = radix - 1; do { - buf[--charPos] = digits[i & mask]; - i >>>= shift; - } while (i != 0); + buf[offset + --charPos] = Integer.digits[val & mask]; + val >>>= shift; + } while (val != 0 && charPos > 0); - return new String(buf, charPos, (32 - charPos)); + return charPos; } - final static char [] DigitTens = { '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', @@ -875,6 +893,7 @@ * Returns the value of this {@code Integer} as a {@code long} * after a widening primitive conversion. * @jls 5.1.2 Widening Primitive Conversions + * @see Integer#toUnsignedLong(int) */ public long longValue() { return (long)value;
--- a/src/share/classes/java/lang/Long.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/lang/Long.java Wed Jun 05 13:10:11 2013 -0300 @@ -28,6 +28,7 @@ import java.lang.annotation.Native; import java.math.*; + /** * The {@code Long} class wraps a value of the primitive type {@code * long} in an object. An object of type {@code Long} contains a @@ -344,18 +345,39 @@ } /** - * Convert the integer to an unsigned number. + * Format a long (treated as unsigned) into a String. + * @param val the value to format + * @param shift the log2 of the base to format in (4 for hex, 3 for octal, 1 for binary) */ - private static String toUnsignedString0(long i, int shift) { - char[] buf = new char[64]; - int charPos = 64; + static String toUnsignedString0(long val, int shift) { + // assert shift > 0 && shift <=5 : "Illegal shift value"; + int mag = Long.SIZE - Long.numberOfLeadingZeros(val); + int chars = Math.max(((mag + (shift - 1)) / shift), 1); + char[] buf = new char[chars]; + + formatUnsignedLong(val, shift, buf, 0, chars); + return new String(buf, true); + } + + /** + * Format a long (treated as unsigned) into a character buffer. + * @param val the unsigned long to format + * @param shift the log2 of the base to format in (4 for hex, 3 for octal, 1 for binary) + * @param buf the character buffer to write to + * @param offset the offset in the destination buffer to start at + * @param len the number of characters to write + * @return the lowest character location used + */ + static int formatUnsignedLong(long val, int shift, char[] buf, int offset, int len) { + int charPos = len; int radix = 1 << shift; - long mask = radix - 1; + int mask = radix - 1; do { - buf[--charPos] = Integer.digits[(int)(i & mask)]; - i >>>= shift; - } while (i != 0); - return new String(buf, charPos, (64 - charPos)); + buf[offset + --charPos] = Integer.digits[((int) val) & mask]; + val >>>= shift; + } while (val != 0 && charPos > 0); + + return charPos; } /**
--- a/src/share/classes/java/lang/management/ManagementFactory.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/lang/management/ManagementFactory.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -42,7 +42,9 @@ import java.util.Collections; import java.util.List; import java.util.Set; +import java.util.HashMap; import java.util.HashSet; +import java.util.Map; import java.security.AccessController; import java.security.Permission; import java.security.PrivilegedAction; @@ -482,6 +484,11 @@ } } } + HashMap<ObjectName, DynamicMBean> dynmbeans = + ManagementFactoryHelper.getPlatformDynamicMBeans(); + for (Map.Entry<ObjectName, DynamicMBean> e : dynmbeans.entrySet()) { + addDynamicMBean(platformMBeanServer, e.getValue(), e.getKey()); + } } return platformMBeanServer; } @@ -825,4 +832,24 @@ } } + /** + * Registers a DynamicMBean. + */ + private static void addDynamicMBean(final MBeanServer mbs, + final DynamicMBean dmbean, + final ObjectName on) { + try { + AccessController.doPrivileged(new PrivilegedExceptionAction<Void>() { + @Override + public Void run() throws InstanceAlreadyExistsException, + MBeanRegistrationException, + NotCompliantMBeanException { + mbs.registerMBean(dmbean, on); + return null; + } + }); + } catch (PrivilegedActionException e) { + throw new RuntimeException(e.getException()); + } + } }
--- a/src/share/classes/java/net/HttpCookie.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/net/HttpCookie.java Wed Jun 05 13:10:11 2013 -0300 @@ -128,8 +128,7 @@ * a {@code String} specifying the value of the cookie * * @throws IllegalArgumentException - * if the cookie name contains illegal characters or it is one of - * the tokens reserved for use by the cookie protocol + * if the cookie name contains illegal characters * @throws NullPointerException * if {@code name} is {@code null} * @@ -142,7 +141,7 @@ private HttpCookie(String name, String value, String header) { name = name.trim(); - if (name.length() == 0 || !isToken(name)) { + if (name.length() == 0 || !isToken(name) || name.charAt(0) == '$') { throw new IllegalArgumentException("Illegal cookie name"); } @@ -170,9 +169,8 @@ * @return a List of cookie parsed from header line string * * @throws IllegalArgumentException - * if header string violates the cookie specification's syntax, or - * the cookie name contains illegal characters, or the cookie name - * is one of the tokens reserved for use by the cookie protocol + * if header string violates the cookie specification's syntax or + * the cookie name contains illegal characters. * @throws NullPointerException * if the header string is {@code null} */
--- a/src/share/classes/java/net/HttpURLPermission.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/net/HttpURLPermission.java Wed Jun 05 13:10:11 2013 -0300 @@ -377,7 +377,7 @@ throw new IllegalArgumentException ("unexpected URL scheme"); } if (!u.getSchemeSpecificPart().equals("*")) { - u = URI.create(scheme + "://" + u.getAuthority() + u.getPath()); + u = URI.create(scheme + "://" + u.getRawAuthority() + u.getRawPath()); } return u; }
--- a/src/share/classes/java/nio/Buffer.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/nio/Buffer.java Wed Jun 05 13:10:11 2013 -0300 @@ -25,6 +25,7 @@ package java.nio; +import java.util.Spliterator; /** * A container for data of a specific primitive type. @@ -173,6 +174,13 @@ public abstract class Buffer { + /** + * The characteristics of Spliterators that traverse and split elements + * maintained in Buffers. + */ + static final int SPLITERATOR_CHARACTERISTICS = + Spliterator.SIZED | Spliterator.SUBSIZED | Spliterator.ORDERED; + // Invariants: mark <= position <= limit <= capacity private int mark = -1; private int position = 0;
--- a/src/share/classes/java/nio/ByteBufferAs-X-Buffer.java.template Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/nio/ByteBufferAs-X-Buffer.java.template Wed Jun 05 13:10:11 2013 -0300 @@ -115,6 +115,12 @@ return Bits.get$Type$$BO$(bb, ix(checkIndex(i))); } +#if[streamableType] + $type$ getUnchecked(int i) { + return Bits.get$Type$$BO$(bb, ix(i)); + } +#end[streamableType] + #end[rw] public $Type$Buffer put($type$ x) {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/classes/java/nio/CharBufferSpliterator.java Wed Jun 05 13:10:11 2013 -0300 @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package java.nio; + +import java.util.Comparator; +import java.util.Spliterator; +import java.util.function.IntConsumer; + +/** + * A Spliterator.OfInt for sources that traverse and split elements + * maintained in a CharBuffer. + * + * @implNote + * The implementation is based on the code for the Array-based spliterators. 
+ */ +class CharBufferSpliterator implements Spliterator.OfInt { + private final CharBuffer buffer; + private int index; // current index, modified on advance/split + private final int limit; + + CharBufferSpliterator(CharBuffer buffer) { + this(buffer, buffer.position(), buffer.limit()); + } + + CharBufferSpliterator(CharBuffer buffer, int origin, int limit) { + assert origin <= limit; + this.buffer = buffer; + this.index = (origin <= limit) ? origin : limit; + this.limit = limit; + } + + @Override + public OfInt trySplit() { + int lo = index, mid = (lo + limit) >>> 1; + return (lo >= mid) + ? null + : new CharBufferSpliterator(buffer, lo, index = mid); + } + + @Override + public void forEachRemaining(IntConsumer action) { + if (action == null) + throw new NullPointerException(); + CharBuffer cb = buffer; + int i = index; + int hi = limit; + index = hi; + while (i < hi) { + action.accept(cb.getUnchecked(i++)); + } + } + + @Override + public boolean tryAdvance(IntConsumer action) { + if (action == null) + throw new NullPointerException(); + if (index >= 0 && index < limit) { + action.accept(buffer.getUnchecked(index++)); + return true; + } + return false; + } + + @Override + public long estimateSize() { + return (long)(limit - index); + } + + @Override + public int characteristics() { + return Buffer.SPLITERATOR_CHARACTERISTICS; + } +}
--- a/src/share/classes/java/nio/Direct-X-Buffer.java.template Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/nio/Direct-X-Buffer.java.template Wed Jun 05 13:10:11 2013 -0300 @@ -253,6 +253,12 @@ return $fromBits$($swap$(unsafe.get$Swaptype$(ix(checkIndex(i))))); } +#if[streamableType] + $type$ getUnchecked(int i) { + return $fromBits$($swap$(unsafe.get$Swaptype$(ix(i)))); + } +#end[streamableType] + public $Type$Buffer get($type$[] dst, int offset, int length) { #if[rw] if ((length << $LG_BYTES_PER_VALUE$) > Bits.JNI_COPY_TO_ARRAY_THRESHOLD) {
--- a/src/share/classes/java/nio/Heap-X-Buffer.java.template Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/nio/Heap-X-Buffer.java.template Wed Jun 05 13:10:11 2013 -0300 @@ -139,6 +139,12 @@ return hb[ix(checkIndex(i))]; } +#if[streamableType] + $type$ getUnchecked(int i) { + return hb[ix(i)]; + } +#end[streamableType] + public $Type$Buffer get($type$[] dst, int offset, int length) { checkBounds(offset, length, dst.length); if (length > remaining())
--- a/src/share/classes/java/nio/StringCharBuffer.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/nio/StringCharBuffer.java Wed Jun 05 13:10:11 2013 -0300 @@ -77,6 +77,10 @@ return str.charAt(checkIndex(index) + offset); } + char getUnchecked(int index) { + return str.charAt(index + offset); + } + // ## Override bulk get methods for better performance public final CharBuffer put(char c) {
--- a/src/share/classes/java/nio/X-Buffer.java.template Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/nio/X-Buffer.java.template Wed Jun 05 13:10:11 2013 -0300 @@ -30,6 +30,11 @@ #if[char] import java.io.IOException; #end[char] +#if[streamableType] +import java.util.Spliterator; +import java.util.stream.StreamSupport; +import java.util.stream.$Streamtype$Stream; +#end[streamableType] /** * $A$ $type$ buffer. @@ -589,6 +594,19 @@ */ public abstract $type$ get(int index); +#if[streamableType] + /** + * Absolute <i>get</i> method. Reads the $type$ at the given + * index without any validation of the index. + * + * @param index + * The index from which the $type$ will be read + * + * @return The $type$ at the given index + */ + abstract $type$ getUnchecked(int index); // package-private +#end[streamableType] + /** * Absolute <i>put</i> method <i>(optional operation)</i>. * @@ -1458,4 +1476,16 @@ #end[byte] +#if[streamableType] + +#if[char] + @Override +#end[char] + public $Streamtype$Stream $type$s() { + return StreamSupport.$streamtype$Stream(() -> new $Type$BufferSpliterator(this), + Buffer.SPLITERATOR_CHARACTERISTICS); + } + +#end[streamableType] + }
--- a/src/share/classes/java/security/AccessControlContext.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/security/AccessControlContext.java Wed Jun 05 13:10:11 2013 -0300 @@ -85,6 +85,15 @@ private DomainCombiner combiner = null; + // limited privilege scope + private Permission permissions[]; + private AccessControlContext parent; + private boolean isWrapped; + + // is constrained by limited privilege scope? + private boolean isLimited; + private ProtectionDomain limitedContext[]; + private static boolean debugInit = false; private static Debug debug = null; @@ -178,14 +187,79 @@ /** * package private for AccessController + * + * This "argument wrapper" context will be passed as the actual context + * parameter on an internal doPrivileged() call used in the implementation. */ - AccessControlContext(ProtectionDomain context[], DomainCombiner combiner) { + AccessControlContext(ProtectionDomain caller, DomainCombiner combiner, + AccessControlContext parent, AccessControlContext context, + Permission[] perms) + { + /* + * Combine the domains from the doPrivileged() context into our + * wrapper context, if necessary. + */ + ProtectionDomain[] callerPDs = null; + if (caller != null) { + callerPDs = new ProtectionDomain[] { caller }; + } if (context != null) { - this.context = context.clone(); + if (combiner != null) { + this.context = combiner.combine(callerPDs, context.context); + } else { + this.context = combine(callerPDs, context.context); + } + } else { + /* + * Call combiner even if there is seemingly nothing to combine. 
+ */ + if (combiner != null) { + this.context = combiner.combine(callerPDs, null); + } else { + this.context = combine(callerPDs, null); + } } this.combiner = combiner; + + Permission[] tmp = null; + if (perms != null) { + tmp = new Permission[perms.length]; + for (int i=0; i < perms.length; i++) { + if (perms[i] == null) { + throw new NullPointerException("permission can't be null"); + } + + /* + * An AllPermission argument is equivalent to calling + * doPrivileged() without any limit permissions. + */ + if (perms[i].getClass() == AllPermission.class) { + parent = null; + } + tmp[i] = perms[i]; + } + } + + /* + * For a doPrivileged() with limited privilege scope, initialize + * the relevant fields. + * + * The limitedContext field contains the union of all domains which + * are enclosed by this limited privilege scope. In other words, + * it contains all of the domains which could potentially be checked + * if none of the limiting permissions implied a requested permission. + */ + if (parent != null) { + this.limitedContext = combine(parent.context, parent.limitedContext); + this.isLimited = true; + this.isWrapped = true; + this.permissions = tmp; + this.parent = parent; + this.privilegedContext = context; // used in checkPermission2() + } } + /** * package private constructor for AccessController.getContext() */ @@ -260,6 +334,13 @@ if (sm != null) { sm.checkPermission(SecurityConstants.GET_COMBINER_PERMISSION); } + return getCombiner(); + } + + /** + * package private for AccessController + */ + DomainCombiner getCombiner() { return combiner; } @@ -335,8 +416,10 @@ or the first domain was a Privileged system domain. 
This is to make the common case for system code very fast */ - if (context == null) + if (context == null) { + checkPermission2(perm); return; + } for (int i=0; i< context.length; i++) { if (context[i] != null && !context[i].implies(perm)) { @@ -370,20 +453,108 @@ debug.println("access allowed "+perm); } - return; + checkPermission2(perm); + } + + /* + * Check the domains associated with the limited privilege scope. + */ + private void checkPermission2(Permission perm) { + if (!isLimited) { + return; + } + + /* + * Check the doPrivileged() context parameter, if present. + */ + if (privilegedContext != null) { + privilegedContext.checkPermission2(perm); + } + + /* + * Ignore the limited permissions and parent fields of a wrapper + * context since they were already carried down into the unwrapped + * context. + */ + if (isWrapped) { + return; + } + + /* + * Try to match any limited privilege scope. + */ + if (permissions != null) { + Class<?> permClass = perm.getClass(); + for (int i=0; i < permissions.length; i++) { + Permission limit = permissions[i]; + if (limit.getClass().equals(permClass) && limit.implies(perm)) { + return; + } + } + } + + /* + * Check the limited privilege scope up the call stack or the inherited + * parent thread call stack of this ACC. + */ + if (parent != null) { + /* + * As an optimization, if the parent context is the inherited call + * stack context from a parent thread then checking the protection + * domains of the parent context is redundant since they have + * already been merged into the child thread's context by + * optimize(). When parent is set to an inherited context this + * context was not directly created by a limited scope + * doPrivileged() and it does not have its own limited permissions. + */ + if (permissions == null) { + parent.checkPermission2(perm); + } else { + parent.checkPermission(perm); + } + } } /** * Take the stack-based context (this) and combine it with the - * privileged or inherited context, if need be. 
+ * privileged or inherited context, if need be. Any limited + * privilege scope is flagged regardless of whether the assigned + * context comes from an immediately enclosing limited doPrivileged(). + * The limited privilege scope can indirectly flow from the inherited + * parent thread or an assigned context previously captured by getContext(). */ AccessControlContext optimize() { // the assigned (privileged or inherited) context AccessControlContext acc; + DomainCombiner combiner = null; + AccessControlContext parent = null; + Permission[] permissions = null; + if (isPrivileged) { acc = privilegedContext; + if (acc != null) { + /* + * If the context is from a limited scope doPrivileged() then + * copy the permissions and parent fields out of the wrapper + * context that was created to hold them. + */ + if (acc.isWrapped) { + permissions = acc.permissions; + parent = acc.parent; + } + } } else { acc = AccessController.getInheritedAccessControlContext(); + if (acc != null) { + /* + * If the inherited context is constrained by a limited scope + * doPrivileged() then set it as our parent so we will process + * the non-domain-related state. + */ + if (acc.isLimited) { + parent = acc; + } + } } // this.context could be null if only system code is on the stack; @@ -393,53 +564,98 @@ // acc.context could be null if only system code was involved; // in that case, ignore the assigned context boolean skipAssigned = (acc == null || acc.context == null); + ProtectionDomain[] assigned = (skipAssigned) ? 
null : acc.context; + ProtectionDomain[] pd; + + // if there is no enclosing limited privilege scope on the stack or + // inherited from a parent thread + boolean skipLimited = ((acc == null || !acc.isWrapped) && parent == null); if (acc != null && acc.combiner != null) { // let the assigned acc's combiner do its thing - return goCombiner(context, acc); + if (getDebug() != null) { + debug.println("AccessControlContext invoking the Combiner"); + } + + // No need to clone current and assigned.context + // combine() will not update them + combiner = acc.combiner; + pd = combiner.combine(context, assigned); + } else { + if (skipStack) { + if (skipAssigned) { + calculateFields(acc, parent, permissions); + return this; + } else if (skipLimited) { + return acc; + } + } else if (assigned != null) { + if (skipLimited) { + // optimization: if there is a single stack domain and + // that domain is already in the assigned context; no + // need to combine + if (context.length == 1 && context[0] == assigned[0]) { + return acc; + } + } + } + + pd = combine(context, assigned); + if (skipLimited && !skipAssigned && pd == assigned) { + return acc; + } else if (skipAssigned && pd == context) { + calculateFields(acc, parent, permissions); + return this; + } } - // optimization: if neither have contexts; return acc if possible - // rather than this, because acc might have a combiner - if (skipAssigned && skipStack) { - return this; - } + // Reuse existing ACC + this.context = pd; + this.combiner = combiner; + this.isPrivileged = false; + + calculateFields(acc, parent, permissions); + return this; + } + - // optimization: if there is no stack context; there is no reason - // to compress the assigned context, it already is compressed - if (skipStack) { - return acc; - } + /* + * Combine the current (stack) and assigned domains. 
+ */ + private static ProtectionDomain[] combine(ProtectionDomain[]current, + ProtectionDomain[] assigned) { - int slen = context.length; + // current could be null if only system code is on the stack; + // in that case, ignore the stack context + boolean skipStack = (current == null); + + // assigned could be null if only system code was involved; + // in that case, ignore the assigned context + boolean skipAssigned = (assigned == null); + + int slen = (skipStack) ? 0 : current.length; // optimization: if there is no assigned context and the stack length // is less then or equal to two; there is no reason to compress the // stack context, it already is if (skipAssigned && slen <= 2) { - return this; + return current; } - // optimization: if there is a single stack domain and that domain - // is already in the assigned context; no need to combine - if ((slen == 1) && (context[0] == acc.context[0])) { - return acc; - } - - int n = (skipAssigned) ? 0 : acc.context.length; + int n = (skipAssigned) ? 
0 : assigned.length; // now we combine both of them, and create a new context ProtectionDomain pd[] = new ProtectionDomain[slen + n]; // first copy in the assigned context domains, no need to compress if (!skipAssigned) { - System.arraycopy(acc.context, 0, pd, 0, n); + System.arraycopy(assigned, 0, pd, 0, n); } // now add the stack context domains, discarding nulls and duplicates outer: - for (int i = 0; i < context.length; i++) { - ProtectionDomain sd = context[i]; + for (int i = 0; i < slen; i++) { + ProtectionDomain sd = current[i]; if (sd != null) { for (int j = 0; j < n; j++) { if (sd == pd[j]) { @@ -453,53 +669,47 @@ // if length isn't equal, we need to shorten the array if (n != pd.length) { // optimization: if we didn't really combine anything - if (!skipAssigned && n == acc.context.length) { - return acc; + if (!skipAssigned && n == assigned.length) { + return assigned; } else if (skipAssigned && n == slen) { - return this; + return current; } ProtectionDomain tmp[] = new ProtectionDomain[n]; System.arraycopy(pd, 0, tmp, 0, n); pd = tmp; } - // return new AccessControlContext(pd, false); - - // Reuse existing ACC - - this.context = pd; - this.combiner = null; - this.isPrivileged = false; - - return this; + return pd; } - private AccessControlContext goCombiner(ProtectionDomain[] current, - AccessControlContext assigned) { - - // the assigned ACC's combiner is not null -- - // let the combiner do its thing - - // XXX we could add optimizations to 'current' here ... - - if (getDebug() != null) { - debug.println("AccessControlContext invoking the Combiner"); - } - // No need to clone current and assigned.context - // combine() will not update them - ProtectionDomain[] combinedPds = assigned.combiner.combine( - current, assigned.context); - - // return new AccessControlContext(combinedPds, assigned.combiner); + /* + * Calculate the additional domains that could potentially be reached via + * limited privilege scope. 
Mark the context as being subject to limited + * privilege scope unless the reachable domains (if any) are already + * contained in this domain context (in which case any limited + * privilege scope checking would be redundant). + */ + private void calculateFields(AccessControlContext assigned, + AccessControlContext parent, Permission[] permissions) + { + ProtectionDomain[] parentLimit = null; + ProtectionDomain[] assignedLimit = null; + ProtectionDomain[] newLimit; - // Reuse existing ACC - this.context = combinedPds; - this.combiner = assigned.combiner; - this.isPrivileged = false; + parentLimit = (parent != null)? parent.limitedContext: null; + assignedLimit = (assigned != null)? assigned.limitedContext: null; + newLimit = combine(parentLimit, assignedLimit); + if (newLimit != null) { + if (context == null || !containsAllPDs(newLimit, context)) { + this.limitedContext = newLimit; + this.permissions = permissions; + this.parent = parent; + this.isLimited = true; + } + } + } - return this; - } /** * Checks two AccessControlContext objects for equality. @@ -520,31 +730,131 @@ AccessControlContext that = (AccessControlContext) obj; + if (!equalContext(that)) + return false; - if (context == null) { - return (that.context == null); - } + if (!equalLimitedContext(that)) + return false; + + return true; + } - if (that.context == null) + /* + * Compare for equality based on state that is free of limited + * privilege complications. 
+ */ + private boolean equalContext(AccessControlContext that) { + if (!equalPDs(this.context, that.context)) return false; - if (!(this.containsAllPDs(that) && that.containsAllPDs(this))) + if (this.combiner == null && that.combiner != null) + return false; + + if (this.combiner != null && !this.combiner.equals(that.combiner)) return false; - if (this.combiner == null) - return (that.combiner == null); + return true; + } - if (that.combiner == null) + private boolean equalPDs(ProtectionDomain[] a, ProtectionDomain[] b) { + if (a == null) { + return (b == null); + } + + if (b == null) return false; - if (!this.combiner.equals(that.combiner)) + if (!(containsAllPDs(a, b) && containsAllPDs(b, a))) return false; return true; } - private boolean containsAllPDs(AccessControlContext that) { + /* + * Compare for equality based on state that is captured during a + * call to AccessController.getContext() when a limited privilege + * scope is in effect. + */ + private boolean equalLimitedContext(AccessControlContext that) { + if (that == null) + return false; + + /* + * If neither instance has limited privilege scope then we're done. + */ + if (!this.isLimited && !that.isLimited) + return true; + + /* + * If only one instance has limited privilege scope then we're done. + */ + if (!(this.isLimited && that.isLimited)) + return false; + + /* + * Wrapped instances should never escape outside the implementation + * this class and AccessController so this will probably never happen + * but it only makes any sense to compare if they both have the same + * isWrapped state. + */ + if ((this.isWrapped && !that.isWrapped) || + (!this.isWrapped && that.isWrapped)) { + return false; + } + + if (this.permissions == null && that.permissions != null) + return false; + + if (this.permissions != null && that.permissions == null) + return false; + + if (!(this.containsAllLimits(that) && that.containsAllLimits(this))) + return false; + + /* + * Skip through any wrapped contexts. 
+ */ + AccessControlContext thisNextPC = getNextPC(this); + AccessControlContext thatNextPC = getNextPC(that); + + /* + * The protection domains and combiner of a privilegedContext are + * not relevant because they have already been included in the context + * of this instance by optimize() so we only care about any limited + * privilege state they may have. + */ + if (thisNextPC == null && thatNextPC != null && thatNextPC.isLimited) + return false; + + if (thisNextPC != null && !thisNextPC.equalLimitedContext(thatNextPC)) + return false; + + if (this.parent == null && that.parent != null) + return false; + + if (this.parent != null && !this.parent.equals(that.parent)) + return false; + + return true; + } + + /* + * Follow the privilegedContext link making our best effort to skip + * through any wrapper contexts. + */ + private static AccessControlContext getNextPC(AccessControlContext acc) { + while (acc != null && acc.privilegedContext != null) { + acc = acc.privilegedContext; + if (!acc.isWrapped) + return acc; + } + return null; + } + + private static boolean containsAllPDs(ProtectionDomain[] thisContext, + ProtectionDomain[] thatContext) { boolean match = false; + // // ProtectionDomains within an ACC currently cannot be null // and this is enforced by the constructor and the various @@ -552,17 +862,17 @@ // to support the notion of a null PD and therefore this logic continues // to support that notion. 
ProtectionDomain thisPd; - for (int i = 0; i < context.length; i++) { + for (int i = 0; i < thisContext.length; i++) { match = false; - if ((thisPd = context[i]) == null) { - for (int j = 0; (j < that.context.length) && !match; j++) { - match = (that.context[j] == null); + if ((thisPd = thisContext[i]) == null) { + for (int j = 0; (j < thatContext.length) && !match; j++) { + match = (thatContext[j] == null); } } else { Class<?> thisPdClass = thisPd.getClass(); ProtectionDomain thatPd; - for (int j = 0; (j < that.context.length) && !match; j++) { - thatPd = that.context[j]; + for (int j = 0; (j < thatContext.length) && !match; j++) { + thatPd = thatContext[j]; // Class check required to avoid PD exposure (4285406) match = (thatPd != null && @@ -573,6 +883,29 @@ } return match; } + + private boolean containsAllLimits(AccessControlContext that) { + boolean match = false; + Permission thisPerm; + + if (this.permissions == null && that.permissions == null) + return true; + + for (int i = 0; i < this.permissions.length; i++) { + Permission limit = this.permissions[i]; + Class <?> limitClass = limit.getClass(); + match = false; + for (int j = 0; (j < that.permissions.length) && !match; j++) { + Permission perm = that.permissions[j]; + match = (limitClass.equals(perm.getClass()) && + limit.equals(perm)); + } + if (!match) return false; + } + return match; + } + + /** * Returns the hash code value for this context. The hash code * is computed by exclusive or-ing the hash code of all the protection @@ -591,6 +924,7 @@ if (context[i] != null) hashCode ^= context[i].hashCode(); } + return hashCode; } }
--- a/src/share/classes/java/security/AccessController.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/security/AccessController.java Wed Jun 05 13:10:11 2013 -0300 @@ -82,9 +82,15 @@ * else if (caller i is marked as privileged) { * if (a context was specified in the call to doPrivileged) * context.checkPermission(permission) - * return; + * if (limited permissions were specified in the call to doPrivileged) { + * for (each limited permission) { + * if (the limited permission implies the requested permission) + * return; + * } + * } else + * return; * } - * }; + * } * * // Next, check the context inherited when the thread was created. * // Whenever a new thread is created, the AccessControlContext at @@ -101,11 +107,16 @@ * was marked as "privileged" via a <code>doPrivileged</code> * call without a context argument (see below for information about a * context argument). If that caller's domain has the - * specified permission, no further checking is done and + * specified permission and at least one limiting permission argument (if any) + * implies the requested permission, no further checking is done and * <code>checkPermission</code> * returns quietly, indicating that the requested access is allowed. * If that domain does not have the specified permission, an exception - * is thrown, as usual. + * is thrown, as usual. If the caller's domain had the specified permission + * but it was not implied by any limiting permission arguments given in the call + * to <code>doPrivileged</code> then the permission checking continues + * until there are no more callers or another <code>doPrivileged</code> + * call matches the requested permission and returns normally. * * <p> The normal use of the "privileged" feature is as follows. 
If you * don't need to return a value from within the "privileged" block, do @@ -180,6 +191,9 @@ * * <p> Be *very* careful in your use of the "privileged" construct, and * always remember to make the privileged code section as small as possible. + * You can pass <code>Permission</code> arguments to further limit the + * scope of the "privilege" (see below). + * * * <p> Note that <code>checkPermission</code> always performs security checks * within the context of the currently executing thread. @@ -215,7 +229,9 @@ * * <p> There are also times where you don't know a priori which permissions * to check the context against. In these cases you can use the - * doPrivileged method that takes a context: + * doPrivileged method that takes a context. You can also limit the scope + * of the privileged code by passing additional <code>Permission</code> + * parameters. * * <pre> {@code * somemethod() { @@ -223,12 +239,21 @@ * public Object run() { * // Code goes here. Any permission checks within this * // run method will require that the intersection of the - * // callers protection domain and the snapshot's - * // context have the desired permission. + * // caller's protection domain and the snapshot's + * // context have the desired permission. If a requested + * // permission is not implied by the limiting FilePermission + * // argument then checking of the thread continues beyond the + * // caller of doPrivileged. * } - * }, acc); + * }, acc, new FilePermission("/temp/*", read)); * ...normal code here... * }}</pre> + * <p> Passing a limiting <code>Permission</code> argument of an instance of + * <code>AllPermission</code> is equivalent to calling the equivalent + * <code>doPrivileged</code> method without limiting <code>Permission</code> + * arguments. Passing a zero length array of <code>Permission</code> disables + * the code privileges so that checking always continues beyond the caller of + * that <code>doPrivileged</code> method. 
* * @see AccessControlContext * @@ -334,6 +359,112 @@ public static native <T> T doPrivileged(PrivilegedAction<T> action, AccessControlContext context); + + /** + * Performs the specified <code>PrivilegedAction</code> with privileges + * enabled and restricted by the specified + * <code>AccessControlContext</code> and with a privilege scope limited + * by specified <code>Permission</code> arguments. + * + * The action is performed with the intersection of the permissions + * possessed by the caller's protection domain, and those possessed + * by the domains represented by the specified + * <code>AccessControlContext</code>. + * <p> + * If the action's <code>run</code> method throws an (unchecked) exception, + * it will propagate through this method. + * + * @param action the action to be performed. + * @param context an <i>access control context</i> + * representing the restriction to be applied to the + * caller's domain's privileges before performing + * the specified action. If the context is + * <code>null</code>, + * then no additional restriction is applied. + * @param perms the <code>Permission</code> arguments which limit the + * scope of the caller's privileges. The number of arguments + * is variable. + * + * @return the value returned by the action's <code>run</code> method. + * + * @throws NullPointerException if action or perms or any element of + * perms is <code>null</code> + * + * @see #doPrivileged(PrivilegedAction) + * @see #doPrivileged(PrivilegedExceptionAction,AccessControlContext) + * + * @since 1.8 + */ + @CallerSensitive + public static <T> T doPrivileged(PrivilegedAction<T> action, + AccessControlContext context, Permission... 
perms) { + + AccessControlContext parent = getContext(); + if (perms == null) { + throw new NullPointerException("null permissions parameter"); + } + Class <?> caller = Reflection.getCallerClass(); + return AccessController.doPrivileged(action, createWrapper(null, + caller, parent, context, perms)); + } + + + /** + * Performs the specified <code>PrivilegedAction</code> with privileges + * enabled and restricted by the specified + * <code>AccessControlContext</code> and with a privilege scope limited + * by specified <code>Permission</code> arguments. + * + * The action is performed with the intersection of the permissions + * possessed by the caller's protection domain, and those possessed + * by the domains represented by the specified + * <code>AccessControlContext</code>. + * <p> + * If the action's <code>run</code> method throws an (unchecked) exception, + * it will propagate through this method. + * + * <p> This method preserves the current AccessControlContext's + * DomainCombiner (which may be null) while the action is performed. + * + * @param action the action to be performed. + * @param context an <i>access control context</i> + * representing the restriction to be applied to the + * caller's domain's privileges before performing + * the specified action. If the context is + * <code>null</code>, + * then no additional restriction is applied. + * @param perms the <code>Permission</code> arguments which limit the + * scope of the caller's privileges. The number of arguments + * is variable. + * + * @return the value returned by the action's <code>run</code> method. 
+ * + * @throws NullPointerException if action or perms or any element of + * perms is <code>null</code> + * + * @see #doPrivileged(PrivilegedAction) + * @see #doPrivileged(PrivilegedExceptionAction,AccessControlContext) + * @see java.security.DomainCombiner + * + * @since 1.8 + */ + @CallerSensitive + public static <T> T doPrivilegedWithCombiner(PrivilegedAction<T> action, + AccessControlContext context, Permission... perms) { + + AccessControlContext parent = getContext(); + DomainCombiner dc = parent.getCombiner(); + if (dc == null && context != null) { + dc = context.getCombiner(); + } + if (perms == null) { + throw new NullPointerException("null permissions parameter"); + } + Class <?> caller = Reflection.getCallerClass(); + return AccessController.doPrivileged(action, createWrapper(dc, caller, + parent, context, perms)); + } + /** * Performs the specified <code>PrivilegedExceptionAction</code> with * privileges enabled. The action is performed with <i>all</i> of the @@ -408,6 +539,22 @@ private static AccessControlContext preserveCombiner(DomainCombiner combiner, Class<?> caller) { + return createWrapper(combiner, caller, null, null, null); + } + + /** + * Create a wrapper to contain the limited privilege scope data. 
+ */ + private static AccessControlContext + createWrapper(DomainCombiner combiner, Class<?> caller, + AccessControlContext parent, AccessControlContext context, + Permission[] perms) + { + return new AccessControlContext(getCallerPD(caller), combiner, parent, + context, perms); + } + + private static ProtectionDomain getCallerPD(final Class <?> caller) { ProtectionDomain callerPd = doPrivileged (new PrivilegedAction<ProtectionDomain>() { public ProtectionDomain run() { @@ -415,18 +562,9 @@ } }); - // perform 'combine' on the caller of doPrivileged, - // even if the caller is from the bootclasspath - ProtectionDomain[] pds = new ProtectionDomain[] {callerPd}; - if (combiner == null) { - return new AccessControlContext(pds); - } else { - return new AccessControlContext(combiner.combine(pds, null), - combiner); - } + return callerPd; } - /** * Performs the specified <code>PrivilegedExceptionAction</code> with * privileges enabled and restricted by the specified @@ -454,7 +592,7 @@ * @exception NullPointerException if the action is <code>null</code> * * @see #doPrivileged(PrivilegedAction) - * @see #doPrivileged(PrivilegedExceptionAction,AccessControlContext) + * @see #doPrivileged(PrivilegedAction,AccessControlContext) */ @CallerSensitive public static native <T> T @@ -462,6 +600,118 @@ AccessControlContext context) throws PrivilegedActionException; + + /** + * Performs the specified <code>PrivilegedExceptionAction</code> with + * privileges enabled and restricted by the specified + * <code>AccessControlContext</code> and with a privilege scope limited by + * specified <code>Permission</code> arguments. + * + * The action is performed with the intersection of the permissions + * possessed by the caller's protection domain, and those possessed + * by the domains represented by the specified + * <code>AccessControlContext</code>. + * <p> + * If the action's <code>run</code> method throws an (unchecked) exception, + * it will propagate through this method. 
+ * + * @param action the action to be performed. + * @param context an <i>access control context</i> + * representing the restriction to be applied to the + * caller's domain's privileges before performing + * the specified action. If the context is + * <code>null</code>, + * then no additional restriction is applied. + * @param perms the <code>Permission</code> arguments which limit the + * scope of the caller's privileges. The number of arguments + * is variable. + * + * @return the value returned by the action's <code>run</code> method. + * + * @throws PrivilegedActionException if the specified action's + * <code>run</code> method threw a <i>checked</i> exception + * @throws NullPointerException if action or perms or any element of + * perms is <code>null</code> + * + * @see #doPrivileged(PrivilegedAction) + * @see #doPrivileged(PrivilegedAction,AccessControlContext) + * + * @since 1.8 + */ + @CallerSensitive + public static <T> T doPrivileged(PrivilegedExceptionAction<T> action, + AccessControlContext context, Permission... perms) + throws PrivilegedActionException + { + AccessControlContext parent = getContext(); + if (perms == null) { + throw new NullPointerException("null permissions parameter"); + } + Class <?> caller = Reflection.getCallerClass(); + return AccessController.doPrivileged(action, createWrapper(null, caller, parent, context, perms)); + } + + + /** + * Performs the specified <code>PrivilegedExceptionAction</code> with + * privileges enabled and restricted by the specified + * <code>AccessControlContext</code> and with a privilege scope limited by + * specified <code>Permission</code> arguments. + * + * The action is performed with the intersection of the permissions + * possessed by the caller's protection domain, and those possessed + * by the domains represented by the specified + * <code>AccessControlContext</code>. 
+ * <p> + * If the action's <code>run</code> method throws an (unchecked) exception, + * it will propagate through this method. + * + * <p> This method preserves the current AccessControlContext's + * DomainCombiner (which may be null) while the action is performed. + * + * @param action the action to be performed. + * @param context an <i>access control context</i> + * representing the restriction to be applied to the + * caller's domain's privileges before performing + * the specified action. If the context is + * <code>null</code>, + * then no additional restriction is applied. + * @param perms the <code>Permission</code> arguments which limit the + * scope of the caller's privileges. The number of arguments + * is variable. + * + * @return the value returned by the action's <code>run</code> method. + * + * @throws PrivilegedActionException if the specified action's + * <code>run</code> method threw a <i>checked</i> exception + * @throws NullPointerException if action or perms or any element of + * perms is <code>null</code> + * + * @see #doPrivileged(PrivilegedAction) + * @see #doPrivileged(PrivilegedAction,AccessControlContext) + * @see java.security.DomainCombiner + * + * @since 1.8 + */ + @CallerSensitive + public static <T> T doPrivilegedWithCombiner(PrivilegedExceptionAction<T> action, + AccessControlContext context, + Permission... perms) + throws PrivilegedActionException + { + AccessControlContext parent = getContext(); + DomainCombiner dc = parent.getCombiner(); + if (dc == null && context != null) { + dc = context.getCombiner(); + } + if (perms == null) { + throw new NullPointerException("null permissions parameter"); + } + Class <?> caller = Reflection.getCallerClass(); + return AccessController.doPrivileged(action, createWrapper(dc, caller, + parent, context, perms)); + } + /** * Returns the AccessControl context. 
i.e., it gets * the protection domains of all the callers on the stack, @@ -474,6 +724,7 @@ private static native AccessControlContext getStackAccessControlContext(); + /** * Returns the "inherited" AccessControl context. This is the context * that existed when the thread was created. Package private so @@ -484,9 +735,9 @@ /** * This method takes a "snapshot" of the current calling context, which - * includes the current Thread's inherited AccessControlContext, - * and places it in an AccessControlContext object. This context may then - * be checked at a later point, possibly in another thread. + * includes the current Thread's inherited AccessControlContext and any + * limited privilege scope, and places it in an AccessControlContext object. + * This context may then be checked at a later point, possibly in another thread. * * @see AccessControlContext * @@ -524,7 +775,7 @@ */ public static void checkPermission(Permission perm) - throws AccessControlException + throws AccessControlException { //System.err.println("checkPermission "+perm); //Thread.currentThread().dumpStack();
--- a/src/share/classes/java/security/DigestOutputStream.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/security/DigestOutputStream.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 1999, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -112,10 +112,10 @@ * @see MessageDigest#update(byte) */ public void write(int b) throws IOException { + out.write(b); if (on) { digest.update((byte)b); } - out.write(b); } /** @@ -142,10 +142,10 @@ * @see MessageDigest#update(byte[], int, int) */ public void write(byte[] b, int off, int len) throws IOException { + out.write(b, off, len); if (on) { digest.update(b, off, len); } - out.write(b, off, len); } /**
--- a/src/share/classes/java/util/HashMap.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/util/HashMap.java Wed Jun 05 13:10:11 2013 -0300 @@ -26,6 +26,8 @@ package java.util; import java.io.*; +import java.lang.reflect.ParameterizedType; +import java.lang.reflect.Type; import java.util.function.Consumer; import java.util.function.BiFunction; import java.util.function.Function; @@ -126,7 +128,7 @@ */ public class HashMap<K,V> - extends AbstractMap<K,V> + extends AbstractMap<K,V> implements Map<K,V>, Cloneable, Serializable { @@ -150,12 +152,12 @@ /** * An empty table instance to share when the table is not inflated. */ - static final Entry<?,?>[] EMPTY_TABLE = {}; + static final Object[] EMPTY_TABLE = {}; /** * The table, resized as necessary. Length MUST Always be a power of two. */ - transient Entry<?,?>[] table = EMPTY_TABLE; + transient Object[] table = EMPTY_TABLE; /** * The number of key-value mappings contained in this map. @@ -186,10 +188,10 @@ */ transient int modCount; + /** + * Holds values which can't be initialized until after VM is booted. + */ private static class Holder { - /** - * - */ static final sun.misc.Unsafe UNSAFE; /** @@ -198,22 +200,616 @@ */ static final long HASHSEED_OFFSET; + static final boolean USE_HASHSEED; + static { - try { - UNSAFE = sun.misc.Unsafe.getUnsafe(); - HASHSEED_OFFSET = UNSAFE.objectFieldOffset( - HashMap.class.getDeclaredField("hashSeed")); - } catch (NoSuchFieldException | SecurityException e) { - throw new InternalError("Failed to record hashSeed offset", e); + String hashSeedProp = java.security.AccessController.doPrivileged( + new sun.security.action.GetPropertyAction( + "jdk.map.useRandomSeed")); + boolean localBool = (null != hashSeedProp) + ? 
Boolean.parseBoolean(hashSeedProp) : false; + USE_HASHSEED = localBool; + + if (USE_HASHSEED) { + try { + UNSAFE = sun.misc.Unsafe.getUnsafe(); + HASHSEED_OFFSET = UNSAFE.objectFieldOffset( + HashMap.class.getDeclaredField("hashSeed")); + } catch (NoSuchFieldException | SecurityException e) { + throw new InternalError("Failed to record hashSeed offset", e); + } + } else { + UNSAFE = null; + HASHSEED_OFFSET = 0; } } } - /** + /* * A randomizing value associated with this instance that is applied to * hash code of keys to make hash collisions harder to find. + * + * Non-final so it can be set lazily, but be sure not to set more than once. */ - transient final int hashSeed = sun.misc.Hashing.randomHashSeed(this); + transient final int hashSeed; + + /* + * TreeBin/TreeNode code from CHM doesn't handle the null key. Store the + * null key entry here. + */ + transient Entry<K,V> nullKeyEntry = null; + + /* + * In order to improve performance under high hash-collision conditions, + * HashMap will switch to storing a bin's entries in a balanced tree + * (TreeBin) instead of a linked-list once the number of entries in the bin + * passes a certain threshold (TreeBin.TREE_THRESHOLD), if at least one of + * the keys in the bin implements Comparable. This technique is borrowed + * from ConcurrentHashMap. + */ + + /* + * Code based on CHMv8 + * + * Node type for TreeBin + */ + final static class TreeNode<K,V> { + TreeNode parent; // red-black tree links + TreeNode left; + TreeNode right; + TreeNode prev; // needed to unlink next upon deletion + boolean red; + final HashMap.Entry<K,V> entry; + + TreeNode(HashMap.Entry<K,V> entry, Object next, TreeNode parent) { + this.entry = entry; + this.entry.next = next; + this.parent = parent; + } + } + + /** + * Returns a Class for the given object of the form "class C + * implements Comparable<C>", if one exists, else null. See the TreeBin + * docs, below, for explanation. 
+ */ + static Class<?> comparableClassFor(Object x) { + Class<?> c, s, cmpc; Type[] ts, as; Type t; ParameterizedType p; + if ((c = x.getClass()) == String.class) // bypass checks + return c; + if ((cmpc = Comparable.class).isAssignableFrom(c)) { + while (cmpc.isAssignableFrom(s = c.getSuperclass())) + c = s; // find topmost comparable class + if ((ts = c.getGenericInterfaces()) != null) { + for (int i = 0; i < ts.length; ++i) { + if (((t = ts[i]) instanceof ParameterizedType) && + ((p = (ParameterizedType)t).getRawType() == cmpc) && + (as = p.getActualTypeArguments()) != null && + as.length == 1 && as[0] == c) // type arg is c + return c; + } + } + } + return null; + } + + /* + * Code based on CHMv8 + * + * A specialized form of red-black tree for use in bins + * whose size exceeds a threshold. + * + * TreeBins use a special form of comparison for search and + * related operations (which is the main reason we cannot use + * existing collections such as TreeMaps). TreeBins contain + * Comparable elements, but may contain others, as well as + * elements that are Comparable but not necessarily Comparable<T> + * for the same T, so we cannot invoke compareTo among them. To + * handle this, the tree is ordered primarily by hash value, then + * by Comparable.compareTo order if applicable. On lookup at a + * node, if elements are not comparable or compare as 0 then both + * left and right children may need to be searched in the case of + * tied hash values. (This corresponds to the full list search + * that would be necessary if all elements were non-Comparable and + * had tied hashes.) The red-black balancing code is updated from + * pre-jdk-collections + * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) + * based in turn on Cormen, Leiserson, and Rivest "Introduction to + * Algorithms" (CLR). + */ + final class TreeBin { + /* + * The bin count threshold for using a tree rather than list for a bin. 
The + * value reflects the approximate break-even point for using tree-based + * operations. + */ + static final int TREE_THRESHOLD = 16; + + TreeNode<K,V> root; // root of tree + TreeNode<K,V> first; // head of next-pointer list + + /* + * Split a TreeBin into lo and hi parts and install in given table. + * + * Existing Entrys are re-used, which maintains the before/after links for + * LinkedHashMap.Entry. + * + * No check for Comparable, though this is the same as CHM. + */ + final void splitTreeBin(Object[] newTable, int i, TreeBin loTree, TreeBin hiTree) { + TreeBin oldTree = this; + int bit = newTable.length >>> 1; + int loCount = 0, hiCount = 0; + TreeNode<K,V> e = oldTree.first; + TreeNode<K,V> next; + + // This method is called when the table has just increased capacity, + // so indexFor() is now taking one additional bit of hash into + // account ("bit"). Entries in this TreeBin now belong in one of + // two bins, "i" or "i+bit", depending on if the new top bit of the + // hash is set. The trees for the two bins are loTree and hiTree. + // If either tree ends up containing fewer than TREE_THRESHOLD + // entries, it is converted back to a linked list. 
+ while (e != null) { + // Save entry.next - it will get overwritten in putTreeNode() + next = (TreeNode<K,V>)e.entry.next; + + int h = e.entry.hash; + K k = (K) e.entry.key; + V v = e.entry.value; + if ((h & bit) == 0) { + ++loCount; + // Re-using e.entry + loTree.putTreeNode(h, k, v, e.entry); + } else { + ++hiCount; + hiTree.putTreeNode(h, k, v, e.entry); + } + // Iterate using the saved 'next' + e = next; + } + if (loCount < TREE_THRESHOLD) { // too small, convert back to list + HashMap.Entry loEntry = null; + TreeNode<K,V> p = loTree.first; + while (p != null) { + @SuppressWarnings("unchecked") + TreeNode<K,V> savedNext = (TreeNode<K,V>) p.entry.next; + p.entry.next = loEntry; + loEntry = p.entry; + p = savedNext; + } + // assert newTable[i] == null; + newTable[i] = loEntry; + } else { + // assert newTable[i] == null; + newTable[i] = loTree; + } + if (hiCount < TREE_THRESHOLD) { // too small, convert back to list + HashMap.Entry hiEntry = null; + TreeNode<K,V> p = hiTree.first; + while (p != null) { + @SuppressWarnings("unchecked") + TreeNode<K,V> savedNext = (TreeNode<K,V>) p.entry.next; + p.entry.next = hiEntry; + hiEntry = p.entry; + p = savedNext; + } + // assert newTable[i + bit] == null; + newTable[i + bit] = hiEntry; + } else { + // assert newTable[i + bit] == null; + newTable[i + bit] = hiTree; + } + } + + /* + * Popuplate the TreeBin with entries from the linked list e + * + * Assumes 'this' is a new/empty TreeBin + * + * Note: no check for Comparable + * Note: I believe this changes iteration order + */ + @SuppressWarnings("unchecked") + void populate(HashMap.Entry e) { + // assert root == null; + // assert first == null; + HashMap.Entry next; + while (e != null) { + // Save entry.next - it will get overwritten in putTreeNode() + next = (HashMap.Entry)e.next; + // Re-using Entry e will maintain before/after in LinkedHM + putTreeNode(e.hash, (K)e.key, (V)e.value, e); + // Iterate using the saved 'next' + e = next; + } + } + + /** + * Copied from CHMv8 
+ * From CLR + */ + private void rotateLeft(TreeNode p) { + if (p != null) { + TreeNode r = p.right, pp, rl; + if ((rl = p.right = r.left) != null) { + rl.parent = p; + } + if ((pp = r.parent = p.parent) == null) { + root = r; + } else if (pp.left == p) { + pp.left = r; + } else { + pp.right = r; + } + r.left = p; + p.parent = r; + } + } + + /** + * Copied from CHMv8 + * From CLR + */ + private void rotateRight(TreeNode p) { + if (p != null) { + TreeNode l = p.left, pp, lr; + if ((lr = p.left = l.right) != null) { + lr.parent = p; + } + if ((pp = l.parent = p.parent) == null) { + root = l; + } else if (pp.right == p) { + pp.right = l; + } else { + pp.left = l; + } + l.right = p; + p.parent = l; + } + } + + /** + * Returns the TreeNode (or null if not found) for the given + * key. A front-end for recursive version. + */ + final TreeNode getTreeNode(int h, K k) { + return getTreeNode(h, k, root, comparableClassFor(k)); + } + + /** + * Returns the TreeNode (or null if not found) for the given key + * starting at given root. + */ + @SuppressWarnings("unchecked") + final TreeNode getTreeNode (int h, K k, TreeNode p, Class<?> cc) { + // assert k != null; + while (p != null) { + int dir, ph; Object pk; + if ((ph = p.entry.hash) != h) + dir = (h < ph) ? -1 : 1; + else if ((pk = p.entry.key) == k || k.equals(pk)) + return p; + else if (cc == null || comparableClassFor(pk) != cc || + (dir = ((Comparable<Object>)k).compareTo(pk)) == 0) { + // assert pk != null; + TreeNode r, pl, pr; // check both sides + if ((pr = p.right) != null && + (r = getTreeNode(h, k, pr, cc)) != null) + return r; + else if ((pl = p.left) != null) + dir = -1; + else // nothing there + break; + } + p = (dir > 0) ? p.right : p.left; + } + return null; + } + + /* + * Finds or adds a node. + * + * 'entry' should be used to recycle an existing Entry (e.g. in the case + * of converting a linked-list bin to a TreeBin). 
+ * If entry is null, a new Entry will be created for the new TreeNode + * + * @return the TreeNode containing the mapping, or null if a new + * TreeNode was added + */ + @SuppressWarnings("unchecked") + TreeNode putTreeNode(int h, K k, V v, HashMap.Entry<K,V> entry) { + // assert k != null; + //if (entry != null) { + // assert h == entry.hash; + // assert k == entry.key; + // assert v == entry.value; + // } + Class<?> cc = comparableClassFor(k); + TreeNode pp = root, p = null; + int dir = 0; + while (pp != null) { // find existing node or leaf to insert at + int ph; Object pk; + p = pp; + if ((ph = p.entry.hash) != h) + dir = (h < ph) ? -1 : 1; + else if ((pk = p.entry.key) == k || k.equals(pk)) + return p; + else if (cc == null || comparableClassFor(pk) != cc || + (dir = ((Comparable<Object>)k).compareTo(pk)) == 0) { + TreeNode r, pr; + if ((pr = p.right) != null && + (r = getTreeNode(h, k, pr, cc)) != null) + return r; + else // continue left + dir = -1; + } + pp = (dir > 0) ? p.right : p.left; + } + + // Didn't find the mapping in the tree, so add it + TreeNode f = first; + TreeNode x; + if (entry != null) { + x = new TreeNode(entry, f, p); + } else { + x = new TreeNode(newEntry(h, k, v, null), f, p); + } + first = x; + + if (p == null) { + root = x; + } else { // attach and rebalance; adapted from CLR + TreeNode xp, xpp; + if (f != null) { + f.prev = x; + } + if (dir <= 0) { + p.left = x; + } else { + p.right = x; + } + x.red = true; + while (x != null && (xp = x.parent) != null && xp.red + && (xpp = xp.parent) != null) { + TreeNode xppl = xpp.left; + if (xp == xppl) { + TreeNode y = xpp.right; + if (y != null && y.red) { + y.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } else { + if (x == xp.right) { + rotateLeft(x = xp); + xpp = (xp = x.parent) == null ? 
null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateRight(xpp); + } + } + } + } else { + TreeNode y = xppl; + if (y != null && y.red) { + y.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } else { + if (x == xp.left) { + rotateRight(x = xp); + xpp = (xp = x.parent) == null ? null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateLeft(xpp); + } + } + } + } + } + TreeNode r = root; + if (r != null && r.red) { + r.red = false; + } + } + return null; + } + + /* + * From CHMv8 + * + * Removes the given node, that must be present before this + * call. This is messier than typical red-black deletion code + * because we cannot swap the contents of an interior node + * with a leaf successor that is pinned by "next" pointers + * that are accessible independently of lock. So instead we + * swap the tree linkages. + */ + final void deleteTreeNode(TreeNode p) { + TreeNode next = (TreeNode) p.entry.next; // unlink traversal pointers + TreeNode pred = p.prev; + if (pred == null) { + first = next; + } else { + pred.entry.next = next; + } + if (next != null) { + next.prev = pred; + } + TreeNode replacement; + TreeNode pl = p.left; + TreeNode pr = p.right; + if (pl != null && pr != null) { + TreeNode s = pr, sl; + while ((sl = s.left) != null) // find successor + { + s = sl; + } + boolean c = s.red; + s.red = p.red; + p.red = c; // swap colors + TreeNode sr = s.right; + TreeNode pp = p.parent; + if (s == pr) { // p was s's direct parent + p.parent = s; + s.right = p; + } else { + TreeNode sp = s.parent; + if ((p.parent = sp) != null) { + if (s == sp.left) { + sp.left = p; + } else { + sp.right = p; + } + } + if ((s.right = pr) != null) { + pr.parent = s; + } + } + p.left = null; + if ((p.right = sr) != null) { + sr.parent = p; + } + if ((s.left = pl) != null) { + pl.parent = s; + } + if ((s.parent = pp) == null) { + root = s; + } else if (p == pp.left) { + pp.left 
= s; + } else { + pp.right = s; + } + replacement = sr; + } else { + replacement = (pl != null) ? pl : pr; + } + TreeNode pp = p.parent; + if (replacement == null) { + if (pp == null) { + root = null; + return; + } + replacement = p; + } else { + replacement.parent = pp; + if (pp == null) { + root = replacement; + } else if (p == pp.left) { + pp.left = replacement; + } else { + pp.right = replacement; + } + p.left = p.right = p.parent = null; + } + if (!p.red) { // rebalance, from CLR + TreeNode x = replacement; + while (x != null) { + TreeNode xp, xpl; + if (x.red || (xp = x.parent) == null) { + x.red = false; + break; + } + if (x == (xpl = xp.left)) { + TreeNode sib = xp.right; + if (sib != null && sib.red) { + sib.red = false; + xp.red = true; + rotateLeft(xp); + sib = (xp = x.parent) == null ? null : xp.right; + } + if (sib == null) { + x = xp; + } else { + TreeNode sl = sib.left, sr = sib.right; + if ((sr == null || !sr.red) + && (sl == null || !sl.red)) { + sib.red = true; + x = xp; + } else { + if (sr == null || !sr.red) { + if (sl != null) { + sl.red = false; + } + sib.red = true; + rotateRight(sib); + sib = (xp = x.parent) == null ? + null : xp.right; + } + if (sib != null) { + sib.red = (xp == null) ? false : xp.red; + if ((sr = sib.right) != null) { + sr.red = false; + } + } + if (xp != null) { + xp.red = false; + rotateLeft(xp); + } + x = root; + } + } + } else { // symmetric + TreeNode sib = xpl; + if (sib != null && sib.red) { + sib.red = false; + xp.red = true; + rotateRight(xp); + sib = (xp = x.parent) == null ? null : xp.left; + } + if (sib == null) { + x = xp; + } else { + TreeNode sl = sib.left, sr = sib.right; + if ((sl == null || !sl.red) + && (sr == null || !sr.red)) { + sib.red = true; + x = xp; + } else { + if (sl == null || !sl.red) { + if (sr != null) { + sr.red = false; + } + sib.red = true; + rotateLeft(sib); + sib = (xp = x.parent) == null ? + null : xp.left; + } + if (sib != null) { + sib.red = (xp == null) ? 
false : xp.red; + if ((sl = sib.left) != null) { + sl.red = false; + } + } + if (xp != null) { + xp.red = false; + rotateRight(xp); + } + x = root; + } + } + } + } + } + if (p == replacement && (pp = p.parent) != null) { + if (p == pp.left) // detach pointers + { + pp.left = null; + } else if (p == pp.right) { + pp.right = null; + } + p.parent = null; + } + } + } /** * Constructs an empty <tt>HashMap</tt> with the specified initial @@ -233,9 +829,9 @@ if (loadFactor <= 0 || Float.isNaN(loadFactor)) throw new IllegalArgumentException("Illegal load factor: " + loadFactor); - this.loadFactor = loadFactor; threshold = initialCapacity; + hashSeed = initHashSeed(); init(); } @@ -269,10 +865,11 @@ */ public HashMap(Map<? extends K, ? extends V> m) { this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1, - DEFAULT_INITIAL_CAPACITY), DEFAULT_LOAD_FACTOR); + DEFAULT_INITIAL_CAPACITY), DEFAULT_LOAD_FACTOR); inflateTable(threshold); putAllForCreate(m); + // assert size == m.size(); } private static int roundUpToPowerOf2(int number) { @@ -294,7 +891,7 @@ int capacity = roundUpToPowerOf2(toSize); threshold = (int) Math.min(capacity * loadFactor, MAXIMUM_CAPACITY + 1); - table = new Entry[capacity]; + table = new Object[capacity]; } // internal utilities @@ -310,17 +907,24 @@ } /** + * Return an initial value for the hashSeed, or 0 if the random seed is not + * enabled. + */ + final int initHashSeed() { + if (sun.misc.VM.isBooted() && Holder.USE_HASHSEED) { + return sun.misc.Hashing.randomHashSeed(this); + } + return 0; + } + + /** * Retrieve object hash code and applies a supplemental hash function to the - * result hash, which defends against poor quality hash functions. This is + * result hash, which defends against poor quality hash functions. This is * critical because HashMap uses power-of-two length hash tables, that * otherwise encounter collisions for hashCodes that do not differ * in lower bits. 
*/ final int hash(Object k) { - if (k instanceof String) { - return ((String) k).hash32(); - } - int h = hashSeed ^ k.hashCode(); // This function ensures that hashCodes that differ only by @@ -409,19 +1013,35 @@ if (isEmpty()) { return null; } + if (key == null) { + return nullKeyEntry; + } + int hash = hash(key); + int bin = indexFor(hash, table.length); - int hash = (key == null) ? 0 : hash(key); - for (Entry<?,?> e = table[indexFor(hash, table.length)]; - e != null; - e = e.next) { - Object k; - if (e.hash == hash && - ((k = e.key) == key || (key != null && key.equals(k)))) - return (Entry<K,V>)e; + if (table[bin] instanceof Entry) { + Entry<K,V> e = (Entry<K,V>) table[bin]; + for (; e != null; e = (Entry<K,V>)e.next) { + Object k; + if (e.hash == hash && + ((k = e.key) == key || key.equals(k))) { + return e; + } + } + } else if (table[bin] != null) { + TreeBin e = (TreeBin)table[bin]; + TreeNode p = e.getTreeNode(hash, (K)key); + if (p != null) { + // assert p.entry.hash == hash && p.entry.key.equals(key); + return (Entry<K,V>)p.entry; + } else { + return null; + } } return null; } + /** * Associates the specified value with the specified key in this map. * If the map previously contained a mapping for the key, the old @@ -434,28 +1054,57 @@ * (A <tt>null</tt> return can also indicate that the map * previously associated <tt>null</tt> with <tt>key</tt>.) */ + @SuppressWarnings("unchecked") public V put(K key, V value) { if (table == EMPTY_TABLE) { inflateTable(threshold); } - if (key == null) + if (key == null) return putForNullKey(value); int hash = hash(key); int i = indexFor(hash, table.length); - @SuppressWarnings("unchecked") - Entry<K,V> e = (Entry<K,V>)table[i]; - for(; e != null; e = e.next) { - Object k; - if (e.hash == hash && ((k = e.key) == key || key.equals(k))) { - V oldValue = e.value; - e.value = value; - e.recordAccess(this); - return oldValue; + boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin? 
+ + if (table[i] instanceof Entry) { + // Bin contains ordinary Entries. Search for key in the linked list + // of entries, counting the number of entries. Only check for + // TreeBin conversion if the list size is >= TREE_THRESHOLD. + // (The conversion still may not happen if the table gets resized.) + int listSize = 0; + Entry<K,V> e = (Entry<K,V>) table[i]; + for (; e != null; e = (Entry<K,V>)e.next) { + Object k; + if (e.hash == hash && ((k = e.key) == key || key.equals(k))) { + V oldValue = e.value; + e.value = value; + e.recordAccess(this); + return oldValue; + } + listSize++; + } + // Didn't find, so fall through and call addEntry() to add the + // Entry and check for TreeBin conversion. + checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD; + } else if (table[i] != null) { + TreeBin e = (TreeBin)table[i]; + TreeNode p = e.putTreeNode(hash, key, value, null); + if (p == null) { // putTreeNode() added a new node + modCount++; + size++; + if (size >= threshold) { + resize(2 * table.length); + } + return null; + } else { // putTreeNode() found an existing node + Entry<K,V> pEntry = (Entry<K,V>)p.entry; + V oldVal = pEntry.value; + pEntry.value = value; + pEntry.recordAccess(this); + return oldVal; } } - modCount++; - addEntry(hash, key, value, i); + addEntry(hash, key, value, i, checkIfNeedTree); return null; } @@ -463,47 +1112,79 @@ * Offloaded version of put for null keys */ private V putForNullKey(V value) { - @SuppressWarnings("unchecked") - Entry<K,V> e = (Entry<K,V>)table[0]; - for(; e != null; e = e.next) { - if (e.key == null) { - V oldValue = e.value; - e.value = value; - e.recordAccess(this); - return oldValue; - } + if (nullKeyEntry != null) { + V oldValue = nullKeyEntry.value; + nullKeyEntry.value = value; + nullKeyEntry.recordAccess(this); + return oldValue; } modCount++; - addEntry(0, null, value, 0); + size++; // newEntry() skips size++ + nullKeyEntry = newEntry(0, null, value, null); return null; } + private void putForCreateNullKey(V value) { 
+ // Look for preexisting entry for key. This will never happen for + // clone or deserialize. It will only happen for construction if the + // input Map is a sorted map whose ordering is inconsistent w/ equals. + if (nullKeyEntry != null) { + nullKeyEntry.value = value; + } else { + nullKeyEntry = newEntry(0, null, value, null); + size++; + } + } + + /** * This method is used instead of put by constructors and * pseudoconstructors (clone, readObject). It does not resize the table, - * check for comodification, etc. It calls createEntry rather than - * addEntry. + * check for comodification, etc, though it will convert bins to TreeBins + * as needed. It calls createEntry rather than addEntry. */ + @SuppressWarnings("unchecked") private void putForCreate(K key, V value) { - int hash = null == key ? 0 : hash(key); + if (null == key) { + putForCreateNullKey(value); + return; + } + int hash = hash(key); int i = indexFor(hash, table.length); + boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin? /** * Look for preexisting entry for key. This will never happen for * clone or deserialize. It will only happen for construction if the * input Map is a sorted map whose ordering is inconsistent w/ equals. */ - for (@SuppressWarnings("unchecked") - Entry<?,V> e = (Entry<?,V>)table[i]; e != null; e = e.next) { - Object k; - if (e.hash == hash && - ((k = e.key) == key || (key != null && key.equals(k)))) { - e.value = value; - return; + if (table[i] instanceof Entry) { + int listSize = 0; + Entry<K,V> e = (Entry<K,V>) table[i]; + for (; e != null; e = (Entry<K,V>)e.next) { + Object k; + if (e.hash == hash && ((k = e.key) == key || key.equals(k))) { + e.value = value; + return; + } + listSize++; } + // Didn't find, fall through to createEntry(). + // Check for conversion to TreeBin done via createEntry(). 
+ checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD; + } else if (table[i] != null) { + TreeBin e = (TreeBin)table[i]; + TreeNode p = e.putTreeNode(hash, key, value, null); + if (p != null) { + p.entry.setValue(value); // Found an existing node, set value + } else { + size++; // Added a new TreeNode, so update size + } + // don't need modCount++/check for resize - just return + return; } - createEntry(hash, key, value, i); + createEntry(hash, key, value, i, checkIfNeedTree); } private void putAllForCreate(Map<? extends K, ? extends V> m) { @@ -526,14 +1207,14 @@ * is irrelevant). */ void resize(int newCapacity) { - Entry<?,?>[] oldTable = table; + Object[] oldTable = table; int oldCapacity = oldTable.length; if (oldCapacity == MAXIMUM_CAPACITY) { threshold = Integer.MAX_VALUE; return; } - Entry<?,?>[] newTable = new Entry<?,?>[newCapacity]; + Object[] newTable = new Object[newCapacity]; transfer(newTable); table = newTable; threshold = (int)Math.min(newCapacity * loadFactor, MAXIMUM_CAPACITY + 1); @@ -541,19 +1222,31 @@ /** * Transfers all entries from current table to newTable. 
+ * + * Assumes newTable is larger than table */ @SuppressWarnings("unchecked") - void transfer(Entry<?,?>[] newTable) { - Entry<?,?>[] src = table; + void transfer(Object[] newTable) { + Object[] src = table; + // assert newTable.length > src.length : "newTable.length(" + + // newTable.length + ") expected to be > src.length("+src.length+")"; int newCapacity = newTable.length; - for (int j = 0; j < src.length; j++ ) { - Entry<K,V> e = (Entry<K,V>) src[j]; - while(null != e) { - Entry<K,V> next = e.next; - int i = indexFor(e.hash, newCapacity); - e.next = (Entry<K,V>) newTable[i]; - newTable[i] = e; - e = next; + for (int j = 0; j < src.length; j++) { + if (src[j] instanceof Entry) { + // Assume: since wasn't TreeBin before, won't need TreeBin now + Entry<K,V> e = (Entry<K,V>) src[j]; + while (null != e) { + Entry<K,V> next = (Entry<K,V>)e.next; + int i = indexFor(e.hash, newCapacity); + e.next = (Entry<K,V>) newTable[i]; + newTable[i] = e; + e = next; + } + } else if (src[j] != null) { + TreeBin e = (TreeBin) src[j]; + TreeBin loTree = new TreeBin(); + TreeBin hiTree = new TreeBin(); + e.splitTreeBin(newTable, j, loTree, hiTree); } } Arrays.fill(table, null); @@ -585,20 +1278,13 @@ * By using the conservative calculation, we subject ourself * to at most one extra resize. */ - if (numKeysToBeAdded > threshold) { - int targetCapacity = (int)(numKeysToBeAdded / loadFactor + 1); - if (targetCapacity > MAXIMUM_CAPACITY) - targetCapacity = MAXIMUM_CAPACITY; - int newCapacity = table.length; - while (newCapacity < targetCapacity) - newCapacity <<= 1; - if (newCapacity > table.length) - resize(newCapacity); + if (numKeysToBeAdded > threshold && table.length < MAXIMUM_CAPACITY) { + resize(table.length * 2); } for (Map.Entry<? extends K, ? extends V> e : m.entrySet()) put(e.getKey(), e.getValue()); - } + } /** * Removes the mapping for the specified key from this map if present. 
@@ -621,24 +1307,57 @@ if (table == EMPTY_TABLE) { inflateTable(threshold); } - int hash = (key == null) ? 0 : hash(key); - int i = indexFor(hash, table.length); - @SuppressWarnings("unchecked") - Entry<K,V> e = (Entry<K,V>)table[i]; - for(; e != null; e = e.next) { - if (e.hash == hash && Objects.equals(e.key, key)) { - if(e.value != null) { - return e.value; - } - e.value = value; - modCount++; - e.recordAccess(this); + if (key == null) { + if (nullKeyEntry == null || nullKeyEntry.value == null) { + putForNullKey(value); return null; + } else { + return nullKeyEntry.value; } } + int hash = hash(key); + int i = indexFor(hash, table.length); + boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin? + if (table[i] instanceof Entry) { + int listSize = 0; + Entry<K,V> e = (Entry<K,V>) table[i]; + for (; e != null; e = (Entry<K,V>)e.next) { + if (e.hash == hash && Objects.equals(e.key, key)) { + if (e.value != null) { + return e.value; + } + e.value = value; + e.recordAccess(this); + return null; + } + listSize++; + } + // Didn't find, so fall through and call addEntry() to add the + // Entry and check for TreeBin conversion. + checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD; + } else if (table[i] != null) { + TreeBin e = (TreeBin)table[i]; + TreeNode p = e.putTreeNode(hash, key, value, null); + if (p == null) { // not found, putTreeNode() added a new node + modCount++; + size++; + if (size >= threshold) { + resize(2 * table.length); + } + return null; + } else { // putTreeNode() found an existing node + Entry<K,V> pEntry = (Entry<K,V>)p.entry; + V oldVal = pEntry.value; + if (oldVal == null) { // only replace if maps to null + pEntry.value = value; + pEntry.recordAccess(this); + } + return oldVal; + } + } modCount++; - addEntry(hash, key, value, i); + addEntry(hash, key, value, i, checkIfNeedTree); return null; } @@ -647,31 +1366,61 @@ if (isEmpty()) { return false; } - int hash = (key == null) ? 
0 : hash(key); - int i = indexFor(hash, table.length); - @SuppressWarnings("unchecked") - Entry<K,V> prev = (Entry<K,V>)table[i]; - Entry<K,V> e = prev; - - while (e != null) { - Entry<K,V> next = e.next; - if (e.hash == hash && Objects.equals(e.key, key)) { - if (!Objects.equals(e.value, value)) { - return false; - } - modCount++; - size--; - if (prev == e) - table[i] = next; - else - prev.next = next; - e.recordRemoval(this); + if (key == null) { + if (nullKeyEntry != null && + Objects.equals(nullKeyEntry.value, value)) { + removeNullKey(); return true; } - prev = e; - e = next; + return false; } + int hash = hash(key); + int i = indexFor(hash, table.length); + if (table[i] instanceof Entry) { + @SuppressWarnings("unchecked") + Entry<K,V> prev = (Entry<K,V>) table[i]; + Entry<K,V> e = prev; + while (e != null) { + @SuppressWarnings("unchecked") + Entry<K,V> next = (Entry<K,V>) e.next; + if (e.hash == hash && Objects.equals(e.key, key)) { + if (!Objects.equals(e.value, value)) { + return false; + } + modCount++; + size--; + if (prev == e) + table[i] = next; + else + prev.next = next; + e.recordRemoval(this); + return true; + } + prev = e; + e = next; + } + } else if (table[i] != null) { + TreeBin tb = ((TreeBin) table[i]); + TreeNode p = tb.getTreeNode(hash, (K)key); + if (p != null) { + Entry<K,V> pEntry = (Entry<K,V>)p.entry; + // assert pEntry.key.equals(key); + if (Objects.equals(pEntry.value, value)) { + modCount++; + size--; + tb.deleteTreeNode(p); + pEntry.recordRemoval(this); + if (tb.root == null || tb.first == null) { + // assert tb.root == null && tb.first == null : + // "TreeBin.first and root should both be null"; + // TreeBin is now empty, we should blank this bin + table[i] = null; + } + return true; + } + } + } return false; } @@ -680,39 +1429,82 @@ if (isEmpty()) { return false; } - int hash = (key == null) ? 
0 : hash(key); - int i = indexFor(hash, table.length); - @SuppressWarnings("unchecked") - Entry<K,V> e = (Entry<K,V>)table[i]; - for (; e != null; e = e.next) { - if (e.hash == hash && Objects.equals(e.key, key) && Objects.equals(e.value, oldValue)) { - e.value = newValue; - e.recordAccess(this); + if (key == null) { + if (nullKeyEntry != null && + Objects.equals(nullKeyEntry.value, oldValue)) { + putForNullKey(newValue); return true; } + return false; } + int hash = hash(key); + int i = indexFor(hash, table.length); + if (table[i] instanceof Entry) { + @SuppressWarnings("unchecked") + Entry<K,V> e = (Entry<K,V>) table[i]; + for (; e != null; e = (Entry<K,V>)e.next) { + if (e.hash == hash && Objects.equals(e.key, key) && Objects.equals(e.value, oldValue)) { + e.value = newValue; + e.recordAccess(this); + return true; + } + } + return false; + } else if (table[i] != null) { + TreeBin tb = ((TreeBin) table[i]); + TreeNode p = tb.getTreeNode(hash, key); + if (p != null) { + Entry<K,V> pEntry = (Entry<K,V>)p.entry; + // assert pEntry.key.equals(key); + if (Objects.equals(pEntry.value, oldValue)) { + pEntry.value = newValue; + pEntry.recordAccess(this); + return true; + } + } + } return false; } - @Override + @Override public V replace(K key, V value) { if (isEmpty()) { return null; } - int hash = (key == null) ? 
0 : hash(key); + if (key == null) { + if (nullKeyEntry != null) { + return putForNullKey(value); + } + return null; + } + int hash = hash(key); int i = indexFor(hash, table.length); - @SuppressWarnings("unchecked") - Entry<K,V> e = (Entry<K,V>)table[i]; - for (; e != null; e = e.next) { - if (e.hash == hash && Objects.equals(e.key, key)) { - V oldValue = e.value; - e.value = value; - e.recordAccess(this); + if (table[i] instanceof Entry) { + @SuppressWarnings("unchecked") + Entry<K,V> e = (Entry<K,V>)table[i]; + for (; e != null; e = (Entry<K,V>)e.next) { + if (e.hash == hash && Objects.equals(e.key, key)) { + V oldValue = e.value; + e.value = value; + e.recordAccess(this); + return oldValue; + } + } + + return null; + } else if (table[i] != null) { + TreeBin tb = ((TreeBin) table[i]); + TreeNode p = tb.getTreeNode(hash, key); + if (p != null) { + Entry<K,V> pEntry = (Entry<K,V>)p.entry; + // assert pEntry.key.equals(key); + V oldValue = pEntry.value; + pEntry.value = value; + pEntry.recordAccess(this); return oldValue; } } - return null; } @@ -721,21 +1513,75 @@ if (table == EMPTY_TABLE) { inflateTable(threshold); } - int hash = (key == null) ? 0 : hash(key); + if (key == null) { + if (nullKeyEntry == null || nullKeyEntry.value == null) { + V newValue = mappingFunction.apply(key); + if (newValue != null) { + putForNullKey(newValue); + } + return newValue; + } + return nullKeyEntry.value; + } + int hash = hash(key); int i = indexFor(hash, table.length); - @SuppressWarnings("unchecked") - Entry<K,V> e = (Entry<K,V>)table[i]; - for (; e != null; e = e.next) { - if (e.hash == hash && Objects.equals(e.key, key)) { - V oldValue = e.value; - return oldValue == null ? (e.value = mappingFunction.apply(key)) : oldValue; + boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin? 
+ + if (table[i] instanceof Entry) { + int listSize = 0; + @SuppressWarnings("unchecked") + Entry<K,V> e = (Entry<K,V>)table[i]; + for (; e != null; e = (Entry<K,V>)e.next) { + if (e.hash == hash && Objects.equals(e.key, key)) { + V oldValue = e.value; + if (oldValue == null) { + V newValue = mappingFunction.apply(key); + if (newValue != null) { + e.value = newValue; + e.recordAccess(this); + } + return newValue; + } + return oldValue; + } + listSize++; + } + // Didn't find, fall through to call the mapping function + checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD; + } else if (table[i] != null) { + TreeBin e = (TreeBin)table[i]; + V value = mappingFunction.apply(key); + if (value == null) { // Return the existing value, if any + TreeNode p = e.getTreeNode(hash, key); + if (p != null) { + return (V) p.entry.value; + } + return null; + } else { // Put the new value into the Tree, if absent + TreeNode p = e.putTreeNode(hash, key, value, null); + if (p == null) { // not found, new node was added + modCount++; + size++; + if (size >= threshold) { + resize(2 * table.length); + } + return value; + } else { // putTreeNode() found an existing node + Entry<K,V> pEntry = (Entry<K,V>)p.entry; + V oldVal = pEntry.value; + if (oldVal == null) { // only replace if maps to null + pEntry.value = value; + pEntry.recordAccess(this); + return value; + } + return oldVal; + } } } - V newValue = mappingFunction.apply(key); - if (newValue != null) { + if (newValue != null) { // add Entry and check for TreeBin conversion modCount++; - addEntry(hash, key, newValue, i); + addEntry(hash, key, newValue, i, checkIfNeedTree); } return newValue; @@ -746,59 +1592,34 @@ if (isEmpty()) { return null; } - int hash = (key == null) ? 
0 : hash(key); - int i = indexFor(hash, table.length); - @SuppressWarnings("unchecked") - Entry<K,V> prev = (Entry<K,V>)table[i]; - Entry<K,V> e = prev; - - while (e != null) { - Entry<K,V> next = e.next; - if (e.hash == hash && Objects.equals(e.key, key)) { - V oldValue = e.value; - if (oldValue == null) - break; + if (key == null) { + V oldValue; + if (nullKeyEntry != null && (oldValue = nullKeyEntry.value) != null) { V newValue = remappingFunction.apply(key, oldValue); - modCount++; - if (newValue == null) { - size--; - if (prev == e) - table[i] = next; - else - prev.next = next; - e.recordRemoval(this); + if (newValue != null ) { + putForNullKey(newValue); + return newValue; } else { - e.value = newValue; - e.recordAccess(this); + removeNullKey(); } - return newValue; } - prev = e; - e = next; - } - - return null; - } - - @Override - public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) { - if (table == EMPTY_TABLE) { - inflateTable(threshold); + return null; } - int hash = (key == null) ? 
0 : hash(key); + int hash = hash(key); int i = indexFor(hash, table.length); - @SuppressWarnings("unchecked") - Entry<K,V> prev = (Entry<K,V>)table[i]; - Entry<K,V> e = prev; - - while (e != null) { - Entry<K,V> next = e.next; - if (e.hash == hash && Objects.equals(e.key, key)) { - V oldValue = e.value; - V newValue = remappingFunction.apply(key, oldValue); - if (newValue != oldValue) { - modCount++; + if (table[i] instanceof Entry) { + @SuppressWarnings("unchecked") + Entry<K,V> prev = (Entry<K,V>)table[i]; + Entry<K,V> e = prev; + while (e != null) { + Entry<K,V> next = (Entry<K,V>)e.next; + if (e.hash == hash && Objects.equals(e.key, key)) { + V oldValue = e.value; + if (oldValue == null) + break; + V newValue = remappingFunction.apply(key, oldValue); if (newValue == null) { + modCount++; size--; if (prev == e) table[i] = next; @@ -809,17 +1630,136 @@ e.value = newValue; e.recordAccess(this); } + return newValue; } - return newValue; + prev = e; + e = next; + } + } else if (table[i] != null) { + TreeBin tb = (TreeBin)table[i]; + TreeNode p = tb.getTreeNode(hash, key); + if (p != null) { + Entry<K,V> pEntry = (Entry<K,V>)p.entry; + // assert pEntry.key.equals(key); + V oldValue = pEntry.value; + if (oldValue != null) { + V newValue = remappingFunction.apply(key, oldValue); + if (newValue == null) { // remove mapping + modCount++; + size--; + tb.deleteTreeNode(p); + pEntry.recordRemoval(this); + if (tb.root == null || tb.first == null) { + // assert tb.root == null && tb.first == null : + // "TreeBin.first and root should both be null"; + // TreeBin is now empty, we should blank this bin + table[i] = null; + } + } else { + pEntry.value = newValue; + pEntry.recordAccess(this); + } + return newValue; + } + } + } + return null; + } + + @Override + public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) { + if (table == EMPTY_TABLE) { + inflateTable(threshold); + } + if (key == null) { + V oldValue = nullKeyEntry == null ? 
null : nullKeyEntry.value; + V newValue = remappingFunction.apply(key, oldValue); + if (newValue != oldValue) { + if (newValue == null) { + removeNullKey(); + } else { + putForNullKey(newValue); + } } - prev = e; - e = next; + return newValue; + } + int hash = hash(key); + int i = indexFor(hash, table.length); + boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin? + + if (table[i] instanceof Entry) { + int listSize = 0; + @SuppressWarnings("unchecked") + Entry<K,V> prev = (Entry<K,V>)table[i]; + Entry<K,V> e = prev; + + while (e != null) { + Entry<K,V> next = (Entry<K,V>)e.next; + if (e.hash == hash && Objects.equals(e.key, key)) { + V oldValue = e.value; + V newValue = remappingFunction.apply(key, oldValue); + if (newValue != oldValue) { + if (newValue == null) { + modCount++; + size--; + if (prev == e) + table[i] = next; + else + prev.next = next; + e.recordRemoval(this); + } else { + e.value = newValue; + e.recordAccess(this); + } + } + return newValue; + } + prev = e; + e = next; + listSize++; + } + checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD; + } else if (table[i] != null) { + TreeBin tb = (TreeBin)table[i]; + TreeNode p = tb.getTreeNode(hash, key); + V oldValue = p == null ? 
null : (V)p.entry.value; + V newValue = remappingFunction.apply(key, oldValue); + if (newValue != oldValue) { + if (newValue == null) { + Entry<K,V> pEntry = (Entry<K,V>)p.entry; + modCount++; + size--; + tb.deleteTreeNode(p); + pEntry.recordRemoval(this); + if (tb.root == null || tb.first == null) { + // assert tb.root == null && tb.first == null : + // "TreeBin.first and root should both be null"; + // TreeBin is now empty, we should blank this bin + table[i] = null; + } + } else { + if (p != null) { // just update the value + Entry<K,V> pEntry = (Entry<K,V>)p.entry; + pEntry.value = newValue; + pEntry.recordAccess(this); + } else { // need to put new node + p = tb.putTreeNode(hash, key, newValue, null); + // assert p == null; // should have added a new node + modCount++; + size++; + if (size >= threshold) { + resize(2 * table.length); + } + } + } + } + return newValue; } V newValue = remappingFunction.apply(key, null); if (newValue != null) { modCount++; - addEntry(hash, key, newValue, i); + addEntry(hash, key, newValue, i, checkIfNeedTree); } return newValue; @@ -830,40 +1770,96 @@ if (table == EMPTY_TABLE) { inflateTable(threshold); } - int hash = (key == null) ? 0 : hash(key); + if (key == null) { + V oldValue = nullKeyEntry == null ? null : nullKeyEntry.value; + V newValue = oldValue == null ? value : remappingFunction.apply(oldValue, value); + if (newValue != null) { + putForNullKey(newValue); + } else if (nullKeyEntry != null) { + removeNullKey(); + } + return newValue; + } + int hash = hash(key); int i = indexFor(hash, table.length); - @SuppressWarnings("unchecked") - Entry<K,V> prev = (Entry<K,V>)table[i]; - Entry<K,V> e = prev; + boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin? 
+ + if (table[i] instanceof Entry) { + int listSize = 0; + @SuppressWarnings("unchecked") + Entry<K,V> prev = (Entry<K,V>)table[i]; + Entry<K,V> e = prev; - while (e != null) { - Entry<K,V> next = e.next; - if (e.hash == hash && Objects.equals(e.key, key)) { - V oldValue = e.value; - V newValue = remappingFunction.apply(oldValue, value); - modCount++; - if (newValue == null) { + while (e != null) { + Entry<K,V> next = (Entry<K,V>)e.next; + if (e.hash == hash && Objects.equals(e.key, key)) { + V oldValue = e.value; + V newValue = (oldValue == null) ? value : + remappingFunction.apply(oldValue, value); + if (newValue == null) { + modCount++; + size--; + if (prev == e) + table[i] = next; + else + prev.next = next; + e.recordRemoval(this); + } else { + e.value = newValue; + e.recordAccess(this); + } + return newValue; + } + prev = e; + e = next; + listSize++; + } + // Didn't find, so fall through and (maybe) call addEntry() to add + // the Entry and check for TreeBin conversion. + checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD; + } else if (table[i] != null) { + TreeBin tb = (TreeBin)table[i]; + TreeNode p = tb.getTreeNode(hash, key); + V oldValue = p == null ? null : (V)p.entry.value; + V newValue = (oldValue == null) ? 
value : + remappingFunction.apply(oldValue, value); + if (newValue == null) { + if (p != null) { + Entry<K,V> pEntry = (Entry<K,V>)p.entry; + modCount++; size--; - if (prev == e) - table[i] = next; - else - prev.next = next; - e.recordRemoval(this); - } else { - e.value = newValue; - e.recordAccess(this); + tb.deleteTreeNode(p); + pEntry.recordRemoval(this); + + if (tb.root == null || tb.first == null) { + // assert tb.root == null && tb.first == null : + // "TreeBin.first and root should both be null"; + // TreeBin is now empty, we should blank this bin + table[i] = null; + } } - return newValue; + return null; + } else if (newValue != oldValue) { + if (p != null) { // just update the value + Entry<K,V> pEntry = (Entry<K,V>)p.entry; + pEntry.value = newValue; + pEntry.recordAccess(this); + } else { // need to put new node + p = tb.putTreeNode(hash, key, newValue, null); + // assert p == null; // should have added a new node + modCount++; + size++; + if (size >= threshold) { + resize(2 * table.length); + } + } } - prev = e; - e = next; + return newValue; } - if (value != null) { modCount++; - addEntry(hash, key, value, i); + addEntry(hash, key, value, i, checkIfNeedTree); } - return value; } @@ -873,36 +1869,65 @@ * Removes and returns the entry associated with the specified key * in the HashMap. Returns null if the HashMap contains no mapping * for this key. + * + * We don't bother converting TreeBins back to Entry lists if the bin falls + * back below TREE_THRESHOLD, but we do clear bins when removing the last + * TreeNode in a TreeBin. */ final Entry<K,V> removeEntryForKey(Object key) { if (isEmpty()) { return null; } - int hash = (key == null) ? 
0 : hash(key); + if (key == null) { + if (nullKeyEntry != null) { + return removeNullKey(); + } + return null; + } + int hash = hash(key); int i = indexFor(hash, table.length); - @SuppressWarnings("unchecked") + + if (table[i] instanceof Entry) { + @SuppressWarnings("unchecked") Entry<K,V> prev = (Entry<K,V>)table[i]; - Entry<K,V> e = prev; + Entry<K,V> e = prev; - while (e != null) { - Entry<K,V> next = e.next; - Object k; - if (e.hash == hash && - ((k = e.key) == key || (key != null && key.equals(k)))) { + while (e != null) { + @SuppressWarnings("unchecked") + Entry<K,V> next = (Entry<K,V>) e.next; + if (e.hash == hash && Objects.equals(e.key, key)) { + modCount++; + size--; + if (prev == e) + table[i] = next; + else + prev.next = next; + e.recordRemoval(this); + return e; + } + prev = e; + e = next; + } + } else if (table[i] != null) { + TreeBin tb = ((TreeBin) table[i]); + TreeNode p = tb.getTreeNode(hash, (K)key); + if (p != null) { + Entry<K,V> pEntry = (Entry<K,V>)p.entry; + // assert pEntry.key.equals(key); modCount++; size--; - if (prev == e) - table[i] = next; - else - prev.next = next; - e.recordRemoval(this); - return e; + tb.deleteTreeNode(p); + pEntry.recordRemoval(this); + if (tb.root == null || tb.first == null) { + // assert tb.root == null && tb.first == null : + // "TreeBin.first and root should both be null"; + // TreeBin is now empty, we should blank this bin + table[i] = null; + } + return pEntry; } - prev = e; - e = next; } - - return e; + return null; } /** @@ -915,29 +1940,75 @@ Map.Entry<?,?> entry = (Map.Entry<?,?>) o; Object key = entry.getKey(); - int hash = (key == null) ? 
0 : hash(key); + + if (key == null) { + if (entry.equals(nullKeyEntry)) { + return removeNullKey(); + } + return null; + } + + int hash = hash(key); int i = indexFor(hash, table.length); - @SuppressWarnings("unchecked") - Entry<K,V> prev = (Entry<K,V>)table[i]; - Entry<K,V> e = prev; + + if (table[i] instanceof Entry) { + @SuppressWarnings("unchecked") + Entry<K,V> prev = (Entry<K,V>)table[i]; + Entry<K,V> e = prev; - while (e != null) { - Entry<K,V> next = e.next; - if (e.hash == hash && e.equals(entry)) { + while (e != null) { + @SuppressWarnings("unchecked") + Entry<K,V> next = (Entry<K,V>)e.next; + if (e.hash == hash && e.equals(entry)) { + modCount++; + size--; + if (prev == e) + table[i] = next; + else + prev.next = next; + e.recordRemoval(this); + return e; + } + prev = e; + e = next; + } + } else if (table[i] != null) { + TreeBin tb = ((TreeBin) table[i]); + TreeNode p = tb.getTreeNode(hash, (K)key); + if (p != null && p.entry.equals(entry)) { + @SuppressWarnings("unchecked") + Entry<K,V> pEntry = (Entry<K,V>)p.entry; + // assert pEntry.key.equals(key); modCount++; size--; - if (prev == e) - table[i] = next; - else - prev.next = next; - e.recordRemoval(this); - return e; + tb.deleteTreeNode(p); + pEntry.recordRemoval(this); + if (tb.root == null || tb.first == null) { + // assert tb.root == null && tb.first == null : + // "TreeBin.first and root should both be null"; + // TreeBin is now empty, we should blank this bin + table[i] = null; + } + return pEntry; } - prev = e; - e = next; } + return null; + } - return e; + /* + * Remove the mapping for the null key, and update internal accounting + * (size, modcount, recordRemoval, etc). + * + * Assumes nullKeyEntry is non-null. 
+ */ + private Entry<K,V> removeNullKey() { + // assert nullKeyEntry != null; + Entry<K,V> retVal = nullKeyEntry; + modCount++; + size--; + retVal.recordRemoval(this); + nullKeyEntry = null; + return retVal; } /** @@ -946,6 +2017,9 @@ */ public void clear() { modCount++; + if (nullKeyEntry != null) { + nullKeyEntry = null; + } Arrays.fill(table, null); size = 0; } @@ -959,27 +2033,58 @@ * specified value */ public boolean containsValue(Object value) { - if (value == null) + if (value == null) { return containsNullValue(); - - Entry<?,?>[] tab = table; - for (int i = 0; i < tab.length; i++) - for (Entry<?,?> e = tab[i]; e != null; e = e.next) - if (value.equals(e.value)) - return true; - return false; + } + Object[] tab = table; + for (int i = 0; i < tab.length; i++) { + if (tab[i] instanceof Entry) { + Entry<?,?> e = (Entry<?,?>)tab[i]; + for (; e != null; e = (Entry<?,?>)e.next) { + if (value.equals(e.value)) { + return true; + } + } + } else if (tab[i] != null) { + TreeBin e = (TreeBin)tab[i]; + TreeNode p = e.first; + for (; p != null; p = (TreeNode) p.entry.next) { + if (value == p.entry.value || value.equals(p.entry.value)) { + return true; + } + } + } + } + // Didn't find value in table - could be in nullKeyEntry + return (nullKeyEntry != null && (value == nullKeyEntry.value || + value.equals(nullKeyEntry.value))); } /** * Special-case code for containsValue with null argument */ private boolean containsNullValue() { - Entry<?,?>[] tab = table; - for (int i = 0; i < tab.length; i++) - for (Entry<?,?> e = tab[i]; e != null; e = e.next) - if (e.value == null) - return true; - return false; + Object[] tab = table; + for (int i = 0; i < tab.length; i++) { + if (tab[i] instanceof Entry) { + Entry<K,V> e = (Entry<K,V>)tab[i]; + for (; e != null; e = (Entry<K,V>)e.next) { + if (e.value == null) { + return true; + } + } + } else if (tab[i] != null) { + TreeBin e = (TreeBin)tab[i]; + TreeNode p = e.first; + for (; p != null; p = (TreeNode) p.entry.next) { + if 
(p.entry.value == null) { + return true; + } + } + } + } + // Didn't find value in table - could be in nullKeyEntry + return (nullKeyEntry != null && nullKeyEntry.value == null); } /** @@ -1007,6 +2112,7 @@ result.entrySet = null; result.modCount = 0; result.size = 0; + result.nullKeyEntry = null; result.init(); result.putAllForCreate(this); @@ -1016,13 +2122,13 @@ static class Entry<K,V> implements Map.Entry<K,V> { final K key; V value; - Entry<K,V> next; + Object next; // an Entry, or a TreeNode final int hash; /** * Creates new entry. */ - Entry(int h, K k, V v, Entry<K,V> n) { + Entry(int h, K k, V v, Object n) { value = v; next = n; key = k; @@ -1054,7 +2160,7 @@ Object v2 = e.getValue(); if (v1 == v2 || (v1 != null && v1.equals(v2))) return true; - } + } return false; } @@ -1068,8 +2174,7 @@ /** * This method is invoked whenever the value in an entry is - * overwritten by an invocation of put(k,v) for a key k that's already - * in the HashMap. + * overwritten for a key that's already in the HashMap. */ void recordAccess(HashMap<K,V> m) { } @@ -1082,50 +2187,96 @@ } } + void addEntry(int hash, K key, V value, int bucketIndex) { + addEntry(hash, key, value, bucketIndex, true); + } + /** * Adds a new entry with the specified key, value and hash code to * the specified bucket. It is the responsibility of this - * method to resize the table if appropriate. + * method to resize the table if appropriate. The new entry is then + * created by calling createEntry(). * * Subclass overrides this to alter the behavior of put method. + * + * If checkIfNeedTree is false, it is known that this bucket will not need + * to be converted to a TreeBin, so don't bothering checking. + * + * Assumes key is not null. 
*/ - void addEntry(int hash, K key, V value, int bucketIndex) { + void addEntry(int hash, K key, V value, int bucketIndex, boolean checkIfNeedTree) { + // assert key != null; if ((size >= threshold) && (null != table[bucketIndex])) { resize(2 * table.length); - hash = (null != key) ? hash(key) : 0; + hash = hash(key); bucketIndex = indexFor(hash, table.length); } - - createEntry(hash, key, value, bucketIndex); + createEntry(hash, key, value, bucketIndex, checkIfNeedTree); } /** - * Like addEntry except that this version is used when creating entries + * Called by addEntry(), and also used when creating entries * as part of Map construction or "pseudo-construction" (cloning, - * deserialization). This version needn't worry about resizing the table. + * deserialization). This version does not check for resizing of the table. * - * Subclass overrides this to alter the behavior of HashMap(Map), - * clone, and readObject. + * This method is responsible for converting a bucket to a TreeBin once + * TREE_THRESHOLD is reached. However if checkIfNeedTree is false, it is known + * that this bucket will not need to be converted to a TreeBin, so don't + * bother checking. The new entry is constructed by calling newEntry(). + * + * Assumes key is not null. + * + * Note: buckets already converted to a TreeBin don't call this method, but + * instead call TreeBin.putTreeNode() to create new entries. 
*/ - void createEntry(int hash, K key, V value, int bucketIndex) { + void createEntry(int hash, K key, V value, int bucketIndex, boolean checkIfNeedTree) { + // assert key != null; @SuppressWarnings("unchecked") Entry<K,V> e = (Entry<K,V>)table[bucketIndex]; - table[bucketIndex] = new Entry<>(hash, key, value, e); + table[bucketIndex] = newEntry(hash, key, value, e); size++; + + if (checkIfNeedTree) { + int listSize = 0; + for (e = (Entry<K,V>) table[bucketIndex]; e != null; e = (Entry<K,V>)e.next) { + listSize++; + if (listSize >= TreeBin.TREE_THRESHOLD) { // Convert to TreeBin + if (comparableClassFor(key) != null) { + TreeBin t = new TreeBin(); + t.populate((Entry)table[bucketIndex]); + table[bucketIndex] = t; + } + break; + } + } + } } + /* + * Factory method to create a new Entry object. + */ + Entry<K,V> newEntry(int hash, K key, V value, Object next) { + return new HashMap.Entry<>(hash, key, value, next); + } + + private abstract class HashIterator<E> implements Iterator<E> { - Entry<?,?> next; // next entry to return + Object next; // next entry to return, an Entry or a TreeNode int expectedModCount; // For fast-fail int index; // current slot - Entry<?,?> current; // current entry + Object current; // current entry, an Entry or a TreeNode HashIterator() { expectedModCount = modCount; if (size > 0) { // advance to first entry - Entry<?,?>[] t = table; - while (index < t.length && (next = t[index++]) == null) - ; + if (nullKeyEntry != null) { + // assert nullKeyEntry.next == null; + // This works with nextEntry(): nullKeyEntry isa Entry, and + // e.next will be null, so we'll hit the findNextBin() call. 
+ next = nullKeyEntry; + } else { + findNextBin(); + } } } @@ -1135,19 +2286,28 @@ @SuppressWarnings("unchecked") final Entry<K,V> nextEntry() { - if (modCount != expectedModCount) + if (modCount != expectedModCount) { throw new ConcurrentModificationException(); - Entry<?,?> e = next; + } + Object e = next; + Entry<K,V> retVal; + if (e == null) throw new NoSuchElementException(); - if ((next = e.next) == null) { - Entry<?,?>[] t = table; - while (index < t.length && (next = t[index++]) == null) - ; + if (e instanceof Entry) { + retVal = (Entry<K,V>)e; + next = ((Entry<K,V>)e).next; + } else { // TreeBin + retVal = (Entry<K,V>)((TreeNode)e).entry; + next = retVal.next; + } + + if (next == null) { // Move to next bin + findNextBin(); } current = e; - return (Entry<K,V>)e; + return retVal; } public void remove() { @@ -1155,11 +2315,33 @@ throw new IllegalStateException(); if (modCount != expectedModCount) throw new ConcurrentModificationException(); - Object k = current.key; + K k; + + if (current instanceof Entry) { + k = ((Entry<K,V>)current).key; + } else { + k = ((Entry<K,V>)((TreeNode)current).entry).key; + + } current = null; HashMap.this.removeEntryForKey(k); expectedModCount = modCount; } + + /* + * Set 'next' to the first entry of the next non-empty bin in the table + */ + private void findNextBin() { + // assert next == null; + Object[] t = table; + + while (index < t.length && (next = t[index++]) == null) + ; + if (next instanceof HashMap.TreeBin) { // Point to the first TreeNode + next = ((TreeBin) next).first; + // assert next != null; // There should be no empty TreeBins + } + } } private final class ValueIterator extends HashIterator<V> { @@ -1357,7 +2539,7 @@ if (table==EMPTY_TABLE) { s.writeInt(roundUpToPowerOf2(threshold)); } else { - s.writeInt(table.length); + s.writeInt(table.length); } // Write out size (number of Mappings) @@ -1389,8 +2571,10 @@ } // set other fields that need values - Holder.UNSAFE.putIntVolatile(this, Holder.HASHSEED_OFFSET, 
- sun.misc.Hashing.randomHashSeed(this)); + if (Holder.USE_HASHSEED) { + Holder.UNSAFE.putIntVolatile(this, Holder.HASHSEED_OFFSET, + sun.misc.Hashing.randomHashSeed(this)); + } table = EMPTY_TABLE; // Read in number of buckets @@ -1404,9 +2588,9 @@ // capacity chosen by number of mappings and desired load (if >= 0.25) int capacity = (int) Math.min( - mappings * Math.min(1 / loadFactor, 4.0f), - // we have limits... - HashMap.MAXIMUM_CAPACITY); + mappings * Math.min(1 / loadFactor, 4.0f), + // we have limits... + HashMap.MAXIMUM_CAPACITY); // allocate the bucket array; if (mappings > 0) { @@ -1420,9 +2604,9 @@ // Read the keys and values, and put the mappings in the HashMap for (int i=0; i<mappings; i++) { @SuppressWarnings("unchecked") - K key = (K) s.readObject(); + K key = (K) s.readObject(); @SuppressWarnings("unchecked") - V value = (V) s.readObject(); + V value = (V) s.readObject(); putForCreate(key, value); } } @@ -1436,11 +2620,17 @@ */ static class HashMapSpliterator<K,V> { final HashMap<K,V> map; - HashMap.Entry<K,V> current; // current node + Object current; // current node, can be Entry or TreeNode int index; // current index, modified on advance/split int fence; // one past last index int est; // size estimate int expectedModCount; // for comodification checks + boolean acceptedNull; // Have we accepted the null key? + // Without this, we can't distinguish + // between being at the very beginning (and + // needing to accept null), or being at the + // end of the list in bin 0. In both cases, + // current == null && index == 0. HashMapSpliterator(HashMap<K,V> m, int origin, int fence, int est, @@ -1450,6 +2640,7 @@ this.fence = fence; this.est = est; this.expectedModCount = expectedModCount; + this.acceptedNull = false; } final int getFence() { // initialize fence and size on first use @@ -1479,9 +2670,15 @@ public KeySpliterator<K,V> trySplit() { int hi = getFence(), lo = index, mid = (lo + hi) >>> 1; - return (lo >= mid || current != null) ? 
null : - new KeySpliterator<K,V>(map, lo, index = mid, est >>>= 1, - expectedModCount); + if (lo >= mid || current != null) { + return null; + } else { + KeySpliterator<K,V> retVal = new KeySpliterator<K,V>(map, lo, + index = mid, est >>>= 1, expectedModCount); + // Only 'this' Spliterator chould check for null. + retVal.acceptedNull = true; + return retVal; + } } @SuppressWarnings("unchecked") @@ -1490,21 +2687,39 @@ if (action == null) throw new NullPointerException(); HashMap<K,V> m = map; - HashMap.Entry<K,V>[] tab = (HashMap.Entry<K,V>[])m.table; + Object[] tab = m.table; if ((hi = fence) < 0) { mc = expectedModCount = m.modCount; hi = fence = tab.length; } else mc = expectedModCount; - if (tab.length >= hi && (i = index) >= 0 && i < (index = hi)) { - HashMap.Entry<K,V> p = current; + + if (!acceptedNull) { + acceptedNull = true; + if (m.nullKeyEntry != null) { + action.accept(m.nullKeyEntry.key); + } + } + if (tab.length >= hi && (i = index) >= 0 && + (i < (index = hi) || current != null)) { + Object p = current; + current = null; do { - if (p == null) + if (p == null) { p = tab[i++]; - else { - action.accept(p.getKey()); - p = p.next; + if (p instanceof HashMap.TreeBin) { + p = ((HashMap.TreeBin)p).first; + } + } else { + HashMap.Entry<K,V> entry; + if (p instanceof HashMap.Entry) { + entry = (HashMap.Entry<K,V>)p; + } else { + entry = (HashMap.Entry<K,V>)((TreeNode)p).entry; + } + action.accept(entry.key); + p = entry.next; } } while (p != null || i < hi); if (m.modCount != mc) @@ -1517,14 +2732,34 @@ int hi; if (action == null) throw new NullPointerException(); - HashMap.Entry<K,V>[] tab = (HashMap.Entry<K,V>[])map.table; - if (tab.length >= (hi = getFence()) && index >= 0) { + Object[] tab = map.table; + hi = getFence(); + + if (!acceptedNull) { + acceptedNull = true; + if (map.nullKeyEntry != null) { + action.accept(map.nullKeyEntry.key); + if (map.modCount != expectedModCount) + throw new ConcurrentModificationException(); + return true; + } + } + if 
(tab.length >= hi && index >= 0) { while (current != null || index < hi) { - if (current == null) + if (current == null) { current = tab[index++]; - else { - K k = current.getKey(); - current = current.next; + if (current instanceof HashMap.TreeBin) { + current = ((HashMap.TreeBin)current).first; + } + } else { + HashMap.Entry<K,V> entry; + if (current instanceof HashMap.Entry) { + entry = (HashMap.Entry<K,V>)current; + } else { + entry = (HashMap.Entry<K,V>)((TreeNode)current).entry; + } + K k = entry.key; + current = entry.next; action.accept(k); if (map.modCount != expectedModCount) throw new ConcurrentModificationException(); @@ -1551,9 +2786,15 @@ public ValueSpliterator<K,V> trySplit() { int hi = getFence(), lo = index, mid = (lo + hi) >>> 1; - return (lo >= mid || current != null) ? null : - new ValueSpliterator<K,V>(map, lo, index = mid, est >>>= 1, - expectedModCount); + if (lo >= mid || current != null) { + return null; + } else { + ValueSpliterator<K,V> retVal = new ValueSpliterator<K,V>(map, + lo, index = mid, est >>>= 1, expectedModCount); + // Only 'this' Spliterator chould check for null. 
+ retVal.acceptedNull = true; + return retVal; + } } @SuppressWarnings("unchecked") @@ -1562,21 +2803,39 @@ if (action == null) throw new NullPointerException(); HashMap<K,V> m = map; - HashMap.Entry<K,V>[] tab = (HashMap.Entry<K,V>[])m.table; + Object[] tab = m.table; if ((hi = fence) < 0) { mc = expectedModCount = m.modCount; hi = fence = tab.length; } else mc = expectedModCount; - if (tab.length >= hi && (i = index) >= 0 && i < (index = hi)) { - HashMap.Entry<K,V> p = current; + + if (!acceptedNull) { + acceptedNull = true; + if (m.nullKeyEntry != null) { + action.accept(m.nullKeyEntry.value); + } + } + if (tab.length >= hi && (i = index) >= 0 && + (i < (index = hi) || current != null)) { + Object p = current; + current = null; do { - if (p == null) + if (p == null) { p = tab[i++]; - else { - action.accept(p.getValue()); - p = p.next; + if (p instanceof HashMap.TreeBin) { + p = ((HashMap.TreeBin)p).first; + } + } else { + HashMap.Entry<K,V> entry; + if (p instanceof HashMap.Entry) { + entry = (HashMap.Entry<K,V>)p; + } else { + entry = (HashMap.Entry<K,V>)((TreeNode)p).entry; + } + action.accept(entry.value); + p = entry.next; } } while (p != null || i < hi); if (m.modCount != mc) @@ -1589,14 +2848,34 @@ int hi; if (action == null) throw new NullPointerException(); - HashMap.Entry<K,V>[] tab = (HashMap.Entry<K,V>[])map.table; - if (tab.length >= (hi = getFence()) && index >= 0) { + Object[] tab = map.table; + hi = getFence(); + + if (!acceptedNull) { + acceptedNull = true; + if (map.nullKeyEntry != null) { + action.accept(map.nullKeyEntry.value); + if (map.modCount != expectedModCount) + throw new ConcurrentModificationException(); + return true; + } + } + if (tab.length >= hi && index >= 0) { while (current != null || index < hi) { - if (current == null) + if (current == null) { current = tab[index++]; - else { - V v = current.getValue(); - current = current.next; + if (current instanceof HashMap.TreeBin) { + current = ((HashMap.TreeBin)current).first; + } + } 
else { + HashMap.Entry<K,V> entry; + if (current instanceof HashMap.Entry) { + entry = (Entry<K,V>)current; + } else { + entry = (Entry<K,V>)((TreeNode)current).entry; + } + V v = entry.value; + current = entry.next; action.accept(v); if (map.modCount != expectedModCount) throw new ConcurrentModificationException(); @@ -1622,9 +2901,15 @@ public EntrySpliterator<K,V> trySplit() { int hi = getFence(), lo = index, mid = (lo + hi) >>> 1; - return (lo >= mid || current != null) ? null : - new EntrySpliterator<K,V>(map, lo, index = mid, est >>>= 1, - expectedModCount); + if (lo >= mid || current != null) { + return null; + } else { + EntrySpliterator<K,V> retVal = new EntrySpliterator<K,V>(map, + lo, index = mid, est >>>= 1, expectedModCount); + // Only 'this' Spliterator chould check for null. + retVal.acceptedNull = true; + return retVal; + } } @SuppressWarnings("unchecked") @@ -1633,21 +2918,40 @@ if (action == null) throw new NullPointerException(); HashMap<K,V> m = map; - HashMap.Entry<K,V>[] tab = (HashMap.Entry<K,V>[])m.table; + Object[] tab = m.table; if ((hi = fence) < 0) { mc = expectedModCount = m.modCount; hi = fence = tab.length; } else mc = expectedModCount; - if (tab.length >= hi && (i = index) >= 0 && i < (index = hi)) { - HashMap.Entry<K,V> p = current; + + if (!acceptedNull) { + acceptedNull = true; + if (m.nullKeyEntry != null) { + action.accept(m.nullKeyEntry); + } + } + if (tab.length >= hi && (i = index) >= 0 && + (i < (index = hi) || current != null)) { + Object p = current; + current = null; do { - if (p == null) + if (p == null) { p = tab[i++]; - else { - action.accept(p); - p = p.next; + if (p instanceof HashMap.TreeBin) { + p = ((HashMap.TreeBin)p).first; + } + } else { + HashMap.Entry<K,V> entry; + if (p instanceof HashMap.Entry) { + entry = (HashMap.Entry<K,V>)p; + } else { + entry = (HashMap.Entry<K,V>)((TreeNode)p).entry; + } + action.accept(entry); + p = entry.next; + } } while (p != null || i < hi); if (m.modCount != mc) @@ -1660,14 
+2964,33 @@ int hi; if (action == null) throw new NullPointerException(); - HashMap.Entry<K,V>[] tab = (HashMap.Entry<K,V>[])map.table; - if (tab.length >= (hi = getFence()) && index >= 0) { + Object[] tab = map.table; + hi = getFence(); + + if (!acceptedNull) { + acceptedNull = true; + if (map.nullKeyEntry != null) { + action.accept(map.nullKeyEntry); + if (map.modCount != expectedModCount) + throw new ConcurrentModificationException(); + return true; + } + } + if (tab.length >= hi && index >= 0) { while (current != null || index < hi) { - if (current == null) + if (current == null) { current = tab[index++]; - else { - HashMap.Entry<K,V> e = current; - current = current.next; + if (current instanceof HashMap.TreeBin) { + current = ((HashMap.TreeBin)current).first; + } + } else { + HashMap.Entry<K,V> e; + if (current instanceof HashMap.Entry) { + e = (Entry<K,V>)current; + } else { + e = (Entry<K,V>)((TreeNode)current).entry; + } + current = e.next; action.accept(e); if (map.modCount != expectedModCount) throw new ConcurrentModificationException();
--- a/src/share/classes/java/util/Hashtable.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/util/Hashtable.java Wed Jun 05 13:10:11 2013 -0300 @@ -180,13 +180,27 @@ */ static final long HASHSEED_OFFSET; + static final boolean USE_HASHSEED; + static { - try { - UNSAFE = sun.misc.Unsafe.getUnsafe(); - HASHSEED_OFFSET = UNSAFE.objectFieldOffset( - Hashtable.class.getDeclaredField("hashSeed")); - } catch (NoSuchFieldException | SecurityException e) { - throw new InternalError("Failed to record hashSeed offset", e); + String hashSeedProp = java.security.AccessController.doPrivileged( + new sun.security.action.GetPropertyAction( + "jdk.map.useRandomSeed")); + boolean localBool = (null != hashSeedProp) + ? Boolean.parseBoolean(hashSeedProp) : false; + USE_HASHSEED = localBool; + + if (USE_HASHSEED) { + try { + UNSAFE = sun.misc.Unsafe.getUnsafe(); + HASHSEED_OFFSET = UNSAFE.objectFieldOffset( + Hashtable.class.getDeclaredField("hashSeed")); + } catch (NoSuchFieldException | SecurityException e) { + throw new InternalError("Failed to record hashSeed offset", e); + } + } else { + UNSAFE = null; + HASHSEED_OFFSET = 0; } } } @@ -194,21 +208,24 @@ /** * A randomizing value associated with this instance that is applied to * hash code of keys to make hash collisions harder to find. + * + * Non-final so it can be set lazily, but be sure not to set more than once. */ - transient final int hashSeed = sun.misc.Hashing.randomHashSeed(this); + transient final int hashSeed; + + /** + * Return an initial value for the hashSeed, or 0 if the random seed is not + * enabled. 
+ */ + final int initHashSeed() { + if (sun.misc.VM.isBooted() && Holder.USE_HASHSEED) { + return sun.misc.Hashing.randomHashSeed(this); + } + return 0; + } private int hash(Object k) { - if (k instanceof String) { - return ((String)k).hash32(); - } - - int h = hashSeed ^ k.hashCode(); - - // This function ensures that hashCodes that differ only by - // constant multiples at each bit position have a bounded - // number of collisions (approximately 8 at default load factor). - h ^= (h >>> 20) ^ (h >>> 12); - return h ^ (h >>> 7) ^ (h >>> 4); + return hashSeed ^ k.hashCode(); } /** @@ -232,6 +249,7 @@ this.loadFactor = loadFactor; table = new Entry<?,?>[initialCapacity]; threshold = (int)Math.min(initialCapacity * loadFactor, MAX_ARRAY_SIZE + 1); + hashSeed = initHashSeed(); } /** @@ -1187,8 +1205,10 @@ s.defaultReadObject(); // set hashMask - Holder.UNSAFE.putIntVolatile(this, Holder.HASHSEED_OFFSET, - sun.misc.Hashing.randomHashSeed(this)); + if (Holder.USE_HASHSEED) { + Holder.UNSAFE.putIntVolatile(this, Holder.HASHSEED_OFFSET, + sun.misc.Hashing.randomHashSeed(this)); + } // Read the original length of the array and number of elements int origlength = s.readInt();
--- a/src/share/classes/java/util/IntSummaryStatistics.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/util/IntSummaryStatistics.java Wed Jun 05 13:10:11 2013 -0300 @@ -159,7 +159,7 @@ */ public String toString() { return String.format( - "%s{count=%d, sum=%d, min=%d, average=%d, max=%d}", + "%s{count=%d, sum=%d, min=%d, average=%f, max=%d}", this.getClass().getSimpleName(), getCount(), getSum(),
--- a/src/share/classes/java/util/LinkedHashMap.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/util/LinkedHashMap.java Wed Jun 05 13:10:11 2013 -0300 @@ -55,9 +55,9 @@ * order they were presented.) * * <p>A special {@link #LinkedHashMap(int,float,boolean) constructor} is - * provided to create a linked hash map whose order of iteration is the order - * in which its entries were last accessed, from least-recently accessed to - * most-recently (<i>access-order</i>). This kind of map is well-suited to + * provided to create a <tt>LinkedHashMap</tt> whose order of iteration is the + * order in which its entries were last accessed, from least-recently accessed + * to most-recently (<i>access-order</i>). This kind of map is well-suited to * building LRU caches. Invoking the <tt>put</tt> or <tt>get</tt> method * results in an access to the corresponding entry (assuming it exists after * the invocation completes). The <tt>putAll</tt> method generates one entry @@ -243,23 +243,6 @@ } /** - * Transfers all entries to new table array. This method is called - * by superclass resize. It is overridden for performance, as it is - * faster to iterate using our linked list. - */ - @Override - @SuppressWarnings("unchecked") - void transfer(HashMap.Entry[] newTable) { - int newCapacity = newTable.length; - for (Entry<K,V> e = header.after; e != header; e = e.after) { - int index = indexFor(e.hash, newCapacity); - e.next = (HashMap.Entry<K,V>)newTable[index]; - newTable[index] = e; - } - } - - - /** * Returns <tt>true</tt> if this map maps one or more keys to the * specified value. * @@ -320,7 +303,7 @@ // These fields comprise the doubly linked list used for iteration. 
Entry<K,V> before, after; - Entry(int hash, K key, V value, HashMap.Entry<K,V> next) { + Entry(int hash, K key, V value, Object next) { super(hash, key, value, next); } @@ -344,7 +327,7 @@ /** * This method is invoked by the superclass whenever the value - * of a pre-existing entry is read by Map.get or modified by Map.set. + * of a pre-existing entry is read by Map.get or modified by Map.put. * If the enclosing Map is access-ordered, it moves the entry * to the end of the list; otherwise, it does nothing. */ @@ -422,8 +405,9 @@ * allocated entry to get inserted at the end of the linked list and * removes the eldest entry if appropriate. */ - void addEntry(int hash, K key, V value, int bucketIndex) { - super.addEntry(hash, key, value, bucketIndex); + @Override + void addEntry(int hash, K key, V value, int bucketIndex, boolean checkIfNeedTree) { + super.addEntry(hash, key, value, bucketIndex, checkIfNeedTree); // Remove eldest entry if instructed Entry<K,V> eldest = header.after; @@ -432,17 +416,14 @@ } } - /** - * This override differs from addEntry in that it doesn't resize the - * table or remove the eldest entry. + /* + * Create a new LinkedHashMap.Entry and setup the before/after pointers */ - void createEntry(int hash, K key, V value, int bucketIndex) { - @SuppressWarnings("unchecked") - HashMap.Entry<K,V> old = (HashMap.Entry<K,V>)table[bucketIndex]; - Entry<K,V> e = new Entry<>(hash, key, value, old); - table[bucketIndex] = e; - e.addBefore(header); - size++; + @Override + HashMap.Entry<K,V> newEntry(int hash, K key, V value, Object next) { + Entry<K,V> newEntry = new Entry<>(hash, key, value, next); + newEntry.addBefore(header); + return newEntry; } /**
--- a/src/share/classes/java/util/LongSummaryStatistics.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/util/LongSummaryStatistics.java Wed Jun 05 13:10:11 2013 -0300 @@ -171,7 +171,7 @@ */ public String toString() { return String.format( - "%s{count=%d, sum=%d, min=%d, average=%d, max=%d}", + "%s{count=%d, sum=%d, min=%d, average=%f, max=%d}", this.getClass().getSimpleName(), getCount(), getSum(),
--- a/src/share/classes/java/util/PrimitiveIterator.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/util/PrimitiveIterator.java Wed Jun 05 13:10:11 2013 -0300 @@ -91,6 +91,7 @@ * @throws NullPointerException if the specified action is null */ default void forEachRemaining(IntConsumer action) { + Objects.requireNonNull(action); while (hasNext()) action.accept(nextInt()); } @@ -123,6 +124,8 @@ forEachRemaining((IntConsumer) action); } else { + // The method reference action::accept is never null + Objects.requireNonNull(action); if (Tripwire.ENABLED) Tripwire.trip(getClass(), "{0} calling PrimitiveIterator.OfInt.forEachRemainingInt(action::accept)"); forEachRemaining((IntConsumer) action::accept); @@ -162,6 +165,7 @@ * @throws NullPointerException if the specified action is null */ default void forEachRemaining(LongConsumer action) { + Objects.requireNonNull(action); while (hasNext()) action.accept(nextLong()); } @@ -194,6 +198,8 @@ forEachRemaining((LongConsumer) action); } else { + // The method reference action::accept is never null + Objects.requireNonNull(action); if (Tripwire.ENABLED) Tripwire.trip(getClass(), "{0} calling PrimitiveIterator.OfLong.forEachRemainingLong(action::accept)"); forEachRemaining((LongConsumer) action::accept); @@ -232,6 +238,7 @@ * @throws NullPointerException if the specified action is null */ default void forEachRemaining(DoubleConsumer action) { + Objects.requireNonNull(action); while (hasNext()) action.accept(nextDouble()); } @@ -265,6 +272,8 @@ forEachRemaining((DoubleConsumer) action); } else { + // The method reference action::accept is never null + Objects.requireNonNull(action); if (Tripwire.ENABLED) Tripwire.trip(getClass(), "{0} calling PrimitiveIterator.OfDouble.forEachRemainingDouble(action::accept)"); forEachRemaining((DoubleConsumer) action::accept);
--- a/src/share/classes/java/util/Spliterator.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/util/Spliterator.java Wed Jun 05 13:10:11 2013 -0300 @@ -394,9 +394,9 @@ * Convenience method that returns {@link #estimateSize()} if this * Spliterator is {@link #SIZED}, else {@code -1}. * @implSpec - * The default returns the result of {@code estimateSize()} if the - * Spliterator reports a characteristic of {@code SIZED}, and {@code -1} - * otherwise. + * The default implementation returns the result of {@code estimateSize()} + * if the Spliterator reports a characteristic of {@code SIZED}, and + * {@code -1} otherwise. * * @return the exact size, if known, else {@code -1}. */
--- a/src/share/classes/java/util/StringJoiner.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/util/StringJoiner.java Wed Jun 05 13:10:11 2013 -0300 @@ -29,14 +29,6 @@ * by a delimiter and optionally starting with a supplied prefix * and ending with a supplied suffix. * <p> - * For example, the String {@code "[George:Sally:Fred]"} may - * be constructed as follows: - * <pre> {@code - * StringJoiner sj = new StringJoiner(":", "[", "]"); - * sj.add("George").add("Sally").add("Fred"); - * String desiredString = sj.toString(); - * }</pre> - * <p> * Prior to adding something to the {@code StringJoiner}, its * {@code sj.toString()} method will, by default, return {@code prefix + suffix}. * However, if the {@code setEmptyValue} method is called, the {@code emptyValue} @@ -45,17 +37,28 @@ * <code>"{}"</code>, where the {@code prefix} is <code>"{"</code>, the * {@code suffix} is <code>"}"</code> and nothing has been added to the * {@code StringJoiner}. - * <p> - * A {@code StringJoiner} may be employed to create formatted output from a - * collection using lambda expressions as shown in the following example. + * + * @apiNote + * <p>The String {@code "[George:Sally:Fred]"} may be constructed as follows: * * <pre> {@code - * List<Person> people = ... - * String commaSeparatedNames = - * people.map(p -> p.getName()).into(new StringJoiner(", ")).toString(); + * StringJoiner sj = new StringJoiner(":", "[", "]"); + * sj.add("George").add("Sally").add("Fred"); + * String desiredString = sj.toString(); + * }</pre> + * <p> + * A {@code StringJoiner} may be employed to create formatted output from a + * {@link java.util.stream.Stream} using + * {@link java.util.stream.Collectors#toStringJoiner}. 
For example: + * + * <pre> {@code + * List<Integer> numbers = Arrays.asList(1, 2, 3, 4); + * String commaSeparatedNumbers = numbers.stream() + * .map(i -> i.toString()) + * .collect(Collectors.toStringJoiner(", ")).toString(); * }</pre> * - * @author Jim Gish + * @see java.util.stream.Collectors#toStringJoiner * @since 1.8 */ public final class StringJoiner {
--- a/src/share/classes/java/util/WeakHashMap.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/util/WeakHashMap.java Wed Jun 05 13:10:11 2013 -0300 @@ -187,11 +187,37 @@ */ int modCount; + private static class Holder { + static final boolean USE_HASHSEED; + + static { + String hashSeedProp = java.security.AccessController.doPrivileged( + new sun.security.action.GetPropertyAction( + "jdk.map.useRandomSeed")); + boolean localBool = (null != hashSeedProp) + ? Boolean.parseBoolean(hashSeedProp) : false; + USE_HASHSEED = localBool; + } + } + /** * A randomizing value associated with this instance that is applied to * hash code of keys to make hash collisions harder to find. + * + * Non-final so it can be set lazily, but be sure not to set more than once. */ - transient final int hashSeed = sun.misc.Hashing.randomHashSeed(this); + transient int hashSeed; + + /** + * Initialize the hashing mask value. + */ + final void initHashSeed() { + if (sun.misc.VM.isBooted() && Holder.USE_HASHSEED) { + // Do not set hashSeed more than once! + // assert hashSeed == 0; + hashSeed = sun.misc.Hashing.randomHashSeed(this); + } + } @SuppressWarnings("unchecked") private Entry<K,V>[] newTable(int n) { @@ -223,6 +249,7 @@ table = newTable(capacity); this.loadFactor = loadFactor; threshold = (int)(capacity * loadFactor); + initHashSeed(); } /** @@ -298,10 +325,7 @@ * in lower bits. 
*/ final int hash(Object k) { - if (k instanceof String) { - return ((String) k).hash32(); - } - int h = hashSeed ^ k.hashCode(); + int h = hashSeed ^ k.hashCode(); // This function ensures that hashCodes that differ only by // constant multiples at each bit position have a bounded @@ -1076,9 +1100,10 @@ } else mc = expectedModCount; - if (tab.length >= hi && (i = index) >= 0 && i < hi) { - index = hi; + if (tab.length >= hi && (i = index) >= 0 && + (i < (index = hi) || current != null)) { WeakHashMap.Entry<K,V> p = current; + current = null; // exhaust do { if (p == null) p = tab[i++]; @@ -1155,9 +1180,10 @@ } else mc = expectedModCount; - if (tab.length >= hi && (i = index) >= 0 && i < hi) { - index = hi; + if (tab.length >= hi && (i = index) >= 0 && + (i < (index = hi) || current != null)) { WeakHashMap.Entry<K,V> p = current; + current = null; // exhaust do { if (p == null) p = tab[i++]; @@ -1232,9 +1258,10 @@ } else mc = expectedModCount; - if (tab.length >= hi && (i = index) >= 0 && i < hi) { - index = hi; + if (tab.length >= hi && (i = index) >= 0 && + (i < (index = hi) || current != null)) { WeakHashMap.Entry<K,V> p = current; + current = null; // exhaust do { if (p == null) p = tab[i++];
--- a/src/share/classes/java/util/concurrent/ConcurrentHashMap.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/util/concurrent/ConcurrentHashMap.java Wed Jun 05 13:10:11 2013 -0300 @@ -34,14 +34,47 @@ */ package java.util.concurrent; -import java.io.ObjectInputStream; -import java.util.concurrent.locks.*; -import java.util.*; import java.io.Serializable; +import java.io.ObjectStreamField; +import java.lang.reflect.ParameterizedType; +import java.lang.reflect.Type; +import java.util.AbstractMap; +import java.util.Arrays; +import java.util.Collection; +import java.util.Comparator; +import java.util.ConcurrentModificationException; +import java.util.Enumeration; +import java.util.HashMap; +import java.util.Hashtable; +import java.util.Iterator; +import java.util.Map; +import java.util.NoSuchElementException; +import java.util.Set; +import java.util.Spliterator; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ForkJoinPool; +import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.locks.ReentrantLock; +import java.util.concurrent.locks.StampedLock; +import java.util.function.BiConsumer; +import java.util.function.BiFunction; +import java.util.function.BinaryOperator; +import java.util.function.Consumer; +import java.util.function.DoubleBinaryOperator; +import java.util.function.Function; +import java.util.function.IntBinaryOperator; +import java.util.function.LongBinaryOperator; +import java.util.function.ToDoubleBiFunction; +import java.util.function.ToDoubleFunction; +import java.util.function.ToIntBiFunction; +import java.util.function.ToIntFunction; +import java.util.function.ToLongBiFunction; +import java.util.function.ToLongFunction; +import java.util.stream.Stream; /** * A hash table supporting full concurrency of retrievals and - * adjustable expected concurrency for updates. This class obeys the + * high expected concurrency for updates. 
This class obeys the * same functional specification as {@link java.util.Hashtable}, and * includes versions of methods corresponding to each method of * {@code Hashtable}. However, even though all operations are @@ -51,35 +84,61 @@ * interoperable with {@code Hashtable} in programs that rely on its * thread safety but not on its synchronization details. * - * <p> Retrieval operations (including {@code get}) generally do not - * block, so may overlap with update operations (including - * {@code put} and {@code remove}). Retrievals reflect the results - * of the most recently <em>completed</em> update operations holding - * upon their onset. For aggregate operations such as {@code putAll} - * and {@code clear}, concurrent retrievals may reflect insertion or - * removal of only some entries. Similarly, Iterators and - * Enumerations return elements reflecting the state of the hash table - * at some point at or since the creation of the iterator/enumeration. - * They do <em>not</em> throw {@link ConcurrentModificationException}. - * However, iterators are designed to be used by only one thread at a time. + * <p>Retrieval operations (including {@code get}) generally do not + * block, so may overlap with update operations (including {@code put} + * and {@code remove}). Retrievals reflect the results of the most + * recently <em>completed</em> update operations holding upon their + * onset. (More formally, an update operation for a given key bears a + * <em>happens-before</em> relation with any (non-null) retrieval for + * that key reporting the updated value.) For aggregate operations + * such as {@code putAll} and {@code clear}, concurrent retrievals may + * reflect insertion or removal of only some entries. Similarly, + * Iterators and Enumerations return elements reflecting the state of + * the hash table at some point at or since the creation of the + * iterator/enumeration. They do <em>not</em> throw {@link + * ConcurrentModificationException}. 
However, iterators are designed + * to be used by only one thread at a time. Bear in mind that the + * results of aggregate status methods including {@code size}, {@code + * isEmpty}, and {@code containsValue} are typically useful only when + * a map is not undergoing concurrent updates in other threads. + * Otherwise the results of these methods reflect transient states + * that may be adequate for monitoring or estimation purposes, but not + * for program control. * - * <p> The allowed concurrency among update operations is guided by - * the optional {@code concurrencyLevel} constructor argument - * (default {@code 16}), which is used as a hint for internal sizing. The - * table is internally partitioned to try to permit the indicated - * number of concurrent updates without contention. Because placement - * in hash tables is essentially random, the actual concurrency will - * vary. Ideally, you should choose a value to accommodate as many - * threads as will ever concurrently modify the table. Using a - * significantly higher value than you need can waste space and time, - * and a significantly lower value can lead to thread contention. But - * overestimates and underestimates within an order of magnitude do - * not usually have much noticeable impact. A value of one is - * appropriate when it is known that only one thread will modify and - * all others will only read. Also, resizing this or any other kind of - * hash table is a relatively slow operation, so, when possible, it is - * a good idea to provide estimates of expected table sizes in - * constructors. + * <p>The table is dynamically expanded when there are too many + * collisions (i.e., keys that have distinct hash codes but fall into + * the same slot modulo the table size), with the expected average + * effect of maintaining roughly two bins per mapping (corresponding + * to a 0.75 load factor threshold for resizing). 
There may be much + * variance around this average as mappings are added and removed, but + * overall, this maintains a commonly accepted time/space tradeoff for + * hash tables. However, resizing this or any other kind of hash + * table may be a relatively slow operation. When possible, it is a + * good idea to provide a size estimate as an optional {@code + * initialCapacity} constructor argument. An additional optional + * {@code loadFactor} constructor argument provides a further means of + * customizing initial table capacity by specifying the table density + * to be used in calculating the amount of space to allocate for the + * given number of elements. Also, for compatibility with previous + * versions of this class, constructors may optionally specify an + * expected {@code concurrencyLevel} as an additional hint for + * internal sizing. Note that using many keys with exactly the same + * {@code hashCode()} is a sure way to slow down performance of any + * hash table. To ameliorate impact, when keys are {@link Comparable}, + * this class may use comparison order among keys to help break ties. + * + * <p>A {@link Set} projection of a ConcurrentHashMap may be created + * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed + * (using {@link #keySet(Object)} when only keys are of interest, and the + * mapped values are (perhaps transiently) not used or all take the + * same mapping value. + * + * <p>A ConcurrentHashMap can be used as scalable frequency map (a + * form of histogram or multiset) by using {@link + * java.util.concurrent.atomic.LongAdder} values and initializing via + * {@link #computeIfAbsent computeIfAbsent}. 
For example, to add a count + * to a {@code ConcurrentHashMap<String,LongAdder> freqs}, you can use + * {@code freqs.computeIfAbsent(k -> new LongAdder()).increment();} * * <p>This class and its views and iterators implement all of the * <em>optional</em> methods of the {@link Map} and {@link Iterator} @@ -88,6 +147,114 @@ * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class * does <em>not</em> allow {@code null} to be used as a key or value. * + * <p>ConcurrentHashMaps support a set of sequential and parallel bulk + * operations that, unlike most {@link Stream} methods, are designed + * to be safely, and often sensibly, applied even with maps that are + * being concurrently updated by other threads; for example, when + * computing a snapshot summary of the values in a shared registry. + * There are three kinds of operation, each with four forms, accepting + * functions with Keys, Values, Entries, and (Key, Value) arguments + * and/or return values. Because the elements of a ConcurrentHashMap + * are not ordered in any particular way, and may be processed in + * different orders in different parallel executions, the correctness + * of supplied functions should not depend on any ordering, or on any + * other objects or values that may transiently change while + * computation is in progress; and except for forEach actions, should + * ideally be side-effect-free. Bulk operations on {@link java.util.Map.Entry} + * objects do not support method {@code setValue}. + * + * <ul> + * <li> forEach: Perform a given action on each element. + * A variant form applies a given transformation on each element + * before performing the action.</li> + * + * <li> search: Return the first available non-null result of + * applying a given function on each element; skipping further + * search when a result is found.</li> + * + * <li> reduce: Accumulate each element. 
The supplied reduction + * function cannot rely on ordering (more formally, it should be + * both associative and commutative). There are five variants: + * + * <ul> + * + * <li> Plain reductions. (There is not a form of this method for + * (key, value) function arguments since there is no corresponding + * return type.)</li> + * + * <li> Mapped reductions that accumulate the results of a given + * function applied to each element.</li> + * + * <li> Reductions to scalar doubles, longs, and ints, using a + * given basis value.</li> + * + * </ul> + * </li> + * </ul> + * + * <p>These bulk operations accept a {@code parallelismThreshold} + * argument. Methods proceed sequentially if the current map size is + * estimated to be less than the given threshold. Using a value of + * {@code Long.MAX_VALUE} suppresses all parallelism. Using a value + * of {@code 1} results in maximal parallelism by partitioning into + * enough subtasks to fully utilize the {@link + * ForkJoinPool#commonPool()} that is used for all parallel + * computations. Normally, you would initially choose one of these + * extreme values, and then measure performance of using in-between + * values that trade off overhead versus throughput. + * + * <p>The concurrency properties of bulk operations follow + * from those of ConcurrentHashMap: Any non-null result returned + * from {@code get(key)} and related access methods bears a + * happens-before relation with the associated insertion or + * update. The result of any bulk operation reflects the + * composition of these per-element relations (but is not + * necessarily atomic with respect to the map as a whole unless it + * is somehow known to be quiescent). Conversely, because keys + * and values in the map are never null, null serves as a reliable + * atomic indicator of the current lack of any result. To + * maintain this property, null serves as an implicit basis for + * all non-scalar reduction operations. 
For the double, long, and + * int versions, the basis should be one that, when combined with + * any other value, returns that other value (more formally, it + * should be the identity element for the reduction). Most common + * reductions have these properties; for example, computing a sum + * with basis 0 or a minimum with basis MAX_VALUE. + * + * <p>Search and transformation functions provided as arguments + * should similarly return null to indicate the lack of any result + * (in which case it is not used). In the case of mapped + * reductions, this also enables transformations to serve as + * filters, returning null (or, in the case of primitive + * specializations, the identity basis) if the element should not + * be combined. You can create compound transformations and + * filterings by composing them yourself under this "null means + * there is nothing there now" rule before using them in search or + * reduce operations. + * + * <p>Methods accepting and/or returning Entry arguments maintain + * key-value associations. They may be useful for example when + * finding the key for the greatest value. Note that "plain" Entry + * arguments can be supplied using {@code new + * AbstractMap.SimpleEntry(k,v)}. + * + * <p>Bulk operations may complete abruptly, throwing an + * exception encountered in the application of a supplied + * function. Bear in mind when handling such exceptions that other + * concurrently executing functions could also have thrown + * exceptions, or would have done so if the first exception had + * not occurred. + * + * <p>Speedups for parallel compared to sequential forms are common + * but not guaranteed. Parallel operations involving brief functions + * on small maps may execute more slowly than sequential forms if the + * underlying work to parallelize the computation is more expensive + * than the computation itself. 
Similarly, parallelization may not + * lead to much actual parallelism if all processors are busy + * performing unrelated tasks. + * + * <p>All arguments to all task methods must be non-null. + * * <p>This class is a member of the * <a href="{@docRoot}/../technotes/guides/collections/index.html"> * Java Collections Framework</a>. @@ -97,735 +264,2373 @@ * @param <K> the type of keys maintained by this map * @param <V> the type of mapped values */ -public class ConcurrentHashMap<K, V> extends AbstractMap<K, V> - implements ConcurrentMap<K, V>, Serializable { +@SuppressWarnings({"unchecked", "rawtypes", "serial"}) +public class ConcurrentHashMap<K,V> extends AbstractMap<K,V> + implements ConcurrentMap<K,V>, Serializable { + private static final long serialVersionUID = 7249069246763182397L; /* - * The basic strategy is to subdivide the table among Segments, - * each of which itself is a concurrently readable hash table. To - * reduce footprint, all but one segments are constructed only - * when first needed (see ensureSegment). To maintain visibility - * in the presence of lazy construction, accesses to segments as - * well as elements of segment's table must use volatile access, - * which is done via Unsafe within methods segmentAt etc - * below. These provide the functionality of AtomicReferenceArrays - * but reduce the levels of indirection. Additionally, - * volatile-writes of table elements and entry "next" fields - * within locked operations use the cheaper "lazySet" forms of - * writes (via putOrderedObject) because these writes are always - * followed by lock releases that maintain sequential consistency - * of table updates. + * Overview: + * + * The primary design goal of this hash table is to maintain + * concurrent readability (typically method get(), but also + * iterators and related methods) while minimizing update + * contention. 
Secondary goals are to keep space consumption about + * the same or better than java.util.HashMap, and to support high + * initial insertion rates on an empty table by many threads. + * + * Each key-value mapping is held in a Node. Because Node key + * fields can contain special values, they are defined using plain + * Object types (not type "K"). This leads to a lot of explicit + * casting (and the use of class-wide warning suppressions). It + * also allows some of the public methods to be factored into a + * smaller number of internal methods (although sadly not so for + * the five variants of put-related operations). The + * validation-based approach explained below leads to a lot of + * code sprawl because retry-control precludes factoring into + * smaller methods. + * + * The table is lazily initialized to a power-of-two size upon the + * first insertion. Each bin in the table normally contains a + * list of Nodes (most often, the list has only zero or one Node). + * Table accesses require volatile/atomic reads, writes, and + * CASes. Because there is no other way to arrange this without + * adding further indirections, we use intrinsics + * (sun.misc.Unsafe) operations. + * + * We use the top (sign) bit of Node hash fields for control + * purposes -- it is available anyway because of addressing + * constraints. Nodes with negative hash fields are forwarding + * nodes to either TreeBins or resized tables. The lower 31 bits + * of each normal Node's hash field contain a transformation of + * the key's hash code. + * + * Insertion (via put or its variants) of the first node in an + * empty bin is performed by just CASing it to the bin. This is + * by far the most common case for put operations under most + * key/hash distributions. Other update operations (insert, + * delete, and replace) require locks. 
We do not want to waste + * the space required to associate a distinct lock object with + * each bin, so instead use the first node of a bin list itself as + * a lock. Locking support for these locks relies on builtin + * "synchronized" monitors. + * + * Using the first node of a list as a lock does not by itself + * suffice though: When a node is locked, any update must first + * validate that it is still the first node after locking it, and + * retry if not. Because new nodes are always appended to lists, + * once a node is first in a bin, it remains first until deleted + * or the bin becomes invalidated (upon resizing). + * + * The main disadvantage of per-bin locks is that other update + * operations on other nodes in a bin list protected by the same + * lock can stall, for example when user equals() or mapping + * functions take a long time. However, statistically, under + * random hash codes, this is not a common problem. Ideally, the + * frequency of nodes in bins follows a Poisson distribution + * (http://en.wikipedia.org/wiki/Poisson_distribution) with a + * parameter of about 0.5 on average, given the resizing threshold + * of 0.75, although with a large variance because of resizing + * granularity. Ignoring variance, the expected occurrences of + * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The + * first values are: + * + * 0: 0.60653066 + * 1: 0.30326533 + * 2: 0.07581633 + * 3: 0.01263606 + * 4: 0.00157952 + * 5: 0.00015795 + * 6: 0.00001316 + * 7: 0.00000094 + * 8: 0.00000006 + * more: less than 1 in ten million + * + * Lock contention probability for two threads accessing distinct + * elements is roughly 1 / (8 * #elements) under random hashes. * - * Historical note: The previous version of this class relied - * heavily on "final" fields, which avoided some volatile reads at - * the expense of a large initial footprint. 
Some remnants of - * that design (including forced construction of segment 0) exist - * to ensure serialization compatibility. + * Actual hash code distributions encountered in practice + * sometimes deviate significantly from uniform randomness. This + * includes the case when N > (1<<30), so some keys MUST collide. + * Similarly for dumb or hostile usages in which multiple keys are + * designed to have identical hash codes. Also, although we guard + * against the worst effects of this (see method spread), sets of + * hashes may differ only in bits that do not impact their bin + * index for a given power-of-two mask. So we use a secondary + * strategy that applies when the number of nodes in a bin exceeds + * a threshold, and at least one of the keys implements + * Comparable. These TreeBins use a balanced tree to hold nodes + * (a specialized form of red-black trees), bounding search time + * to O(log N). Each search step in a TreeBin is at least twice as + * slow as in a regular list, but given that N cannot exceed + * (1<<64) (before running out of addresses) this bounds search + * steps, lock hold times, etc, to reasonable constants (roughly + * 100 nodes inspected per operation worst case) so long as keys + * are Comparable (which is very common -- String, Long, etc). + * TreeBin nodes (TreeNodes) also maintain the same "next" + * traversal pointers as regular nodes, so can be traversed in + * iterators in the same way. + * + * The table is resized when occupancy exceeds a percentage + * threshold (nominally, 0.75, but see below). Any thread + * noticing an overfull bin may assist in resizing after the + * initiating thread allocates and sets up the replacement + * array. However, rather than stalling, these other threads may + * proceed with insertions etc. The use of TreeBins shields us + * from the worst case effects of overfilling while resizes are in + * progress. Resizing proceeds by transferring bins, one by one, + * from the table to the next table. 
To enable concurrency, the + * next table must be (incrementally) prefilled with place-holders + * serving as reverse forwarders to the old table. Because we are + * using power-of-two expansion, the elements from each bin must + * either stay at same index, or move with a power of two + * offset. We eliminate unnecessary node creation by catching + * cases where old nodes can be reused because their next fields + * won't change. On average, only about one-sixth of them need + * cloning when a table doubles. The nodes they replace will be + * garbage collectable as soon as they are no longer referenced by + * any reader thread that may be in the midst of concurrently + * traversing table. Upon transfer, the old table bin contains + * only a special forwarding node (with hash field "MOVED") that + * contains the next table as its key. On encountering a + * forwarding node, access and update operations restart, using + * the new table. + * + * Each bin transfer requires its bin lock, which can stall + * waiting for locks while resizing. However, because other + * threads can join in and help resize rather than contend for + * locks, average aggregate waits become shorter as resizing + * progresses. The transfer operation must also ensure that all + * accessible bins in both the old and new table are usable by any + * traversal. This is arranged by proceeding from the last bin + * (table.length - 1) up towards the first. Upon seeing a + * forwarding node, traversals (see class Traverser) arrange to + * move to the new table without revisiting nodes. However, to + * ensure that no intervening nodes are skipped, bin splitting can + * only begin after the associated reverse-forwarders are in + * place. + * + * The traversal scheme also applies to partial traversals of + * ranges of bins (via an alternate Traverser constructor) + * to support partitioned aggregate operations. 
Also, read-only + * operations give up if ever forwarded to a null table, which + * provides support for shutdown-style clearing, which is also not + * currently implemented. + * + * Lazy table initialization minimizes footprint until first use, + * and also avoids resizings when the first operation is from a + * putAll, constructor with map argument, or deserialization. + * These cases attempt to override the initial capacity settings, + * but harmlessly fail to take effect in cases of races. + * + * The element count is maintained using a specialization of + * LongAdder. We need to incorporate a specialization rather than + * just use a LongAdder in order to access implicit + * contention-sensing that leads to creation of multiple + * Cells. The counter mechanics avoid contention on + * updates but can encounter cache thrashing if read too + * frequently during concurrent access. To avoid reading so often, + * resizing under contention is attempted only upon adding to a + * bin already holding two or more nodes. Under uniform hash + * distributions, the probability of this occurring at threshold + * is around 13%, meaning that only about 1 in 8 puts check + * threshold (and after resizing, many fewer do so). The bulk + * putAll operation further reduces contention by only committing + * count updates upon these size checks. + * + * Maintaining API and serialization compatibility with previous + * versions of this class introduces several oddities. Mainly: We + * leave untouched but unused constructor arguments refering to + * concurrencyLevel. We accept a loadFactor constructor argument, + * but apply it only to initial table capacity (which is the only + * time that we can guarantee to honor it.) We also declare an + * unused "Segment" class that is instantiated in minimal form + * only when serializing. */ /* ---------------- Constants -------------- */ /** - * The default initial capacity for this table, - * used when not otherwise specified in a constructor. 
+ * The largest possible table capacity. This value must be + * exactly 1<<30 to stay within Java array allocation and indexing + * bounds for power of two table sizes, and is further required + * because the top two bits of 32bit hash fields are used for + * control purposes. */ - static final int DEFAULT_INITIAL_CAPACITY = 16; + private static final int MAXIMUM_CAPACITY = 1 << 30; + + /** + * The default initial table capacity. Must be a power of 2 + * (i.e., at least 1) and at most MAXIMUM_CAPACITY. + */ + private static final int DEFAULT_CAPACITY = 16; /** - * The default load factor for this table, used when not - * otherwise specified in a constructor. + * The largest possible (non-power of two) array size. + * Needed by toArray and related methods. */ - static final float DEFAULT_LOAD_FACTOR = 0.75f; + static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; /** - * The default concurrency level for this table, used when not - * otherwise specified in a constructor. + * The default concurrency level for this table. Unused but + * defined for compatibility with previous versions of this class. */ - static final int DEFAULT_CONCURRENCY_LEVEL = 16; + private static final int DEFAULT_CONCURRENCY_LEVEL = 16; + + /** + * The load factor for this table. Overrides of this value in + * constructors affect only the initial table capacity. The + * actual floating point value isn't normally used -- it is + * simpler to use expressions such as {@code n - (n >>> 2)} for + * the associated resizing threshold. + */ + private static final float LOAD_FACTOR = 0.75f; /** - * The maximum capacity, used if a higher value is implicitly - * specified by either of the constructors with arguments. MUST - * be a power of two <= 1<<30 to ensure that entries are indexable - * using ints. + * The bin count threshold for using a tree rather than list for a + * bin. The value reflects the approximate break-even point for + * using tree-based operations. 
*/ - static final int MAXIMUM_CAPACITY = 1 << 30; + private static final int TREE_THRESHOLD = 8; /** - * The minimum capacity for per-segment tables. Must be a power - * of two, at least two to avoid immediate resizing on next use - * after lazy construction. + * Minimum number of rebinnings per transfer step. Ranges are + * subdivided to allow multiple resizer threads. This value + * serves as a lower bound to avoid resizers encountering + * excessive memory contention. The value should be at least + * DEFAULT_CAPACITY. + */ + private static final int MIN_TRANSFER_STRIDE = 16; + + /* + * Encodings for Node hash fields. See above for explanation. */ - static final int MIN_SEGMENT_TABLE_CAPACITY = 2; + static final int MOVED = 0x80000000; // hash field for forwarding nodes + static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash + + /** Number of CPUS, to place bounds on some sizings */ + static final int NCPU = Runtime.getRuntime().availableProcessors(); + + /** For serialization compatibility. */ + private static final ObjectStreamField[] serialPersistentFields = { + new ObjectStreamField("segments", Segment[].class), + new ObjectStreamField("segmentMask", Integer.TYPE), + new ObjectStreamField("segmentShift", Integer.TYPE) + }; /** - * The maximum number of segments to allow; used to bound - * constructor arguments. Must be power of two less than 1 << 24. + * A padded cell for distributing counts. Adapted from LongAdder + * and Striped64. See their internal docs for explanation. */ - static final int MAX_SEGMENTS = 1 << 16; // slightly conservative - - /** - * Number of unsynchronized retries in size and containsValue - * methods before resorting to locking. This is used to avoid - * unbounded retries if tables undergo continuous modification - * which would make it impossible to obtain an accurate result. 
- */ - static final int RETRIES_BEFORE_LOCK = 2; + @sun.misc.Contended static final class Cell { + volatile long value; + Cell(long x) { value = x; } + } /* ---------------- Fields -------------- */ /** - * A randomizing value associated with this instance that is applied to - * hash code of keys to make hash collisions harder to find. + * The array of bins. Lazily initialized upon first insertion. + * Size is always a power of two. Accessed directly by iterators. + */ + transient volatile Node<K,V>[] table; + + /** + * The next table to use; non-null only while resizing. + */ + private transient volatile Node<K,V>[] nextTable; + + /** + * Base counter value, used mainly when there is no contention, + * but also as a fallback during table initialization + * races. Updated via CAS. + */ + private transient volatile long baseCount; + + /** + * Table initialization and resizing control. When negative, the + * table is being initialized or resized: -1 for initialization, + * else -(1 + the number of active resizing threads). Otherwise, + * when table is null, holds the initial table size to use upon + * creation, or 0 for default. After initialization, holds the + * next element count value upon which to resize the table. + */ + private transient volatile int sizeCtl; + + /** + * The next table index (plus one) to split while resizing. + */ + private transient volatile int transferIndex; + + /** + * The least available table index to split while resizing. + */ + private transient volatile int transferOrigin; + + /** + * Spinlock (locked via CAS) used when resizing and/or creating Cells. */ - private transient final int hashSeed = sun.misc.Hashing.randomHashSeed(this); + private transient volatile int cellsBusy; + + /** + * Table of counter cells. When non-null, size is a power of 2. 
+ */ + private transient volatile Cell[] counterCells; + + // views + private transient KeySetView<K,V> keySet; + private transient ValuesView<K,V> values; + private transient EntrySetView<K,V> entrySet; + + /* ---------------- Table element access -------------- */ + + /* + * Volatile access methods are used for table elements as well as + * elements of in-progress next table while resizing. Uses are + * null checked by callers, and implicitly bounds-checked, relying + * on the invariants that tab arrays have non-zero size, and all + * indices are masked with (tab.length - 1) which is never + * negative and always less than length. Note that, to be correct + * wrt arbitrary concurrency errors by users, bounds checks must + * operate on local variables, which accounts for some odd-looking + * inline assignments below. + */ + + static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) { + return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE); + } + + static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i, + Node<K,V> c, Node<K,V> v) { + return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v); + } + + static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) { + U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v); + } + + /* ---------------- Nodes -------------- */ /** - * Mask value for indexing into segments. The upper bits of a - * key's hash code are used to choose the segment. + * Key-value entry. This class is never exported out as a + * user-mutable Map.Entry (i.e., one supporting setValue; see + * MapEntry below), but can be used for read-only traversals used + * in bulk tasks. Nodes with a hash field of MOVED are special, + * and do not contain user keys or values (and are never + * exported). Otherwise, keys and vals are never null. 
*/ - final int segmentMask; + static class Node<K,V> implements Map.Entry<K,V> { + final int hash; + final Object key; + volatile V val; + Node<K,V> next; + + Node(int hash, Object key, V val, Node<K,V> next) { + this.hash = hash; + this.key = key; + this.val = val; + this.next = next; + } + + public final K getKey() { return (K)key; } + public final V getValue() { return val; } + public final int hashCode() { return key.hashCode() ^ val.hashCode(); } + public final String toString(){ return key + "=" + val; } + public final V setValue(V value) { + throw new UnsupportedOperationException(); + } + + public final boolean equals(Object o) { + Object k, v, u; Map.Entry<?,?> e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry<?,?>)o).getKey()) != null && + (v = e.getValue()) != null && + (k == key || k.equals(key)) && + (v == (u = val) || v.equals(u))); + } + } /** - * Shift value for indexing within segments. + * Exported Entry for EntryIterator */ - final int segmentShift; + static final class MapEntry<K,V> implements Map.Entry<K,V> { + final K key; // non-null + V val; // non-null + final ConcurrentHashMap<K,V> map; + MapEntry(K key, V val, ConcurrentHashMap<K,V> map) { + this.key = key; + this.val = val; + this.map = map; + } + public K getKey() { return key; } + public V getValue() { return val; } + public int hashCode() { return key.hashCode() ^ val.hashCode(); } + public String toString() { return key + "=" + val; } + + public boolean equals(Object o) { + Object k, v; Map.Entry<?,?> e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry<?,?>)o).getKey()) != null && + (v = e.getValue()) != null && + (k == key || k.equals(key)) && + (v == val || v.equals(val))); + } + + /** + * Sets our entry's value and writes through to the map. The + * value to return is somewhat arbitrary here. 
Since we do not + * necessarily track asynchronous changes, the most recent + * "previous" value could be different from what we return (or + * could even have been removed, in which case the put will + * re-establish). We do not and cannot guarantee more. + */ + public V setValue(V value) { + if (value == null) throw new NullPointerException(); + V v = val; + val = value; + map.put(key, value); + return v; + } + } + + + /* ---------------- TreeBins -------------- */ + + /** + * Nodes for use in TreeBins + */ + static final class TreeNode<K,V> extends Node<K,V> { + TreeNode<K,V> parent; // red-black tree links + TreeNode<K,V> left; + TreeNode<K,V> right; + TreeNode<K,V> prev; // needed to unlink next upon deletion + boolean red; + + TreeNode(int hash, Object key, V val, Node<K,V> next, + TreeNode<K,V> parent) { + super(hash, key, val, next); + this.parent = parent; + } + } + + /** + * Returns a Class for the given type of the form "class C + * implements Comparable<C>", if one exists, else null. See below + * for explanation. + */ + static Class<?> comparableClassFor(Class<?> c) { + Class<?> s, cmpc; Type[] ts, as; Type t; ParameterizedType p; + if (c == String.class) // bypass checks + return c; + if (c != null && (cmpc = Comparable.class).isAssignableFrom(c)) { + while (cmpc.isAssignableFrom(s = c.getSuperclass())) + c = s; // find topmost comparable class + if ((ts = c.getGenericInterfaces()) != null) { + for (int i = 0; i < ts.length; ++i) { + if (((t = ts[i]) instanceof ParameterizedType) && + ((p = (ParameterizedType)t).getRawType() == cmpc) && + (as = p.getActualTypeArguments()) != null && + as.length == 1 && as[0] == c) // type arg is c + return c; + } + } + } + return null; + } /** - * The segments, each of which is a specialized hash table. - */ - final Segment<K,V>[] segments; - - transient Set<K> keySet; - transient Set<Map.Entry<K,V>> entrySet; - transient Collection<V> values; - - /** - * ConcurrentHashMap list entry. 
Note that this is never exported - * out as a user-visible Map.Entry. + * A specialized form of red-black tree for use in bins + * whose size exceeds a threshold. + * + * TreeBins use a special form of comparison for search and + * related operations (which is the main reason we cannot use + * existing collections such as TreeMaps). TreeBins contain + * Comparable elements, but may contain others, as well as + * elements that are Comparable but not necessarily Comparable + * for the same T, so we cannot invoke compareTo among them. To + * handle this, the tree is ordered primarily by hash value, then + * by Comparable.compareTo order if applicable. On lookup at a + * node, if elements are not comparable or compare as 0 then both + * left and right children may need to be searched in the case of + * tied hash values. (This corresponds to the full list search + * that would be necessary if all elements were non-Comparable and + * had tied hashes.) The red-black balancing code is updated from + * pre-jdk-collections + * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) + * based in turn on Cormen, Leiserson, and Rivest "Introduction to + * Algorithms" (CLR). + * + * TreeBins also maintain a separate locking discipline than + * regular bins. Because they are forwarded via special MOVED + * nodes at bin heads (which can never change once established), + * we cannot use those nodes as locks. Instead, TreeBin extends + * StampedLock to support a form of read-write lock. For update + * operations and table validation, the exclusive form of lock + * behaves in the same way as bin-head locks. However, lookups use + * shared read-lock mechanics to allow multiple readers in the + * absence of writers. Additionally, these lookups do not ever + * block: While the lock is not available, they proceed along the + * slow traversal path (via next-pointers) until the lock becomes + * available or the list is exhausted, whichever comes + * first. 
These cases are not fast, but maximize aggregate + * expected throughput. */ - static final class HashEntry<K,V> { - final int hash; - final K key; - volatile V value; - volatile HashEntry<K,V> next; - - HashEntry(int hash, K key, V value, HashEntry<K,V> next) { - this.hash = hash; - this.key = key; - this.value = value; - this.next = next; + static final class TreeBin<K,V> extends StampedLock { + private static final long serialVersionUID = 2249069246763182397L; + transient TreeNode<K,V> root; // root of tree + transient TreeNode<K,V> first; // head of next-pointer list + + /** From CLR */ + private void rotateLeft(TreeNode<K,V> p) { + if (p != null) { + TreeNode<K,V> r = p.right, pp, rl; + if ((rl = p.right = r.left) != null) + rl.parent = p; + if ((pp = r.parent = p.parent) == null) + root = r; + else if (pp.left == p) + pp.left = r; + else + pp.right = r; + r.left = p; + p.parent = r; + } + } + + /** From CLR */ + private void rotateRight(TreeNode<K,V> p) { + if (p != null) { + TreeNode<K,V> l = p.left, pp, lr; + if ((lr = p.left = l.right) != null) + lr.parent = p; + if ((pp = l.parent = p.parent) == null) + root = l; + else if (pp.right == p) + pp.right = l; + else + pp.left = l; + l.right = p; + p.parent = l; + } + } + + /** + * Returns the TreeNode (or null if not found) for the given key + * starting at given root. + */ + final TreeNode<K,V> getTreeNode(int h, Object k, TreeNode<K,V> p, + Class<?> cc) { + while (p != null) { + int dir, ph; Object pk; Class<?> pc; + if ((ph = p.hash) != h) + dir = (h < ph) ? -1 : 1; + else if ((pk = p.key) == k || k.equals(pk)) + return p; + else if (cc == null || pk == null || + ((pc = pk.getClass()) != cc && + comparableClassFor(pc) != cc) || + (dir = ((Comparable<Object>)k).compareTo(pk)) == 0) { + TreeNode<K,V> r, pr; // check both sides + if ((pr = p.right) != null && + (r = getTreeNode(h, k, pr, cc)) != null) + return r; + else // continue left + dir = -1; + } + p = (dir > 0) ? 
p.right : p.left; + } + return null; + } + + /** + * Wrapper for getTreeNode used by CHM.get. Tries to obtain + * read-lock to call getTreeNode, but during failure to get + * lock, searches along next links. + */ + final V getValue(int h, Object k) { + Class<?> cc = comparableClassFor(k.getClass()); + Node<K,V> r = null; + for (Node<K,V> e = first; e != null; e = e.next) { + long s; + if ((s = tryReadLock()) != 0L) { + try { + r = getTreeNode(h, k, root, cc); + } finally { + unlockRead(s); + } + break; + } + else if (e.hash == h && k.equals(e.key)) { + r = e; + break; + } + } + return r == null ? null : r.val; + } + + /** + * Finds or adds a node. + * @return null if added + */ + final TreeNode<K,V> putTreeNode(int h, Object k, V v) { + Class<?> cc = comparableClassFor(k.getClass()); + TreeNode<K,V> pp = root, p = null; + int dir = 0; + while (pp != null) { // find existing node or leaf to insert at + int ph; Object pk; Class<?> pc; + p = pp; + if ((ph = p.hash) != h) + dir = (h < ph) ? -1 : 1; + else if ((pk = p.key) == k || k.equals(pk)) + return p; + else if (cc == null || pk == null || + ((pc = pk.getClass()) != cc && + comparableClassFor(pc) != cc) || + (dir = ((Comparable<Object>)k).compareTo(pk)) == 0) { + TreeNode<K,V> r, pr; + if ((pr = p.right) != null && + (r = getTreeNode(h, k, pr, cc)) != null) + return r; + else // continue left + dir = -1; + } + pp = (dir > 0) ? 
p.right : p.left; + } + + TreeNode<K,V> f = first; + TreeNode<K,V> x = first = new TreeNode<K,V>(h, k, v, f, p); + if (p == null) + root = x; + else { // attach and rebalance; adapted from CLR + if (f != null) + f.prev = x; + if (dir <= 0) + p.left = x; + else + p.right = x; + x.red = true; + for (TreeNode<K,V> xp, xpp, xppl, xppr;;) { + if ((xp = x.parent) == null) { + (root = x).red = false; + break; + } + else if (!xp.red || (xpp = xp.parent) == null) { + TreeNode<K,V> r = root; + if (r != null && r.red) + r.red = false; + break; + } + else if ((xppl = xpp.left) == xp) { + if ((xppr = xpp.right) != null && xppr.red) { + xppr.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.right) { + rotateLeft(x = xp); + xpp = (xp = x.parent) == null ? null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateRight(xpp); + } + } + } + } + else { + if (xppl != null && xppl.red) { + xppl.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.left) { + rotateRight(x = xp); + xpp = (xp = x.parent) == null ? null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateLeft(xpp); + } + } + } + } + } + } + assert checkInvariants(); + return null; + } + + /** + * Removes the given node, that must be present before this + * call. This is messier than typical red-black deletion code + * because we cannot swap the contents of an interior node + * with a leaf successor that is pinned by "next" pointers + * that are accessible independently of lock. So instead we + * swap the tree linkages. 
+ */ + final void deleteTreeNode(TreeNode<K,V> p) { + TreeNode<K,V> next = (TreeNode<K,V>)p.next; + TreeNode<K,V> pred = p.prev; // unlink traversal pointers + if (pred == null) + first = next; + else + pred.next = next; + if (next != null) + next.prev = pred; + else if (pred == null) { + root = null; + return; + } + TreeNode<K,V> replacement; + TreeNode<K,V> pl = p.left; + TreeNode<K,V> pr = p.right; + if (pl != null && pr != null) { + TreeNode<K,V> s = pr, sl; + while ((sl = s.left) != null) // find successor + s = sl; + boolean c = s.red; s.red = p.red; p.red = c; // swap colors + TreeNode<K,V> sr = s.right; + TreeNode<K,V> pp = p.parent; + if (s == pr) { // p was s's direct parent + p.parent = s; + s.right = p; + } + else { + TreeNode<K,V> sp = s.parent; + if ((p.parent = sp) != null) { + if (s == sp.left) + sp.left = p; + else + sp.right = p; + } + if ((s.right = pr) != null) + pr.parent = s; + } + p.left = null; + if ((p.right = sr) != null) + sr.parent = p; + if ((s.left = pl) != null) + pl.parent = s; + if ((s.parent = pp) == null) + root = s; + else if (p == pp.left) + pp.left = s; + else + pp.right = s; + if (sr != null) + replacement = sr; + else + replacement = p; + } + else if (pl != null) + replacement = pl; + else if (pr != null) + replacement = pr; + else + replacement = p; + if (replacement != p) { + TreeNode<K,V> pp = replacement.parent = p.parent; + if (pp == null) + root = replacement; + else if (p == pp.left) + pp.left = replacement; + else + pp.right = replacement; + p.left = p.right = p.parent = null; + } + if (!p.red) { // rebalance, from CLR + for (TreeNode<K,V> x = replacement; x != null; ) { + TreeNode<K,V> xp, xpl, xpr; + if (x.red || (xp = x.parent) == null) { + x.red = false; + break; + } + else if ((xpl = xp.left) == x) { + if ((xpr = xp.right) != null && xpr.red) { + xpr.red = false; + xp.red = true; + rotateLeft(xp); + xpr = (xp = x.parent) == null ? 
null : xp.right; + } + if (xpr == null) + x = xp; + else { + TreeNode<K,V> sl = xpr.left, sr = xpr.right; + if ((sr == null || !sr.red) && + (sl == null || !sl.red)) { + xpr.red = true; + x = xp; + } + else { + if (sr == null || !sr.red) { + if (sl != null) + sl.red = false; + xpr.red = true; + rotateRight(xpr); + xpr = (xp = x.parent) == null ? + null : xp.right; + } + if (xpr != null) { + xpr.red = (xp == null) ? false : xp.red; + if ((sr = xpr.right) != null) + sr.red = false; + } + if (xp != null) { + xp.red = false; + rotateLeft(xp); + } + x = root; + } + } + } + else { // symmetric + if (xpl != null && xpl.red) { + xpl.red = false; + xp.red = true; + rotateRight(xp); + xpl = (xp = x.parent) == null ? null : xp.left; + } + if (xpl == null) + x = xp; + else { + TreeNode<K,V> sl = xpl.left, sr = xpl.right; + if ((sl == null || !sl.red) && + (sr == null || !sr.red)) { + xpl.red = true; + x = xp; + } + else { + if (sl == null || !sl.red) { + if (sr != null) + sr.red = false; + xpl.red = true; + rotateLeft(xpl); + xpl = (xp = x.parent) == null ? + null : xp.left; + } + if (xpl != null) { + xpl.red = (xp == null) ? false : xp.red; + if ((sl = xpl.left) != null) + sl.red = false; + } + if (xp != null) { + xp.red = false; + rotateRight(xp); + } + x = root; + } + } + } + } + } + if (p == replacement) { // detach pointers + TreeNode<K,V> pp; + if ((pp = p.parent) != null) { + if (p == pp.left) + pp.left = null; + else if (p == pp.right) + pp.right = null; + p.parent = null; + } + } + assert checkInvariants(); + } + + /** + * Checks linkage and balance invariants at root + */ + final boolean checkInvariants() { + TreeNode<K,V> r = root; + if (r == null) + return (first == null); + else + return (first != null) && checkTreeNode(r); } /** - * Sets next field with volatile write semantics. (See above - * about use of putOrderedObject.) 
+ * Recursive invariant check */ - final void setNext(HashEntry<K,V> n) { - UNSAFE.putOrderedObject(this, nextOffset, n); + final boolean checkTreeNode(TreeNode<K,V> t) { + TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right, + tb = t.prev, tn = (TreeNode<K,V>)t.next; + if (tb != null && tb.next != t) + return false; + if (tn != null && tn.prev != t) + return false; + if (tp != null && t != tp.left && t != tp.right) + return false; + if (tl != null && (tl.parent != t || tl.hash > t.hash)) + return false; + if (tr != null && (tr.parent != t || tr.hash < t.hash)) + return false; + if (t.red && tl != null && tl.red && tr != null && tr.red) + return false; + if (tl != null && !checkTreeNode(tl)) + return false; + if (tr != null && !checkTreeNode(tr)) + return false; + return true; + } + } + + /* ---------------- Collision reduction methods -------------- */ + + /** + * Spreads higher bits to lower, and also forces top bit to 0. + * Because the table uses power-of-two masking, sets of hashes + * that vary only in bits above the current mask will always + * collide. (Among known examples are sets of Float keys holding + * consecutive whole numbers in small tables.) To counter this, + * we apply a transform that spreads the impact of higher bits + * downward. There is a tradeoff between speed, utility, and + * quality of bit-spreading. Because many common sets of hashes + * are already reasonably distributed across bits (so don't benefit + * from spreading), and because we use trees to handle large sets + * of collisions in bins, we don't need excessively high quality. + */ + private static final int spread(int h) { + h ^= (h >>> 18) ^ (h >>> 12); + return (h ^ (h >>> 10)) & HASH_BITS; + } + + /** + * Replaces a list bin with a tree bin if key is comparable. Call + * only when locked. 
+ */ + private final void replaceWithTreeBin(Node<K,V>[] tab, int index, Object key) { + if (tab != null && comparableClassFor(key.getClass()) != null) { + TreeBin<K,V> t = new TreeBin<K,V>(); + for (Node<K,V> e = tabAt(tab, index); e != null; e = e.next) + t.putTreeNode(e.hash, e.key, e.val); + setTabAt(tab, index, new Node<K,V>(MOVED, t, null, null)); + } + } + + /* ---------------- Internal access and update methods -------------- */ + + /** Implementation for get and containsKey */ + private final V internalGet(Object k) { + int h = spread(k.hashCode()); + V v = null; + Node<K,V>[] tab; Node<K,V> e; + if ((tab = table) != null && + (e = tabAt(tab, (tab.length - 1) & h)) != null) { + for (;;) { + int eh; Object ek; + if ((eh = e.hash) < 0) { + if ((ek = e.key) instanceof TreeBin) { // search TreeBin + v = ((TreeBin<K,V>)ek).getValue(h, k); + break; + } + else if (!(ek instanceof Node[]) || // try new table + (e = tabAt(tab = (Node<K,V>[])ek, + (tab.length - 1) & h)) == null) + break; + } + else if (eh == h && ((ek = e.key) == k || k.equals(ek))) { + v = e.val; + break; + } + else if ((e = e.next) == null) + break; + } + } + return v; + } + + /** + * Implementation for the four public remove/replace methods: + * Replaces node value with v, conditional upon match of cv if + * non-null. If resulting value is null, delete. 
+ */ + private final V internalReplace(Object k, V v, Object cv) { + int h = spread(k.hashCode()); + V oldVal = null; + for (Node<K,V>[] tab = table;;) { + Node<K,V> f; int i, fh; Object fk; + if (tab == null || + (f = tabAt(tab, i = (tab.length - 1) & h)) == null) + break; + else if ((fh = f.hash) < 0) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin<K,V> t = (TreeBin<K,V>)fk; + long stamp = t.writeLock(); + boolean validated = false; + boolean deleted = false; + try { + if (tabAt(tab, i) == f) { + validated = true; + Class<?> cc = comparableClassFor(k.getClass()); + TreeNode<K,V> p = t.getTreeNode(h, k, t.root, cc); + if (p != null) { + V pv = p.val; + if (cv == null || cv == pv || cv.equals(pv)) { + oldVal = pv; + if (v != null) + p.val = v; + else { + deleted = true; + t.deleteTreeNode(p); + } + } + } + } + } finally { + t.unlockWrite(stamp); + } + if (validated) { + if (deleted) + addCount(-1L, -1); + break; + } + } + else + tab = (Node<K,V>[])fk; + } + else { + boolean validated = false; + boolean deleted = false; + synchronized (f) { + if (tabAt(tab, i) == f) { + validated = true; + for (Node<K,V> e = f, pred = null;;) { + Object ek; + if (e.hash == h && + ((ek = e.key) == k || k.equals(ek))) { + V ev = e.val; + if (cv == null || cv == ev || cv.equals(ev)) { + oldVal = ev; + if (v != null) + e.val = v; + else { + deleted = true; + Node<K,V> en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + } + break; + } + pred = e; + if ((e = e.next) == null) + break; + } + } + } + if (validated) { + if (deleted) + addCount(-1L, -1); + break; + } + } + } + return oldVal; + } + + /* + * Internal versions of insertion methods + * All have the same basic structure as the first (internalPut): + * 1. If table uninitialized, create + * 2. If bin empty, try to CAS new node + * 3. If bin stale, use new table + * 4. if bin converted to TreeBin, validate and relay to TreeBin methods + * 5. 
Lock and validate; if valid, scan and add or update + * + * The putAll method differs mainly in attempting to pre-allocate + * enough table space, and also more lazily performs count updates + * and checks. + * + * Most of the function-accepting methods can't be factored nicely + * because they require different functional forms, so instead + * sprawl out similar mechanics. + */ + + /** Implementation for put and putIfAbsent */ + private final V internalPut(K k, V v, boolean onlyIfAbsent) { + if (k == null || v == null) throw new NullPointerException(); + int h = spread(k.hashCode()); + int len = 0; + for (Node<K,V>[] tab = table;;) { + int i, fh; Node<K,V> f; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node<K,V>(h, k, v, null))) + break; // no lock when adding to empty bin + } + else if ((fh = f.hash) < 0) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin<K,V> t = (TreeBin<K,V>)fk; + long stamp = t.writeLock(); + V oldVal = null; + try { + if (tabAt(tab, i) == f) { + len = 2; + TreeNode<K,V> p = t.putTreeNode(h, k, v); + if (p != null) { + oldVal = p.val; + if (!onlyIfAbsent) + p.val = v; + } + } + } finally { + t.unlockWrite(stamp); + } + if (len != 0) { + if (oldVal != null) + return oldVal; + break; + } + } + else + tab = (Node<K,V>[])fk; + } + else { + V oldVal = null; + synchronized (f) { + if (tabAt(tab, i) == f) { + len = 1; + for (Node<K,V> e = f;; ++len) { + Object ek; + if (e.hash == h && + ((ek = e.key) == k || k.equals(ek))) { + oldVal = e.val; + if (!onlyIfAbsent) + e.val = v; + break; + } + Node<K,V> last = e; + if ((e = e.next) == null) { + last.next = new Node<K,V>(h, k, v, null); + if (len > TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } + if (len != 0) { + if (oldVal != null) + return oldVal; + break; + } + } + } + addCount(1L, len); + return null; + } + + /** Implementation for computeIfAbsent */ + private final 
V internalComputeIfAbsent(K k, Function<? super K, ? extends V> mf) { + if (k == null || mf == null) + throw new NullPointerException(); + int h = spread(k.hashCode()); + V val = null; + int len = 0; + for (Node<K,V>[] tab = table;;) { + Node<K,V> f; int i; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + Node<K,V> node = new Node<K,V>(h, k, null, null); + synchronized (node) { + if (casTabAt(tab, i, null, node)) { + len = 1; + try { + if ((val = mf.apply(k)) != null) + node.val = val; + } finally { + if (val == null) + setTabAt(tab, i, null); + } + } + } + if (len != 0) + break; + } + else if (f.hash < 0) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin<K,V> t = (TreeBin<K,V>)fk; + long stamp = t.writeLock(); + boolean added = false; + try { + if (tabAt(tab, i) == f) { + len = 2; + Class<?> cc = comparableClassFor(k.getClass()); + TreeNode<K,V> p = t.getTreeNode(h, k, t.root, cc); + if (p != null) + val = p.val; + else if ((val = mf.apply(k)) != null) { + added = true; + t.putTreeNode(h, k, val); + } + } + } finally { + t.unlockWrite(stamp); + } + if (len != 0) { + if (!added) + return val; + break; + } + } + else + tab = (Node<K,V>[])fk; + } + else { + boolean added = false; + synchronized (f) { + if (tabAt(tab, i) == f) { + len = 1; + for (Node<K,V> e = f;; ++len) { + Object ek; V ev; + if (e.hash == h && + ((ek = e.key) == k || k.equals(ek))) { + val = e.val; + break; + } + Node<K,V> last = e; + if ((e = e.next) == null) { + if ((val = mf.apply(k)) != null) { + added = true; + last.next = new Node<K,V>(h, k, val, null); + if (len > TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + } + break; + } + } + } + } + if (len != 0) { + if (!added) + return val; + break; + } + } } - - // Unsafe mechanics - static final sun.misc.Unsafe UNSAFE; - static final long nextOffset; - static { - try { - UNSAFE = sun.misc.Unsafe.getUnsafe(); - Class<?> k = HashEntry.class; - nextOffset = 
UNSAFE.objectFieldOffset - (k.getDeclaredField("next")); - } catch (Exception e) { - throw new Error(e); + if (val != null) + addCount(1L, len); + return val; + } + + /** Implementation for compute */ + private final V internalCompute(K k, boolean onlyIfPresent, + BiFunction<? super K, ? super V, ? extends V> mf) { + if (k == null || mf == null) + throw new NullPointerException(); + int h = spread(k.hashCode()); + V val = null; + int delta = 0; + int len = 0; + for (Node<K,V>[] tab = table;;) { + Node<K,V> f; int i, fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + if (onlyIfPresent) + break; + Node<K,V> node = new Node<K,V>(h, k, null, null); + synchronized (node) { + if (casTabAt(tab, i, null, node)) { + try { + len = 1; + if ((val = mf.apply(k, null)) != null) { + node.val = val; + delta = 1; + } + } finally { + if (delta == 0) + setTabAt(tab, i, null); + } + } + } + if (len != 0) + break; + } + else if ((fh = f.hash) < 0) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin<K,V> t = (TreeBin<K,V>)fk; + long stamp = t.writeLock(); + try { + if (tabAt(tab, i) == f) { + len = 2; + Class<?> cc = comparableClassFor(k.getClass()); + TreeNode<K,V> p = t.getTreeNode(h, k, t.root, cc); + if (p != null || !onlyIfPresent) { + V pv = (p == null) ? 
null : p.val; + if ((val = mf.apply(k, pv)) != null) { + if (p != null) + p.val = val; + else { + delta = 1; + t.putTreeNode(h, k, val); + } + } + else if (p != null) { + delta = -1; + t.deleteTreeNode(p); + } + } + } + } finally { + t.unlockWrite(stamp); + } + if (len != 0) + break; + } + else + tab = (Node<K,V>[])fk; + } + else { + synchronized (f) { + if (tabAt(tab, i) == f) { + len = 1; + for (Node<K,V> e = f, pred = null;; ++len) { + Object ek; + if (e.hash == h && + ((ek = e.key) == k || k.equals(ek))) { + val = mf.apply(k, e.val); + if (val != null) + e.val = val; + else { + delta = -1; + Node<K,V> en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + break; + } + pred = e; + if ((e = e.next) == null) { + if (!onlyIfPresent && + (val = mf.apply(k, null)) != null) { + pred.next = new Node<K,V>(h, k, val, null); + delta = 1; + if (len > TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + } + break; + } + } + } + } + if (len != 0) + break; + } + } + if (delta != 0) + addCount((long)delta, len); + return val; + } + + /** Implementation for merge */ + private final V internalMerge(K k, V v, + BiFunction<? super V, ? super V, ? extends V> mf) { + if (k == null || v == null || mf == null) + throw new NullPointerException(); + int h = spread(k.hashCode()); + V val = null; + int delta = 0; + int len = 0; + for (Node<K,V>[] tab = table;;) { + int i; Node<K,V> f; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node<K,V>(h, k, v, null))) { + delta = 1; + val = v; + break; + } + } + else if (f.hash < 0) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin<K,V> t = (TreeBin<K,V>)fk; + long stamp = t.writeLock(); + try { + if (tabAt(tab, i) == f) { + len = 2; + Class<?> cc = comparableClassFor(k.getClass()); + TreeNode<K,V> p = t.getTreeNode(h, k, t.root, cc); + val = (p == null) ? 
v : mf.apply(p.val, v); + if (val != null) { + if (p != null) + p.val = val; + else { + delta = 1; + t.putTreeNode(h, k, val); + } + } + else if (p != null) { + delta = -1; + t.deleteTreeNode(p); + } + } + } finally { + t.unlockWrite(stamp); + } + if (len != 0) + break; + } + else + tab = (Node<K,V>[])fk; + } + else { + synchronized (f) { + if (tabAt(tab, i) == f) { + len = 1; + for (Node<K,V> e = f, pred = null;; ++len) { + Object ek; + if (e.hash == h && + ((ek = e.key) == k || k.equals(ek))) { + val = mf.apply(e.val, v); + if (val != null) + e.val = val; + else { + delta = -1; + Node<K,V> en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + break; + } + pred = e; + if ((e = e.next) == null) { + delta = 1; + val = v; + pred.next = new Node<K,V>(h, k, val, null); + if (len > TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } + if (len != 0) + break; + } + } + if (delta != 0) + addCount((long)delta, len); + return val; + } + + /** Implementation for putAll */ + private final void internalPutAll(Map<? extends K, ? extends V> m) { + tryPresize(m.size()); + long delta = 0L; // number of uncommitted additions + boolean npe = false; // to throw exception on exit for nulls + try { // to clean up counts on other exceptions + for (Map.Entry<?, ? 
extends V> entry : m.entrySet()) { + Object k; V v; + if (entry == null || (k = entry.getKey()) == null || + (v = entry.getValue()) == null) { + npe = true; + break; + } + int h = spread(k.hashCode()); + for (Node<K,V>[] tab = table;;) { + int i; Node<K,V> f; int fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null){ + if (casTabAt(tab, i, null, new Node<K,V>(h, k, v, null))) { + ++delta; + break; + } + } + else if ((fh = f.hash) < 0) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin<K,V> t = (TreeBin<K,V>)fk; + long stamp = t.writeLock(); + boolean validated = false; + try { + if (tabAt(tab, i) == f) { + validated = true; + Class<?> cc = comparableClassFor(k.getClass()); + TreeNode<K,V> p = t.getTreeNode(h, k, + t.root, cc); + if (p != null) + p.val = v; + else { + ++delta; + t.putTreeNode(h, k, v); + } + } + } finally { + t.unlockWrite(stamp); + } + if (validated) + break; + } + else + tab = (Node<K,V>[])fk; + } + else { + int len = 0; + synchronized (f) { + if (tabAt(tab, i) == f) { + len = 1; + for (Node<K,V> e = f;; ++len) { + Object ek; + if (e.hash == h && + ((ek = e.key) == k || k.equals(ek))) { + e.val = v; + break; + } + Node<K,V> last = e; + if ((e = e.next) == null) { + ++delta; + last.next = new Node<K,V>(h, k, v, null); + if (len > TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } + if (len != 0) { + if (len > 1) { + addCount(delta, len); + delta = 0L; + } + break; + } + } + } + } + } finally { + if (delta != 0L) + addCount(delta, 2); + } + if (npe) + throw new NullPointerException(); + } + + /** + * Implementation for clear. Steps through each bin, removing all + * nodes. 
+ */ + private final void internalClear() { + long delta = 0L; // negative number of deletions + int i = 0; + Node<K,V>[] tab = table; + while (tab != null && i < tab.length) { + Node<K,V> f = tabAt(tab, i); + if (f == null) + ++i; + else if (f.hash < 0) { + Object fk; + if ((fk = f.key) instanceof TreeBin) { + TreeBin<K,V> t = (TreeBin<K,V>)fk; + long stamp = t.writeLock(); + try { + if (tabAt(tab, i) == f) { + for (Node<K,V> p = t.first; p != null; p = p.next) + --delta; + t.first = null; + t.root = null; + ++i; + } + } finally { + t.unlockWrite(stamp); + } + } + else + tab = (Node<K,V>[])fk; + } + else { + synchronized (f) { + if (tabAt(tab, i) == f) { + for (Node<K,V> e = f; e != null; e = e.next) + --delta; + setTabAt(tab, i, null); + ++i; + } + } + } + } + if (delta != 0L) + addCount(delta, -1); + } + + /* ---------------- Table Initialization and Resizing -------------- */ + + /** + * Returns a power of two table size for the given desired capacity. + * See Hackers Delight, sec 3.2 + */ + private static final int tableSizeFor(int c) { + int n = c - 1; + n |= n >>> 1; + n |= n >>> 2; + n |= n >>> 4; + n |= n >>> 8; + n |= n >>> 16; + return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1; + } + + /** + * Initializes table, using the size recorded in sizeCtl. + */ + private final Node<K,V>[] initTable() { + Node<K,V>[] tab; int sc; + while ((tab = table) == null) { + if ((sc = sizeCtl) < 0) + Thread.yield(); // lost initialization race; just spin + else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) { + try { + if ((tab = table) == null) { + int n = (sc > 0) ? sc : DEFAULT_CAPACITY; + table = tab = (Node<K,V>[])new Node[n]; + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + break; + } + } + return tab; + } + + /** + * Adds to count, and if table is too small and not already + * resizing, initiates transfer. If already resizing, helps + * perform transfer if work is available. 
Rechecks occupancy + * after a transfer to see if another resize is already needed + * because resizings are lagging additions. + * + * @param x the count to add + * @param check if <0, don't check resize, if <= 1 only check if uncontended + */ + private final void addCount(long x, int check) { + Cell[] as; long b, s; + if ((as = counterCells) != null || + !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) { + Cell a; long v; int m; + boolean uncontended = true; + if (as == null || (m = as.length - 1) < 0 || + (a = as[ThreadLocalRandom.getProbe() & m]) == null || + !(uncontended = + U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) { + fullAddCount(x, uncontended); + return; + } + if (check <= 1) + return; + s = sumCount(); + } + if (check >= 0) { + Node<K,V>[] tab, nt; int sc; + while (s >= (long)(sc = sizeCtl) && (tab = table) != null && + tab.length < MAXIMUM_CAPACITY) { + if (sc < 0) { + if (sc == -1 || transferIndex <= transferOrigin || + (nt = nextTable) == null) + break; + if (U.compareAndSwapInt(this, SIZECTL, sc, sc - 1)) + transfer(tab, nt); + } + else if (U.compareAndSwapInt(this, SIZECTL, sc, -2)) + transfer(tab, null); + s = sumCount(); } } } /** - * Gets the ith element of given table (if nonnull) with volatile - * read semantics. Note: This is manually integrated into a few - * performance-sensitive methods to reduce call overhead. - */ - @SuppressWarnings("unchecked") - static final <K,V> HashEntry<K,V> entryAt(HashEntry<K,V>[] tab, int i) { - return (tab == null) ? null : - (HashEntry<K,V>) UNSAFE.getObjectVolatile - (tab, ((long)i << TSHIFT) + TBASE); - } - - /** - * Sets the ith element of given table, with volatile write - * semantics. (See above about use of putOrderedObject.) + * Tries to presize table to accommodate the given number of elements. 
+ * + * @param size number of elements (doesn't need to be perfectly accurate) */ - static final <K,V> void setEntryAt(HashEntry<K,V>[] tab, int i, - HashEntry<K,V> e) { - UNSAFE.putOrderedObject(tab, ((long)i << TSHIFT) + TBASE, e); - } - - /** - * Applies a supplemental hash function to a given hashCode, which - * defends against poor quality hash functions. This is critical - * because ConcurrentHashMap uses power-of-two length hash tables, - * that otherwise encounter collisions for hashCodes that do not - * differ in lower or upper bits. - */ - private int hash(Object k) { - if (k instanceof String) { - return ((String) k).hash32(); + private final void tryPresize(int size) { + int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY : + tableSizeFor(size + (size >>> 1) + 1); + int sc; + while ((sc = sizeCtl) >= 0) { + Node<K,V>[] tab = table; int n; + if (tab == null || (n = tab.length) == 0) { + n = (sc > c) ? sc : c; + if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) { + try { + if (table == tab) { + table = (Node<K,V>[])new Node[n]; + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + } + } + else if (c <= sc || n >= MAXIMUM_CAPACITY) + break; + else if (tab == table && + U.compareAndSwapInt(this, SIZECTL, sc, -2)) + transfer(tab, null); } - - int h = hashSeed ^ k.hashCode(); - - // Spread bits to regularize both segment and index locations, - // using variant of single-word Wang/Jenkins hash. - h += (h << 15) ^ 0xffffcd7d; - h ^= (h >>> 10); - h += (h << 3); - h ^= (h >>> 6); - h += (h << 2) + (h << 14); - return h ^ (h >>> 16); } /** - * Segments are specialized versions of hash tables. This - * subclasses from ReentrantLock opportunistically, just to - * simplify some locking and avoid separate construction. + * Moves and/or copies the nodes in each bin to new table. See + * above for explanation. 
*/ - static final class Segment<K,V> extends ReentrantLock implements Serializable { - /* - * Segments maintain a table of entry lists that are always - * kept in a consistent state, so can be read (via volatile - * reads of segments and tables) without locking. This - * requires replicating nodes when necessary during table - * resizing, so the old lists can be traversed by readers - * still using old version of table. - * - * This class defines only mutative methods requiring locking. - * Except as noted, the methods of this class perform the - * per-segment versions of ConcurrentHashMap methods. (Other - * methods are integrated directly into ConcurrentHashMap - * methods.) These mutative methods use a form of controlled - * spinning on contention via methods scanAndLock and - * scanAndLockForPut. These intersperse tryLocks with - * traversals to locate nodes. The main benefit is to absorb - * cache misses (which are very common for hash tables) while - * obtaining locks so that traversal is faster once - * acquired. We do not actually use the found nodes since they - * must be re-acquired under lock anyway to ensure sequential - * consistency of updates (and in any case may be undetectably - * stale), but they will normally be much faster to re-locate. - * Also, scanAndLockForPut speculatively creates a fresh node - * to use in put if no node is found. - */ - - private static final long serialVersionUID = 2249069246763182397L; - - /** - * The maximum number of times to tryLock in a prescan before - * possibly blocking on acquire in preparation for a locked - * segment operation. On multiprocessors, using a bounded - * number of retries maintains cache acquired while locating - * nodes. - */ - static final int MAX_SCAN_RETRIES = - Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1; - - /** - * The per-segment table. Elements are accessed via - * entryAt/setEntryAt providing volatile semantics. 
- */ - transient volatile HashEntry<K,V>[] table; - - /** - * The number of elements. Accessed only either within locks - * or among other volatile reads that maintain visibility. - */ - transient int count; - - /** - * The total number of mutative operations in this segment. - * Even though this may overflows 32 bits, it provides - * sufficient accuracy for stability checks in CHM isEmpty() - * and size() methods. Accessed only either within locks or - * among other volatile reads that maintain visibility. - */ - transient int modCount; - - /** - * The table is rehashed when its size exceeds this threshold. - * (The value of this field is always {@code (int)(capacity * - * loadFactor)}.) - */ - transient int threshold; - - /** - * The load factor for the hash table. Even though this value - * is same for all segments, it is replicated to avoid needing - * links to outer object. - * @serial - */ - final float loadFactor; - - Segment(float lf, int threshold, HashEntry<K,V>[] tab) { - this.loadFactor = lf; - this.threshold = threshold; - this.table = tab; + private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) { + int n = tab.length, stride; + if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE) + stride = MIN_TRANSFER_STRIDE; // subdivide range + if (nextTab == null) { // initiating + try { + nextTab = (Node<K,V>[])new Node[n << 1]; + } catch (Throwable ex) { // try to cope with OOME + sizeCtl = Integer.MAX_VALUE; + return; + } + nextTable = nextTab; + transferOrigin = n; + transferIndex = n; + Node<K,V> rev = new Node<K,V>(MOVED, tab, null, null); + for (int k = n; k > 0;) { // progressively reveal ready slots + int nextk = (k > stride) ? k - stride : 0; + for (int m = nextk; m < k; ++m) + nextTab[m] = rev; + for (int m = n + nextk; m < n + k; ++m) + nextTab[m] = rev; + U.putOrderedInt(this, TRANSFERORIGIN, k = nextk); + } } - - final V put(K key, int hash, V value, boolean onlyIfAbsent) { - HashEntry<K,V> node = tryLock() ? 
null : - scanAndLockForPut(key, hash, value); - V oldValue; - try { - HashEntry<K,V>[] tab = table; - int index = (tab.length - 1) & hash; - HashEntry<K,V> first = entryAt(tab, index); - for (HashEntry<K,V> e = first;;) { - if (e != null) { - K k; - if ((k = e.key) == key || - (e.hash == hash && key.equals(k))) { - oldValue = e.value; - if (!onlyIfAbsent) { - e.value = value; - ++modCount; - } - break; + int nextn = nextTab.length; + Node<K,V> fwd = new Node<K,V>(MOVED, nextTab, null, null); + boolean advance = true; + for (int i = 0, bound = 0;;) { + int nextIndex, nextBound; Node<K,V> f; Object fk; + while (advance) { + if (--i >= bound) + advance = false; + else if ((nextIndex = transferIndex) <= transferOrigin) { + i = -1; + advance = false; + } + else if (U.compareAndSwapInt + (this, TRANSFERINDEX, nextIndex, + nextBound = (nextIndex > stride ? + nextIndex - stride : 0))) { + bound = nextBound; + i = nextIndex - 1; + advance = false; + } + } + if (i < 0 || i >= n || i + n >= nextn) { + for (int sc;;) { + if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, ++sc)) { + if (sc == -1) { + nextTable = null; + table = nextTab; + sizeCtl = (n << 1) - (n >>> 1); } - e = e.next; - } - else { - if (node != null) - node.setNext(first); - else - node = new HashEntry<K,V>(hash, key, value, first); - int c = count + 1; - if (c > threshold && tab.length < MAXIMUM_CAPACITY) - rehash(node); - else - setEntryAt(tab, index, node); - ++modCount; - count = c; - oldValue = null; - break; + return; } } - } finally { - unlock(); + } + else if ((f = tabAt(tab, i)) == null) { + if (casTabAt(tab, i, null, fwd)) { + setTabAt(nextTab, i, null); + setTabAt(nextTab, i + n, null); + advance = true; + } } - return oldValue; - } - - /** - * Doubles size of table and repacks entries, also adding the - * given node to new table - */ - @SuppressWarnings("unchecked") - private void rehash(HashEntry<K,V> node) { - /* - * Reclassify nodes in each list to new table. 
Because we - * are using power-of-two expansion, the elements from - * each bin must either stay at same index, or move with a - * power of two offset. We eliminate unnecessary node - * creation by catching cases where old nodes can be - * reused because their next fields won't change. - * Statistically, at the default threshold, only about - * one-sixth of them need cloning when a table - * doubles. The nodes they replace will be garbage - * collectable as soon as they are no longer referenced by - * any reader thread that may be in the midst of - * concurrently traversing table. Entry accesses use plain - * array indexing because they are followed by volatile - * table write. - */ - HashEntry<K,V>[] oldTable = table; - int oldCapacity = oldTable.length; - int newCapacity = oldCapacity << 1; - threshold = (int)(newCapacity * loadFactor); - HashEntry<K,V>[] newTable = - (HashEntry<K,V>[]) new HashEntry<?,?>[newCapacity]; - int sizeMask = newCapacity - 1; - for (int i = 0; i < oldCapacity ; i++) { - HashEntry<K,V> e = oldTable[i]; - if (e != null) { - HashEntry<K,V> next = e.next; - int idx = e.hash & sizeMask; - if (next == null) // Single node on list - newTable[idx] = e; - else { // Reuse consecutive sequence at same slot - HashEntry<K,V> lastRun = e; - int lastIdx = idx; - for (HashEntry<K,V> last = next; - last != null; - last = last.next) { - int k = last.hash & sizeMask; - if (k != lastIdx) { - lastIdx = k; - lastRun = last; + else if (f.hash >= 0) { + synchronized (f) { + if (tabAt(tab, i) == f) { + int runBit = f.hash & n; + Node<K,V> lastRun = f, lo = null, hi = null; + for (Node<K,V> p = f.next; p != null; p = p.next) { + int b = p.hash & n; + if (b != runBit) { + runBit = b; + lastRun = p; } } - newTable[lastIdx] = lastRun; - // Clone remaining nodes - for (HashEntry<K,V> p = e; p != lastRun; p = p.next) { - V v = p.value; - int h = p.hash; - int k = h & sizeMask; - HashEntry<K,V> n = newTable[k]; - newTable[k] = new HashEntry<K,V>(h, p.key, v, n); + if 
(runBit == 0) + lo = lastRun; + else + hi = lastRun; + for (Node<K,V> p = f; p != lastRun; p = p.next) { + int ph = p.hash; Object pk = p.key; V pv = p.val; + if ((ph & n) == 0) + lo = new Node<K,V>(ph, pk, pv, lo); + else + hi = new Node<K,V>(ph, pk, pv, hi); } + setTabAt(nextTab, i, lo); + setTabAt(nextTab, i + n, hi); + setTabAt(tab, i, fwd); + advance = true; } } } - int nodeIndex = node.hash & sizeMask; // add the new node - node.setNext(newTable[nodeIndex]); - newTable[nodeIndex] = node; - table = newTable; - } - - /** - * Scans for a node containing given key while trying to - * acquire lock, creating and returning one if not found. Upon - * return, guarantees that lock is held. UNlike in most - * methods, calls to method equals are not screened: Since - * traversal speed doesn't matter, we might as well help warm - * up the associated code and accesses as well. - * - * @return a new node if key not found, else null - */ - private HashEntry<K,V> scanAndLockForPut(K key, int hash, V value) { - HashEntry<K,V> first = entryForHash(this, hash); - HashEntry<K,V> e = first; - HashEntry<K,V> node = null; - int retries = -1; // negative while locating node - while (!tryLock()) { - HashEntry<K,V> f; // to recheck first below - if (retries < 0) { - if (e == null) { - if (node == null) // speculatively create node - node = new HashEntry<K,V>(hash, key, value, null); - retries = 0; + else if ((fk = f.key) instanceof TreeBin) { + TreeBin<K,V> t = (TreeBin<K,V>)fk; + long stamp = t.writeLock(); + try { + if (tabAt(tab, i) == f) { + TreeNode<K,V> root; + Node<K,V> ln = null, hn = null; + if ((root = t.root) != null) { + Node<K,V> e, p; TreeNode<K,V> lr, rr; int lh; + TreeBin<K,V> lt = null, ht = null; + for (lr = root; lr.left != null; lr = lr.left); + for (rr = root; rr.right != null; rr = rr.right); + if ((lh = lr.hash) == rr.hash) { // move entire tree + if ((lh & n) == 0) + lt = t; + else + ht = t; + } + else { + lt = new TreeBin<K,V>(); + ht = new TreeBin<K,V>(); + 
int lc = 0, hc = 0; + for (e = t.first; e != null; e = e.next) { + int h = e.hash; + Object k = e.key; V v = e.val; + if ((h & n) == 0) { + ++lc; + lt.putTreeNode(h, k, v); + } + else { + ++hc; + ht.putTreeNode(h, k, v); + } + } + if (lc < TREE_THRESHOLD) { // throw away + for (p = lt.first; p != null; p = p.next) + ln = new Node<K,V>(p.hash, p.key, + p.val, ln); + lt = null; + } + if (hc < TREE_THRESHOLD) { + for (p = ht.first; p != null; p = p.next) + hn = new Node<K,V>(p.hash, p.key, + p.val, hn); + ht = null; + } + } + if (ln == null && lt != null) + ln = new Node<K,V>(MOVED, lt, null, null); + if (hn == null && ht != null) + hn = new Node<K,V>(MOVED, ht, null, null); + } + setTabAt(nextTab, i, ln); + setTabAt(nextTab, i + n, hn); + setTabAt(tab, i, fwd); + advance = true; } - else if (key.equals(e.key)) - retries = 0; - else - e = e.next; - } - else if (++retries > MAX_SCAN_RETRIES) { - lock(); - break; - } - else if ((retries & 1) == 0 && - (f = entryForHash(this, hash)) != first) { - e = first = f; // re-traverse if entry changed - retries = -1; + } finally { + t.unlockWrite(stamp); } } - return node; + else + advance = true; // already processed + } + } + + /* ---------------- Counter support -------------- */ + + final long sumCount() { + Cell[] as = counterCells; Cell a; + long sum = baseCount; + if (as != null) { + for (int i = 0; i < as.length; ++i) { + if ((a = as[i]) != null) + sum += a.value; + } + } + return sum; + } + + // See LongAdder version for explanation + private final void fullAddCount(long x, boolean wasUncontended) { + int h; + if ((h = ThreadLocalRandom.getProbe()) == 0) { + ThreadLocalRandom.localInit(); // force initialization + h = ThreadLocalRandom.getProbe(); + wasUncontended = true; } - - /** - * Scans for a node containing the given key while trying to - * acquire lock for a remove or replace operation. Upon - * return, guarantees that lock is held. 
Note that we must - * lock even if the key is not found, to ensure sequential - * consistency of updates. - */ - private void scanAndLock(Object key, int hash) { - // similar to but simpler than scanAndLockForPut - HashEntry<K,V> first = entryForHash(this, hash); - HashEntry<K,V> e = first; - int retries = -1; - while (!tryLock()) { - HashEntry<K,V> f; - if (retries < 0) { - if (e == null || key.equals(e.key)) - retries = 0; - else - e = e.next; + boolean collide = false; // True if last slot nonempty + for (;;) { + Cell[] as; Cell a; int n; long v; + if ((as = counterCells) != null && (n = as.length) > 0) { + if ((a = as[(n - 1) & h]) == null) { + if (cellsBusy == 0) { // Try to attach new Cell + Cell r = new Cell(x); // Optimistic create + if (cellsBusy == 0 && + U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) { + boolean created = false; + try { // Recheck under lock + Cell[] rs; int m, j; + if ((rs = counterCells) != null && + (m = rs.length) > 0 && + rs[j = (m - 1) & h] == null) { + rs[j] = r; + created = true; + } + } finally { + cellsBusy = 0; + } + if (created) + break; + continue; // Slot is now non-empty + } + } + collide = false; + } + else if (!wasUncontended) // CAS already known to fail + wasUncontended = true; // Continue after rehash + else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x)) + break; + else if (counterCells != as || n >= NCPU) + collide = false; // At max size or stale + else if (!collide) + collide = true; + else if (cellsBusy == 0 && + U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) { + try { + if (counterCells == as) {// Expand table unless stale + Cell[] rs = new Cell[n << 1]; + for (int i = 0; i < n; ++i) + rs[i] = as[i]; + counterCells = rs; + } + } finally { + cellsBusy = 0; + } + collide = false; + continue; // Retry with expanded table } - else if (++retries > MAX_SCAN_RETRIES) { - lock(); - break; + h = ThreadLocalRandom.advanceProbe(h); + } + else if (cellsBusy == 0 && counterCells == as && + U.compareAndSwapInt(this, 
CELLSBUSY, 0, 1)) { + boolean init = false; + try { // Initialize table + if (counterCells == as) { + Cell[] rs = new Cell[2]; + rs[h & 1] = new Cell(x); + counterCells = rs; + init = true; + } + } finally { + cellsBusy = 0; } - else if ((retries & 1) == 0 && - (f = entryForHash(this, hash)) != first) { - e = first = f; - retries = -1; - } + if (init) + break; } + else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x)) + break; // Fall back on using base + } + } + + /* ----------------Table Traversal -------------- */ + + /** + * Encapsulates traversal for methods such as containsValue; also + * serves as a base class for other iterators and spliterators. + * + * Method advance visits once each still-valid node that was + * reachable upon iterator construction. It might miss some that + * were added to a bin after the bin was visited, which is OK wrt + * consistency guarantees. Maintaining this property in the face + * of possible ongoing resizes requires a fair amount of + * bookkeeping state that is difficult to optimize away amidst + * volatile accesses. Even so, traversal maintains reasonable + * throughput. + * + * Normally, iteration proceeds bin-by-bin traversing lists. + * However, if the table has been resized, then all future steps + * must traverse both the bin at the current index as well as at + * (index + baseSize); and so on for further resizings. To + * paranoically cope with potential sharing by users of iterators + * across threads, iteration terminates if a bounds checks fails + * for a table read. 
+ */ + static class Traverser<K,V> { + Node<K,V>[] tab; // current table; updated if resized + Node<K,V> next; // the next entry to use + int index; // index of bin to use next + int baseIndex; // current index of initial table + int baseLimit; // index bound for initial table + final int baseSize; // initial table size + + Traverser(Node<K,V>[] tab, int size, int index, int limit) { + this.tab = tab; + this.baseSize = size; + this.baseIndex = this.index = index; + this.baseLimit = limit; + this.next = null; } /** - * Remove; match on key only if value null, else match both. + * Advances if possible, returning next valid node, or null if none. */ - final V remove(Object key, int hash, Object value) { - if (!tryLock()) - scanAndLock(key, hash); - V oldValue = null; - try { - HashEntry<K,V>[] tab = table; - int index = (tab.length - 1) & hash; - HashEntry<K,V> e = entryAt(tab, index); - HashEntry<K,V> pred = null; - while (e != null) { - K k; - HashEntry<K,V> next = e.next; - if ((k = e.key) == key || - (e.hash == hash && key.equals(k))) { - V v = e.value; - if (value == null || value == v || value.equals(v)) { - if (pred == null) - setEntryAt(tab, index, next); - else - pred.setNext(next); - ++modCount; - --count; - oldValue = v; - } - break; - } - pred = e; - e = next; - } - } finally { - unlock(); - } - return oldValue; - } - - final boolean replace(K key, int hash, V oldValue, V newValue) { - if (!tryLock()) - scanAndLock(key, hash); - boolean replaced = false; - try { - HashEntry<K,V> e; - for (e = entryForHash(this, hash); e != null; e = e.next) { - K k; - if ((k = e.key) == key || - (e.hash == hash && key.equals(k))) { - if (oldValue.equals(e.value)) { - e.value = newValue; - ++modCount; - replaced = true; - } - break; + final Node<K,V> advance() { + Node<K,V> e; + if ((e = next) != null) + e = e.next; + for (;;) { + Node<K,V>[] t; int i, n; Object ek; // must use locals in checks + if (e != null) + return next = e; + if (baseIndex >= baseLimit || (t = tab) == 
null || + (n = t.length) <= (i = index) || i < 0) + return next = null; + if ((e = tabAt(t, index)) != null && e.hash < 0) { + if ((ek = e.key) instanceof TreeBin) + e = ((TreeBin<K,V>)ek).first; + else { + tab = (Node<K,V>[])ek; + e = null; + continue; } } - } finally { - unlock(); - } - return replaced; - } - - final V replace(K key, int hash, V value) { - if (!tryLock()) - scanAndLock(key, hash); - V oldValue = null; - try { - HashEntry<K,V> e; - for (e = entryForHash(this, hash); e != null; e = e.next) { - K k; - if ((k = e.key) == key || - (e.hash == hash && key.equals(k))) { - oldValue = e.value; - e.value = value; - ++modCount; - break; - } - } - } finally { - unlock(); - } - return oldValue; - } - - final void clear() { - lock(); - try { - HashEntry<K,V>[] tab = table; - for (int i = 0; i < tab.length ; i++) - setEntryAt(tab, i, null); - ++modCount; - count = 0; - } finally { - unlock(); + if ((index += baseSize) >= n) + index = ++baseIndex; // visit upper slots if present } } } - // Accessing segments - /** - * Gets the jth element of given segment array (if nonnull) with - * volatile element access semantics via Unsafe. (The null check - * can trigger harmlessly only during deserialization.) Note: - * because each element of segments array is set only once (using - * fully ordered writes), some performance-sensitive methods rely - * on this method only as a recheck upon null reads. - */ - @SuppressWarnings("unchecked") - static final <K,V> Segment<K,V> segmentAt(Segment<K,V>[] ss, int j) { - long u = (j << SSHIFT) + SBASE; - return ss == null ? null : - (Segment<K,V>) UNSAFE.getObjectVolatile(ss, u); - } - - /** - * Returns the segment for the given index, creating it and - * recording in segment table (via CAS) if not already present. - * - * @param k the index - * @return the segment + * Base of key, value, and entry Iterators. 
Adds fields to + * Traverser to support iterator.remove */ - @SuppressWarnings("unchecked") - private Segment<K,V> ensureSegment(int k) { - final Segment<K,V>[] ss = this.segments; - long u = (k << SSHIFT) + SBASE; // raw offset - Segment<K,V> seg; - if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u)) == null) { - Segment<K,V> proto = ss[0]; // use segment 0 as prototype - int cap = proto.table.length; - float lf = proto.loadFactor; - int threshold = (int)(cap * lf); - HashEntry<K,V>[] tab = (HashEntry<K,V>[])new HashEntry<?,?>[cap]; - if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u)) - == null) { // recheck - Segment<K,V> s = new Segment<K,V>(lf, threshold, tab); - while ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u)) - == null) { - if (UNSAFE.compareAndSwapObject(ss, u, null, seg = s)) - break; - } - } + static class BaseIterator<K,V> extends Traverser<K,V> { + final ConcurrentHashMap<K,V> map; + Node<K,V> lastReturned; + BaseIterator(Node<K,V>[] tab, int size, int index, int limit, + ConcurrentHashMap<K,V> map) { + super(tab, size, index, limit); + this.map = map; + advance(); + } + + public final boolean hasNext() { return next != null; } + public final boolean hasMoreElements() { return next != null; } + + public final void remove() { + Node<K,V> p; + if ((p = lastReturned) == null) + throw new IllegalStateException(); + lastReturned = null; + map.internalReplace((K)p.key, null, null); + } + } + + static final class KeyIterator<K,V> extends BaseIterator<K,V> + implements Iterator<K>, Enumeration<K> { + KeyIterator(Node<K,V>[] tab, int index, int size, int limit, + ConcurrentHashMap<K,V> map) { + super(tab, index, size, limit, map); + } + + public final K next() { + Node<K,V> p; + if ((p = next) == null) + throw new NoSuchElementException(); + K k = (K)p.key; + lastReturned = p; + advance(); + return k; } - return seg; + + public final K nextElement() { return next(); } + } + + static final class ValueIterator<K,V> extends BaseIterator<K,V> 
+ implements Iterator<V>, Enumeration<V> { + ValueIterator(Node<K,V>[] tab, int index, int size, int limit, + ConcurrentHashMap<K,V> map) { + super(tab, index, size, limit, map); + } + + public final V next() { + Node<K,V> p; + if ((p = next) == null) + throw new NoSuchElementException(); + V v = p.val; + lastReturned = p; + advance(); + return v; + } + + public final V nextElement() { return next(); } + } + + static final class EntryIterator<K,V> extends BaseIterator<K,V> + implements Iterator<Map.Entry<K,V>> { + EntryIterator(Node<K,V>[] tab, int index, int size, int limit, + ConcurrentHashMap<K,V> map) { + super(tab, index, size, limit, map); + } + + public final Map.Entry<K,V> next() { + Node<K,V> p; + if ((p = next) == null) + throw new NoSuchElementException(); + K k = (K)p.key; + V v = p.val; + lastReturned = p; + advance(); + return new MapEntry<K,V>(k, v, map); + } } - // Hash-based segment and entry accesses - - /** - * Gets the segment for the given hash code. - */ - @SuppressWarnings("unchecked") - private Segment<K,V> segmentForHash(int h) { - long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE; - return (Segment<K,V>) UNSAFE.getObjectVolatile(segments, u); + static final class KeySpliterator<K,V> extends Traverser<K,V> + implements Spliterator<K> { + long est; // size estimate + KeySpliterator(Node<K,V>[] tab, int size, int index, int limit, + long est) { + super(tab, size, index, limit); + this.est = est; + } + + public Spliterator<K> trySplit() { + int i, f, h; + return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null : + new KeySpliterator<K,V>(tab, baseSize, baseLimit = h, + f, est >>>= 1); + } + + public void forEachRemaining(Consumer<? super K> action) { + if (action == null) throw new NullPointerException(); + for (Node<K,V> p; (p = advance()) != null;) + action.accept((K)p.key); + } + + public boolean tryAdvance(Consumer<? 
super K> action) { + if (action == null) throw new NullPointerException(); + Node<K,V> p; + if ((p = advance()) == null) + return false; + action.accept((K)p.key); + return true; + } + + public long estimateSize() { return est; } + + public int characteristics() { + return Spliterator.DISTINCT | Spliterator.CONCURRENT | + Spliterator.NONNULL; + } } - /** - * Gets the table entry for the given segment and hash code. - */ - @SuppressWarnings("unchecked") - static final <K,V> HashEntry<K,V> entryForHash(Segment<K,V> seg, int h) { - HashEntry<K,V>[] tab; - return (seg == null || (tab = seg.table) == null) ? null : - (HashEntry<K,V>) UNSAFE.getObjectVolatile - (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE); + static final class ValueSpliterator<K,V> extends Traverser<K,V> + implements Spliterator<V> { + long est; // size estimate + ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit, + long est) { + super(tab, size, index, limit); + this.est = est; + } + + public Spliterator<V> trySplit() { + int i, f, h; + return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null : + new ValueSpliterator<K,V>(tab, baseSize, baseLimit = h, + f, est >>>= 1); + } + + public void forEachRemaining(Consumer<? super V> action) { + if (action == null) throw new NullPointerException(); + for (Node<K,V> p; (p = advance()) != null;) + action.accept(p.val); + } + + public boolean tryAdvance(Consumer<? 
super V> action) { + if (action == null) throw new NullPointerException(); + Node<K,V> p; + if ((p = advance()) == null) + return false; + action.accept(p.val); + return true; + } + + public long estimateSize() { return est; } + + public int characteristics() { + return Spliterator.CONCURRENT | Spliterator.NONNULL; + } } + static final class EntrySpliterator<K,V> extends Traverser<K,V> + implements Spliterator<Map.Entry<K,V>> { + final ConcurrentHashMap<K,V> map; // To export MapEntry + long est; // size estimate + EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit, + long est, ConcurrentHashMap<K,V> map) { + super(tab, size, index, limit); + this.map = map; + this.est = est; + } + + public Spliterator<Map.Entry<K,V>> trySplit() { + int i, f, h; + return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null : + new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h, + f, est >>>= 1, map); + } + + public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) { + if (action == null) throw new NullPointerException(); + for (Node<K,V> p; (p = advance()) != null; ) + action.accept(new MapEntry<K,V>((K)p.key, p.val, map)); + } + + public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) { + if (action == null) throw new NullPointerException(); + Node<K,V> p; + if ((p = advance()) == null) + return false; + action.accept(new MapEntry<K,V>((K)p.key, p.val, map)); + return true; + } + + public long estimateSize() { return est; } + + public int characteristics() { + return Spliterator.DISTINCT | Spliterator.CONCURRENT | + Spliterator.NONNULL; + } + } + + /* ---------------- Public operations -------------- */ /** - * Creates a new, empty map with the specified initial - * capacity, load factor and concurrency level. - * - * @param initialCapacity the initial capacity. The implementation - * performs internal sizing to accommodate this many elements. - * @param loadFactor the load factor threshold, used to control resizing. 
- * Resizing may be performed when the average number of elements per - * bin exceeds this threshold. - * @param concurrencyLevel the estimated number of concurrently - * updating threads. The implementation performs internal sizing - * to try to accommodate this many threads. - * @throws IllegalArgumentException if the initial capacity is - * negative or the load factor or concurrencyLevel are - * nonpositive. + * Creates a new, empty map with the default initial table size (16). */ - @SuppressWarnings("unchecked") - public ConcurrentHashMap(int initialCapacity, - float loadFactor, int concurrencyLevel) { - if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0) - throw new IllegalArgumentException(); - if (concurrencyLevel > MAX_SEGMENTS) - concurrencyLevel = MAX_SEGMENTS; - // Find power-of-two sizes best matching arguments - int sshift = 0; - int ssize = 1; - while (ssize < concurrencyLevel) { - ++sshift; - ssize <<= 1; - } - this.segmentShift = 32 - sshift; - this.segmentMask = ssize - 1; - if (initialCapacity > MAXIMUM_CAPACITY) - initialCapacity = MAXIMUM_CAPACITY; - int c = initialCapacity / ssize; - if (c * ssize < initialCapacity) - ++c; - int cap = MIN_SEGMENT_TABLE_CAPACITY; - while (cap < c) - cap <<= 1; - // create segments and segments[0] - Segment<K,V> s0 = - new Segment<K,V>(loadFactor, (int)(cap * loadFactor), - (HashEntry<K,V>[])new HashEntry<?,?>[cap]); - Segment<K,V>[] ss = (Segment<K,V>[])new Segment<?,?>[ssize]; - UNSAFE.putOrderedObject(ss, SBASE, s0); // ordered write of segments[0] - this.segments = ss; + public ConcurrentHashMap() { } /** - * Creates a new, empty map with the specified initial capacity - * and load factor and with the default concurrencyLevel (16). + * Creates a new, empty map with an initial table size + * accommodating the specified number of elements without the need + * to dynamically resize. * * @param initialCapacity The implementation performs internal * sizing to accommodate this many elements. 
- * @param loadFactor the load factor threshold, used to control resizing. - * Resizing may be performed when the average number of elements per - * bin exceeds this threshold. + * @throws IllegalArgumentException if the initial capacity of + * elements is negative + */ + public ConcurrentHashMap(int initialCapacity) { + if (initialCapacity < 0) + throw new IllegalArgumentException(); + int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ? + MAXIMUM_CAPACITY : + tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1)); + this.sizeCtl = cap; + } + + /** + * Creates a new map with the same mappings as the given map. + * + * @param m the map + */ + public ConcurrentHashMap(Map<? extends K, ? extends V> m) { + this.sizeCtl = DEFAULT_CAPACITY; + internalPutAll(m); + } + + /** + * Creates a new, empty map with an initial table size based on + * the given number of elements ({@code initialCapacity}) and + * initial table density ({@code loadFactor}). + * + * @param initialCapacity the initial capacity. The implementation + * performs internal sizing to accommodate this many elements, + * given the specified load factor. + * @param loadFactor the load factor (table density) for + * establishing the initial table size * @throws IllegalArgumentException if the initial capacity of * elements is negative or the load factor is nonpositive * * @since 1.6 */ public ConcurrentHashMap(int initialCapacity, float loadFactor) { - this(initialCapacity, loadFactor, DEFAULT_CONCURRENCY_LEVEL); - } - - /** - * Creates a new, empty map with the specified initial capacity, - * and with default load factor (0.75) and concurrencyLevel (16). - * - * @param initialCapacity the initial capacity. The implementation - * performs internal sizing to accommodate this many elements. - * @throws IllegalArgumentException if the initial capacity of - * elements is negative. 
- */ - public ConcurrentHashMap(int initialCapacity) { - this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL); + this(initialCapacity, loadFactor, 1); } /** - * Creates a new, empty map with a default initial capacity (16), - * load factor (0.75) and concurrencyLevel (16). + * Creates a new, empty map with an initial table size based on + * the given number of elements ({@code initialCapacity}), table + * density ({@code loadFactor}), and number of concurrently + * updating threads ({@code concurrencyLevel}). + * + * @param initialCapacity the initial capacity. The implementation + * performs internal sizing to accommodate this many elements, + * given the specified load factor. + * @param loadFactor the load factor (table density) for + * establishing the initial table size + * @param concurrencyLevel the estimated number of concurrently + * updating threads. The implementation may use this value as + * a sizing hint. + * @throws IllegalArgumentException if the initial capacity is + * negative or the load factor or concurrencyLevel are + * nonpositive */ - public ConcurrentHashMap() { - this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL); + public ConcurrentHashMap(int initialCapacity, + float loadFactor, int concurrencyLevel) { + if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0) + throw new IllegalArgumentException(); + if (initialCapacity < concurrencyLevel) // Use at least as many bins + initialCapacity = concurrencyLevel; // as estimated threads + long size = (long)(1.0 + (long)initialCapacity / loadFactor); + int cap = (size >= (long)MAXIMUM_CAPACITY) ? + MAXIMUM_CAPACITY : tableSizeFor((int)size); + this.sizeCtl = cap; } /** - * Creates a new map with the same mappings as the given map. - * The map is created with a capacity of 1.5 times the number - * of mappings in the given map or 16 (whichever is greater), - * and a default load factor (0.75) and concurrencyLevel (16). 
+ * Creates a new {@link Set} backed by a ConcurrentHashMap + * from the given type to {@code Boolean.TRUE}. * - * @param m the map + * @return the new set + * @since 1.8 */ - public ConcurrentHashMap(Map<? extends K, ? extends V> m) { - this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1, - DEFAULT_INITIAL_CAPACITY), - DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL); - putAll(m); + public static <K> KeySetView<K,Boolean> newKeySet() { + return new KeySetView<K,Boolean> + (new ConcurrentHashMap<K,Boolean>(), Boolean.TRUE); + } + + /** + * Creates a new {@link Set} backed by a ConcurrentHashMap + * from the given type to {@code Boolean.TRUE}. + * + * @param initialCapacity The implementation performs internal + * sizing to accommodate this many elements. + * @throws IllegalArgumentException if the initial capacity of + * elements is negative + * @return the new set + * @since 1.8 + */ + public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) { + return new KeySetView<K,Boolean> + (new ConcurrentHashMap<K,Boolean>(initialCapacity), Boolean.TRUE); } /** @@ -834,38 +2639,7 @@ * @return {@code true} if this map contains no key-value mappings */ public boolean isEmpty() { - /* - * Sum per-segment modCounts to avoid mis-reporting when - * elements are concurrently added and removed in one segment - * while checking another, in which case the table was never - * actually empty at any point. (The sum ensures accuracy up - * through at least 1<<31 per-segment modifications before - * recheck.) Methods size() and containsValue() use similar - * constructions for stability checks. 
- */ - long sum = 0L; - final Segment<K,V>[] segments = this.segments; - for (int j = 0; j < segments.length; ++j) { - Segment<K,V> seg = segmentAt(segments, j); - if (seg != null) { - if (seg.count != 0) - return false; - sum += seg.modCount; - } - } - if (sum != 0L) { // recheck unless no modifications - for (int j = 0; j < segments.length; ++j) { - Segment<K,V> seg = segmentAt(segments, j); - if (seg != null) { - if (seg.count != 0) - return false; - sum -= seg.modCount; - } - } - if (sum != 0L) - return false; - } - return true; + return sumCount() <= 0L; // ignore transient negative values } /** @@ -876,43 +2650,25 @@ * @return the number of key-value mappings in this map */ public int size() { - // Try a few times to get accurate count. On failure due to - // continuous async changes in table, resort to locking. - final Segment<K,V>[] segments = this.segments; - int size; - boolean overflow; // true if size overflows 32 bits - long sum; // sum of modCounts - long last = 0L; // previous sum - int retries = -1; // first iteration isn't retry - try { - for (;;) { - if (retries++ == RETRIES_BEFORE_LOCK) { - for (int j = 0; j < segments.length; ++j) - ensureSegment(j).lock(); // force creation - } - sum = 0L; - size = 0; - overflow = false; - for (int j = 0; j < segments.length; ++j) { - Segment<K,V> seg = segmentAt(segments, j); - if (seg != null) { - sum += seg.modCount; - int c = seg.count; - if (c < 0 || (size += c) < 0) - overflow = true; - } - } - if (sum == last) - break; - last = sum; - } - } finally { - if (retries > RETRIES_BEFORE_LOCK) { - for (int j = 0; j < segments.length; ++j) - segmentAt(segments, j).unlock(); - } - } - return overflow ? Integer.MAX_VALUE : size; + long n = sumCount(); + return ((n < 0L) ? 0 : + (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE : + (int)n); + } + + /** + * Returns the number of mappings. 
This method should be used + * instead of {@link #size} because a ConcurrentHashMap may + * contain more mappings than can be represented as an int. The + * value returned is an estimate; the actual count may differ if + * there are concurrent insertions or removals. + * + * @return the number of mappings + * @since 1.8 + */ + public long mappingCount() { + long n = sumCount(); + return (n < 0L) ? 0L : n; // ignore transient negative values } /** @@ -926,23 +2682,24 @@ * * @throws NullPointerException if the specified key is null */ - @SuppressWarnings("unchecked") public V get(Object key) { - Segment<K,V> s; // manually integrate access methods to reduce overhead - HashEntry<K,V>[] tab; - int h = hash(key); - long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE; - if ((s = (Segment<K,V>)UNSAFE.getObjectVolatile(segments, u)) != null && - (tab = s.table) != null) { - for (HashEntry<K,V> e = (HashEntry<K,V>) UNSAFE.getObjectVolatile - (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE); - e != null; e = e.next) { - K k; - if ((k = e.key) == key || (e.hash == h && key.equals(k))) - return e.value; - } - } - return null; + return internalGet(key); + } + + /** + * Returns the value to which the specified key is mapped, or the + * given default value if this map contains no mapping for the + * key. + * + * @param key the key whose associated value is to be returned + * @param defaultValue the value to return if this map contains + * no mapping for the given key + * @return the mapping for the key, if present; else the default value + * @throws NullPointerException if the specified key is null + */ + public V getOrDefault(Object key, V defaultValue) { + V v; + return (v = internalGet(key)) == null ? 
defaultValue : v; } /** @@ -954,29 +2711,14 @@ * {@code equals} method; {@code false} otherwise * @throws NullPointerException if the specified key is null */ - @SuppressWarnings("unchecked") public boolean containsKey(Object key) { - Segment<K,V> s; // same as get() except no need for volatile value read - HashEntry<K,V>[] tab; - int h = hash(key); - long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE; - if ((s = (Segment<K,V>)UNSAFE.getObjectVolatile(segments, u)) != null && - (tab = s.table) != null) { - for (HashEntry<K,V> e = (HashEntry<K,V>) UNSAFE.getObjectVolatile - (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE); - e != null; e = e.next) { - K k; - if ((k = e.key) == key || (e.hash == h && key.equals(k))) - return true; - } - } - return false; + return internalGet(key) != null; } /** * Returns {@code true} if this map maps one or more keys to the - * specified value. Note: This method requires a full traversal - * of the map, and so is much slower than method {@code containsKey}. + * specified value. Note: This method may require a full traversal + * of the map, and is much slower than method {@code containsKey}. 
* * @param value value whose presence in this map is to be tested * @return {@code true} if this map maps one or more keys to the @@ -984,49 +2726,18 @@ * @throws NullPointerException if the specified value is null */ public boolean containsValue(Object value) { - // Same idea as size() if (value == null) throw new NullPointerException(); - final Segment<K,V>[] segments = this.segments; - boolean found = false; - long last = 0; - int retries = -1; - try { - outer: for (;;) { - if (retries++ == RETRIES_BEFORE_LOCK) { - for (int j = 0; j < segments.length; ++j) - ensureSegment(j).lock(); // force creation - } - long hashSum = 0L; - int sum = 0; - for (int j = 0; j < segments.length; ++j) { - HashEntry<K,V>[] tab; - Segment<K,V> seg = segmentAt(segments, j); - if (seg != null && (tab = seg.table) != null) { - for (int i = 0 ; i < tab.length; i++) { - HashEntry<K,V> e; - for (e = entryAt(tab, i); e != null; e = e.next) { - V v = e.value; - if (v != null && value.equals(v)) { - found = true; - break outer; - } - } - } - sum += seg.modCount; - } - } - if (retries > 0 && sum == last) - break; - last = sum; - } - } finally { - if (retries > RETRIES_BEFORE_LOCK) { - for (int j = 0; j < segments.length; ++j) - segmentAt(segments, j).unlock(); + Node<K,V>[] t; + if ((t = table) != null) { + Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); + for (Node<K,V> p; (p = it.advance()) != null; ) { + V v; + if ((v = p.val) == value || value.equals(v)) + return true; } } - return found; + return false; } /** @@ -1061,17 +2772,8 @@ * {@code null} if there was no mapping for {@code key} * @throws NullPointerException if the specified key or value is null */ - @SuppressWarnings("unchecked") public V put(K key, V value) { - Segment<K,V> s; - if (value == null) - throw new NullPointerException(); - int hash = hash(key); - int j = (hash >>> segmentShift) & segmentMask; - if ((s = (Segment<K,V>)UNSAFE.getObject // nonvolatile; recheck - (segments, (j << SSHIFT) + SBASE)) == 
null) // in ensureSegment - s = ensureSegment(j); - return s.put(key, hash, value, false); + return internalPut(key, value, false); } /** @@ -1081,17 +2783,8 @@ * or {@code null} if there was no mapping for the key * @throws NullPointerException if the specified key or value is null */ - @SuppressWarnings("unchecked") public V putIfAbsent(K key, V value) { - Segment<K,V> s; - if (value == null) - throw new NullPointerException(); - int hash = hash(key); - int j = (hash >>> segmentShift) & segmentMask; - if ((s = (Segment<K,V>)UNSAFE.getObject - (segments, (j << SSHIFT) + SBASE)) == null) - s = ensureSegment(j); - return s.put(key, hash, value, true); + return internalPut(key, value, true); } /** @@ -1102,8 +2795,105 @@ * @param m mappings to be stored in this map */ public void putAll(Map<? extends K, ? extends V> m) { - for (Map.Entry<? extends K, ? extends V> e : m.entrySet()) - put(e.getKey(), e.getValue()); + internalPutAll(m); + } + + /** + * If the specified key is not already associated with a value, + * attempts to compute its value using the given mapping function + * and enters it into this map unless {@code null}. The entire + * method invocation is performed atomically, so the function is + * applied at most once per key. Some attempted update operations + * on this map by other threads may be blocked while computation + * is in progress, so the computation should be short and simple, + * and must not attempt to update any other mappings of this map. 
+ * + * @param key key with which the specified value is to be associated + * @param mappingFunction the function to compute a value + * @return the current (existing or computed) value associated with + * the specified key, or null if the computed value is null + * @throws NullPointerException if the specified key or mappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the mappingFunction does so, + * in which case the mapping is left unestablished + */ + public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) { + return internalComputeIfAbsent(key, mappingFunction); + } + + /** + * If the value for the specified key is present, attempts to + * compute a new mapping given the key and its current mapped + * value. The entire method invocation is performed atomically. + * Some attempted update operations on this map by other threads + * may be blocked while computation is in progress, so the + * computation should be short and simple, and must not attempt to + * update any other mappings of this map. + * + * @param key key with which a value may be associated + * @param remappingFunction the function to compute a value + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key or remappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the remappingFunction does so, + * in which case the mapping is unchanged + */ + public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? 
extends V> remappingFunction) { + return internalCompute(key, true, remappingFunction); + } + + /** + * Attempts to compute a mapping for the specified key and its + * current mapped value (or {@code null} if there is no current + * mapping). The entire method invocation is performed atomically. + * Some attempted update operations on this map by other threads + * may be blocked while computation is in progress, so the + * computation should be short and simple, and must not attempt to + * update any other mappings of this Map. + * + * @param key key with which the specified value is to be associated + * @param remappingFunction the function to compute a value + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key or remappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the remappingFunction does so, + * in which case the mapping is unchanged + */ + public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) { + return internalCompute(key, false, remappingFunction); + } + + /** + * If the specified key is not already associated with a + * (non-null) value, associates it with the given value. + * Otherwise, replaces the value with the results of the given + * remapping function, or removes if {@code null}. The entire + * method invocation is performed atomically. Some attempted + * update operations on this map by other threads may be blocked + * while computation is in progress, so the computation should be + * short and simple, and must not attempt to update any other + * mappings of this Map. 
+ * + * @param key key with which the specified value is to be associated + * @param value the value to use if absent + * @param remappingFunction the function to recompute a value if present + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key or the + * remappingFunction is null + * @throws RuntimeException or Error if the remappingFunction does so, + * in which case the mapping is unchanged + */ + public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) { + return internalMerge(key, value, remappingFunction); } /** @@ -1116,9 +2906,7 @@ * @throws NullPointerException if the specified key is null */ public V remove(Object key) { - int hash = hash(key); - Segment<K,V> s = segmentForHash(hash); - return s == null ? null : s.remove(key, hash, null); + return internalReplace(key, null, null); } /** @@ -1127,10 +2915,9 @@ * @throws NullPointerException if the specified key is null */ public boolean remove(Object key, Object value) { - int hash = hash(key); - Segment<K,V> s; - return value != null && (s = segmentForHash(hash)) != null && - s.remove(key, hash, value) != null; + if (key == null) + throw new NullPointerException(); + return value != null && internalReplace(key, null, value) != null; } /** @@ -1139,11 +2926,9 @@ * @throws NullPointerException if any of the arguments are null */ public boolean replace(K key, V oldValue, V newValue) { - int hash = hash(key); - if (oldValue == null || newValue == null) + if (key == null || oldValue == null || newValue == null) throw new NullPointerException(); - Segment<K,V> s = segmentForHash(hash); - return s != null && s.replace(key, hash, oldValue, newValue); + return internalReplace(key, newValue, oldValue) != null; } /** @@ -1154,23 +2939,16 @@ * @throws NullPointerException if the specified key or value is null */ public V replace(K key, V value) { - int hash = hash(key); - if (value == null) + if (key 
== null || value == null) throw new NullPointerException(); - Segment<K,V> s = segmentForHash(hash); - return s == null ? null : s.replace(key, hash, value); + return internalReplace(key, value, null); } /** * Removes all of the mappings from this map. */ public void clear() { - final Segment<K,V>[] segments = this.segments; - for (int j = 0; j < segments.length; ++j) { - Segment<K,V> s = segmentAt(segments, j); - if (s != null) - s.clear(); - } + internalClear(); } /** @@ -1188,10 +2966,29 @@ * and guarantees to traverse elements as they existed upon * construction of the iterator, and may (but is not guaranteed to) * reflect any modifications subsequent to construction. + * + * @return the set view */ - public Set<K> keySet() { - Set<K> ks = keySet; - return (ks != null) ? ks : (keySet = new KeySet()); + public KeySetView<K,V> keySet() { + KeySetView<K,V> ks = keySet; + return (ks != null) ? ks : (keySet = new KeySetView<K,V>(this, null)); + } + + /** + * Returns a {@link Set} view of the keys in this map, using the + * given common mapped value for any additions (i.e., {@link + * Collection#add} and {@link Collection#addAll(Collection)}). + * This is of course only appropriate if it is acceptable to use + * the same value for all additions from this view. + * + * @param mappedValue the mapped value to use for any additions + * @return the set view + * @throws NullPointerException if the mappedValue is null + */ + public KeySetView<K,V> keySet(V mappedValue) { + if (mappedValue == null) + throw new NullPointerException(); + return new KeySetView<K,V>(this, mappedValue); } /** @@ -1209,10 +3006,12 @@ * and guarantees to traverse elements as they existed upon * construction of the iterator, and may (but is not guaranteed to) * reflect any modifications subsequent to construction. + * + * @return the collection view */ public Collection<V> values() { - Collection<V> vs = values; - return (vs != null) ? 
vs : (values = new Values()); + ValuesView<K,V> vs = values; + return (vs != null) ? vs : (values = new ValuesView<K,V>(this)); } /** @@ -1222,18 +3021,19 @@ * removal, which removes the corresponding mapping from the map, * via the {@code Iterator.remove}, {@code Set.remove}, * {@code removeAll}, {@code retainAll}, and {@code clear} - * operations. It does not support the {@code add} or - * {@code addAll} operations. + * operations. * * <p>The view's {@code iterator} is a "weakly consistent" iterator * that will never throw {@link ConcurrentModificationException}, * and guarantees to traverse elements as they existed upon * construction of the iterator, and may (but is not guaranteed to) * reflect any modifications subsequent to construction. + * + * @return the set view */ public Set<Map.Entry<K,V>> entrySet() { - Set<Map.Entry<K,V>> es = entrySet; - return (es != null) ? es : (entrySet = new EntrySet()); + EntrySetView<K,V> es = entrySet; + return (es != null) ? es : (entrySet = new EntrySetView<K,V>(this)); } /** @@ -1243,7 +3043,9 @@ * @see #keySet() */ public Enumeration<K> keys() { - return new KeyIterator(); + Node<K,V>[] t; + int f = (t = table) == null ? 0 : t.length; + return new KeyIterator<K,V>(t, f, 0, f, this); } /** @@ -1253,192 +3055,111 @@ * @see #values() */ public Enumeration<V> elements() { - return new ValueIterator(); + Node<K,V>[] t; + int f = (t = table) == null ? 0 : t.length; + return new ValueIterator<K,V>(t, f, 0, f, this); } - /* ---------------- Iterator Support -------------- */ - - abstract class HashIterator { - int nextSegmentIndex; - int nextTableIndex; - HashEntry<K,V>[] currentTable; - HashEntry<K, V> nextEntry; - HashEntry<K, V> lastReturned; - - HashIterator() { - nextSegmentIndex = segments.length - 1; - nextTableIndex = -1; - advance(); + /** + * Returns the hash code value for this {@link Map}, i.e., + * the sum of, for each key-value pair in the map, + * {@code key.hashCode() ^ value.hashCode()}. 
+ * + * @return the hash code value for this map + */ + public int hashCode() { + int h = 0; + Node<K,V>[] t; + if ((t = table) != null) { + Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); + for (Node<K,V> p; (p = it.advance()) != null; ) + h += p.key.hashCode() ^ p.val.hashCode(); } - - /** - * Sets nextEntry to first node of next non-empty table - * (in backwards order, to simplify checks). - */ - final void advance() { - for (;;) { - if (nextTableIndex >= 0) { - if ((nextEntry = entryAt(currentTable, - nextTableIndex--)) != null) - break; - } - else if (nextSegmentIndex >= 0) { - Segment<K,V> seg = segmentAt(segments, nextSegmentIndex--); - if (seg != null && (currentTable = seg.table) != null) - nextTableIndex = currentTable.length - 1; - } - else - break; - } - } - - final HashEntry<K,V> nextEntry() { - HashEntry<K,V> e = nextEntry; - if (e == null) - throw new NoSuchElementException(); - lastReturned = e; // cannot assign until after null check - if ((nextEntry = e.next) == null) - advance(); - return e; - } - - public final boolean hasNext() { return nextEntry != null; } - public final boolean hasMoreElements() { return nextEntry != null; } - - public final void remove() { - if (lastReturned == null) - throw new IllegalStateException(); - ConcurrentHashMap.this.remove(lastReturned.key); - lastReturned = null; - } - } - - final class KeyIterator - extends HashIterator - implements Iterator<K>, Enumeration<K> - { - public final K next() { return super.nextEntry().key; } - public final K nextElement() { return super.nextEntry().key; } - } - - final class ValueIterator - extends HashIterator - implements Iterator<V>, Enumeration<V> - { - public final V next() { return super.nextEntry().value; } - public final V nextElement() { return super.nextEntry().value; } + return h; } /** - * Custom Entry class used by EntryIterator.next(), that relays - * setValue changes to the underlying map. + * Returns a string representation of this map. 
The string + * representation consists of a list of key-value mappings (in no + * particular order) enclosed in braces ("{@code {}}"). Adjacent + * mappings are separated by the characters {@code ", "} (comma + * and space). Each key-value mapping is rendered as the key + * followed by an equals sign ("{@code =}") followed by the + * associated value. + * + * @return a string representation of this map */ - final class WriteThroughEntry - extends AbstractMap.SimpleEntry<K,V> - { - static final long serialVersionUID = 7249069246763182397L; - - WriteThroughEntry(K k, V v) { - super(k,v); - } - - /** - * Sets our entry's value and writes through to the map. The - * value to return is somewhat arbitrary here. Since a - * WriteThroughEntry does not necessarily track asynchronous - * changes, the most recent "previous" value could be - * different from what we return (or could even have been - * removed in which case the put will re-establish). We do not - * and cannot guarantee more. - */ - public V setValue(V value) { - if (value == null) throw new NullPointerException(); - V v = super.setValue(value); - ConcurrentHashMap.this.put(getKey(), value); - return v; - } - } - - final class EntryIterator - extends HashIterator - implements Iterator<Entry<K,V>> - { - public Map.Entry<K,V> next() { - HashEntry<K,V> e = super.nextEntry(); - return new WriteThroughEntry(e.key, e.value); - } - } - - final class KeySet extends AbstractSet<K> { - public Iterator<K> iterator() { - return new KeyIterator(); - } - public int size() { - return ConcurrentHashMap.this.size(); - } - public boolean isEmpty() { - return ConcurrentHashMap.this.isEmpty(); - } - public boolean contains(Object o) { - return ConcurrentHashMap.this.containsKey(o); + public String toString() { + Node<K,V>[] t; + int f = (t = table) == null ? 
0 : t.length; + Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f); + StringBuilder sb = new StringBuilder(); + sb.append('{'); + Node<K,V> p; + if ((p = it.advance()) != null) { + for (;;) { + K k = (K)p.key; + V v = p.val; + sb.append(k == this ? "(this Map)" : k); + sb.append('='); + sb.append(v == this ? "(this Map)" : v); + if ((p = it.advance()) == null) + break; + sb.append(',').append(' '); + } } - public boolean remove(Object o) { - return ConcurrentHashMap.this.remove(o) != null; - } - public void clear() { - ConcurrentHashMap.this.clear(); - } - } - - final class Values extends AbstractCollection<V> { - public Iterator<V> iterator() { - return new ValueIterator(); - } - public int size() { - return ConcurrentHashMap.this.size(); - } - public boolean isEmpty() { - return ConcurrentHashMap.this.isEmpty(); - } - public boolean contains(Object o) { - return ConcurrentHashMap.this.containsValue(o); - } - public void clear() { - ConcurrentHashMap.this.clear(); - } + return sb.append('}').toString(); } - final class EntrySet extends AbstractSet<Map.Entry<K,V>> { - public Iterator<Map.Entry<K,V>> iterator() { - return new EntryIterator(); - } - public boolean contains(Object o) { - if (!(o instanceof Map.Entry)) + /** + * Compares the specified object with this map for equality. + * Returns {@code true} if the given object is a map with the same + * mappings as this map. This operation may return misleading + * results if either map is concurrently modified during execution + * of this method. + * + * @param o object to be compared for equality with this map + * @return {@code true} if the specified object is equal to this map + */ + public boolean equals(Object o) { + if (o != this) { + if (!(o instanceof Map)) return false; - Map.Entry<?,?> e = (Map.Entry<?,?>)o; - V v = ConcurrentHashMap.this.get(e.getKey()); - return v != null && v.equals(e.getValue()); + Map<?,?> m = (Map<?,?>) o; + Node<K,V>[] t; + int f = (t = table) == null ? 
0 : t.length; + Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f); + for (Node<K,V> p; (p = it.advance()) != null; ) { + V val = p.val; + Object v = m.get(p.key); + if (v == null || (v != val && !v.equals(val))) + return false; + } + for (Map.Entry<?,?> e : m.entrySet()) { + Object mk, mv, v; + if ((mk = e.getKey()) == null || + (mv = e.getValue()) == null || + (v = internalGet(mk)) == null || + (mv != v && !mv.equals(v))) + return false; + } } - public boolean remove(Object o) { - if (!(o instanceof Map.Entry)) - return false; - Map.Entry<?,?> e = (Map.Entry<?,?>)o; - return ConcurrentHashMap.this.remove(e.getKey(), e.getValue()); - } - public int size() { - return ConcurrentHashMap.this.size(); - } - public boolean isEmpty() { - return ConcurrentHashMap.this.isEmpty(); - } - public void clear() { - ConcurrentHashMap.this.clear(); - } + return true; } /* ---------------- Serialization Support -------------- */ /** + * Stripped-down version of helper class used in previous version, + * declared for the sake of serialization compatibility + */ + static class Segment<K,V> extends ReentrantLock implements Serializable { + private static final long serialVersionUID = 2249069246763182397L; + final float loadFactor; + Segment(float lf) { this.loadFactor = lf; } + } + + /** * Saves the state of the {@code ConcurrentHashMap} instance to a * stream (i.e., serializes it). * @param s the stream @@ -1448,119 +3169,2733 @@ * The key-value mappings are emitted in no particular order. 
*/ private void writeObject(java.io.ObjectOutputStream s) - throws java.io.IOException { - // force all segments for serialization compatibility - for (int k = 0; k < segments.length; ++k) - ensureSegment(k); - s.defaultWriteObject(); - - final Segment<K,V>[] segments = this.segments; - for (int k = 0; k < segments.length; ++k) { - Segment<K,V> seg = segmentAt(segments, k); - seg.lock(); - try { - HashEntry<K,V>[] tab = seg.table; - for (int i = 0; i < tab.length; ++i) { - HashEntry<K,V> e; - for (e = entryAt(tab, i); e != null; e = e.next) { - s.writeObject(e.key); - s.writeObject(e.value); - } - } - } finally { - seg.unlock(); + throws java.io.IOException { + // For serialization compatibility + // Emulate segment calculation from previous version of this class + int sshift = 0; + int ssize = 1; + while (ssize < DEFAULT_CONCURRENCY_LEVEL) { + ++sshift; + ssize <<= 1; + } + int segmentShift = 32 - sshift; + int segmentMask = ssize - 1; + Segment<K,V>[] segments = (Segment<K,V>[]) + new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL]; + for (int i = 0; i < segments.length; ++i) + segments[i] = new Segment<K,V>(LOAD_FACTOR); + s.putFields().put("segments", segments); + s.putFields().put("segmentShift", segmentShift); + s.putFields().put("segmentMask", segmentMask); + s.writeFields(); + + Node<K,V>[] t; + if ((t = table) != null) { + Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); + for (Node<K,V> p; (p = it.advance()) != null; ) { + s.writeObject(p.key); + s.writeObject(p.val); } } s.writeObject(null); s.writeObject(null); + segments = null; // throw away } /** * Reconstitutes the instance from a stream (that is, deserializes it). 
* @param s the stream */ - @SuppressWarnings("unchecked") private void readObject(java.io.ObjectInputStream s) - throws java.io.IOException, ClassNotFoundException { - // Don't call defaultReadObject() - ObjectInputStream.GetField oisFields = s.readFields(); - final Segment<K,V>[] oisSegments = (Segment<K,V>[])oisFields.get("segments", null); - - final int ssize = oisSegments.length; - if (ssize < 1 || ssize > MAX_SEGMENTS - || (ssize & (ssize-1)) != 0 ) // ssize not power of two - throw new java.io.InvalidObjectException("Bad number of segments:" - + ssize); - int sshift = 0, ssizeTmp = ssize; - while (ssizeTmp > 1) { - ++sshift; - ssizeTmp >>>= 1; + throws java.io.IOException, ClassNotFoundException { + s.defaultReadObject(); + + // Create all nodes, then place in table once size is known + long size = 0L; + Node<K,V> p = null; + for (;;) { + K k = (K) s.readObject(); + V v = (V) s.readObject(); + if (k != null && v != null) { + int h = spread(k.hashCode()); + p = new Node<K,V>(h, k, v, p); + ++size; + } + else + break; + } + if (p != null) { + boolean init = false; + int n; + if (size >= (long)(MAXIMUM_CAPACITY >>> 1)) + n = MAXIMUM_CAPACITY; + else { + int sz = (int)size; + n = tableSizeFor(sz + (sz >>> 1) + 1); + } + int sc = sizeCtl; + boolean collide = false; + if (n > sc && + U.compareAndSwapInt(this, SIZECTL, sc, -1)) { + try { + if (table == null) { + init = true; + Node<K,V>[] tab = (Node<K,V>[])new Node[n]; + int mask = n - 1; + while (p != null) { + int j = p.hash & mask; + Node<K,V> next = p.next; + Node<K,V> q = p.next = tabAt(tab, j); + setTabAt(tab, j, p); + if (!collide && q != null && q.hash == p.hash) + collide = true; + p = next; + } + table = tab; + addCount(size, -1); + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + if (collide) { // rescan and convert to TreeBins + Node<K,V>[] tab = table; + for (int i = 0; i < tab.length; ++i) { + int c = 0; + for (Node<K,V> e = tabAt(tab, i); e != null; e = e.next) { + if (++c > TREE_THRESHOLD 
&& + (e.key instanceof Comparable)) { + replaceWithTreeBin(tab, i, e.key); + break; + } + } + } + } + } + if (!init) { // Can only happen if unsafely published. + while (p != null) { + internalPut((K)p.key, p.val, false); + p = p.next; + } + } + } + } + + // ------------------------------------------------------- + + // Overrides of other default Map methods + + public void forEach(BiConsumer<? super K, ? super V> action) { + if (action == null) throw new NullPointerException(); + Node<K,V>[] t; + if ((t = table) != null) { + Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); + for (Node<K,V> p; (p = it.advance()) != null; ) { + action.accept((K)p.key, p.val); + } + } + } + + public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) { + if (function == null) throw new NullPointerException(); + Node<K,V>[] t; + if ((t = table) != null) { + Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); + for (Node<K,V> p; (p = it.advance()) != null; ) { + K k = (K)p.key; + internalPut(k, function.apply(k, p.val), false); + } } - UNSAFE.putIntVolatile(this, SEGSHIFT_OFFSET, 32 - sshift); - UNSAFE.putIntVolatile(this, SEGMASK_OFFSET, ssize - 1); - UNSAFE.putObjectVolatile(this, SEGMENTS_OFFSET, oisSegments); - - // set hashMask - UNSAFE.putIntVolatile(this, HASHSEED_OFFSET, - sun.misc.Hashing.randomHashSeed(this)); - - // Re-initialize segments to be minimally sized, and let grow. - int cap = MIN_SEGMENT_TABLE_CAPACITY; - final Segment<K,V>[] segments = this.segments; - for (int k = 0; k < segments.length; ++k) { - Segment<K,V> seg = segments[k]; - if (seg != null) { - seg.threshold = (int)(cap * seg.loadFactor); - seg.table = (HashEntry<K,V>[]) new HashEntry<?,?>[cap]; + } + + // ------------------------------------------------------- + + // Parallel bulk operations + + /** + * Computes initial batch value for bulk tasks. 
The returned value + * is approximately exp2 of the number of times (minus one) to + * split task by two before executing leaf action. This value is + * faster to compute and more convenient to use as a guide to + * splitting than is the depth, since it is used while dividing by + * two anyway. + */ + final int batchFor(long b) { + long n; + if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b) + return 0; + int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4 + return (b <= 0L || (n /= b) >= sp) ? sp : (int)n; + } + + /** + * Performs the given action for each (key, value). + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param action the action + * @since 1.8 + */ + public void forEach(long parallelismThreshold, + BiConsumer<? super K,? super V> action) { + if (action == null) throw new NullPointerException(); + new ForEachMappingTask<K,V> + (null, batchFor(parallelismThreshold), 0, 0, table, + action).invoke(); + } + + /** + * Performs the given action for each non-null transformation + * of each (key, value). + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element, or null if there is no transformation (in + * which case the action is not applied) + * @param action the action + * @since 1.8 + */ + public <U> void forEach(long parallelismThreshold, + BiFunction<? super K, ? super V, ? extends U> transformer, + Consumer<? super U> action) { + if (transformer == null || action == null) + throw new NullPointerException(); + new ForEachTransformedMappingTask<K,V,U> + (null, batchFor(parallelismThreshold), 0, 0, table, + transformer, action).invoke(); + } + + /** + * Returns a non-null result from applying the given search + * function on each (key, value), or null if none. 
Upon + * success, further element processing is suppressed and the + * results of any other parallel invocations of the search + * function are ignored. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param searchFunction a function returning a non-null + * result on success, else null + * @return a non-null result from applying the given search + * function on each (key, value), or null if none + * @since 1.8 + */ + public <U> U search(long parallelismThreshold, + BiFunction<? super K, ? super V, ? extends U> searchFunction) { + if (searchFunction == null) throw new NullPointerException(); + return new SearchMappingsTask<K,V,U> + (null, batchFor(parallelismThreshold), 0, 0, table, + searchFunction, new AtomicReference<U>()).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all (key, value) pairs using the given reducer to + * combine values, or null if none. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element, or null if there is no transformation (in + * which case it is not combined) + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all (key, value) pairs + * @since 1.8 + */ + public <U> U reduce(long parallelismThreshold, + BiFunction<? super K, ? super V, ? extends U> transformer, + BiFunction<? super U, ? super U, ? 
extends U> reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceMappingsTask<K,V,U> + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all (key, value) pairs using the given reducer to + * combine values, and the given basis as an identity value. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all (key, value) pairs + * @since 1.8 + */ + public double reduceToDoubleIn(long parallelismThreshold, + ToDoubleBiFunction<? super K, ? super V> transformer, + double basis, + DoubleBinaryOperator reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceMappingsToDoubleTask<K,V> + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all (key, value) pairs using the given reducer to + * combine values, and the given basis as an identity value. 
+ * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all (key, value) pairs + * @since 1.8 + */ + public long reduceToLong(long parallelismThreshold, + ToLongBiFunction<? super K, ? super V> transformer, + long basis, + LongBinaryOperator reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceMappingsToLongTask<K,V> + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all (key, value) pairs using the given reducer to + * combine values, and the given basis as an identity value. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all (key, value) pairs + * @since 1.8 + */ + public int reduceToInt(long parallelismThreshold, + ToIntBiFunction<? super K, ? super V> transformer, + int basis, + IntBinaryOperator reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceMappingsToIntTask<K,V> + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, basis, reducer).invoke(); + } + + /** + * Performs the given action for each key. 
+ * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param action the action + * @since 1.8 + */ + public void forEachKey(long parallelismThreshold, + Consumer<? super K> action) { + if (action == null) throw new NullPointerException(); + new ForEachKeyTask<K,V> + (null, batchFor(parallelismThreshold), 0, 0, table, + action).invoke(); + } + + /** + * Performs the given action for each non-null transformation + * of each key. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element, or null if there is no transformation (in + * which case the action is not applied) + * @param action the action + * @since 1.8 + */ + public <U> void forEachKey(long parallelismThreshold, + Function<? super K, ? extends U> transformer, + Consumer<? super U> action) { + if (transformer == null || action == null) + throw new NullPointerException(); + new ForEachTransformedKeyTask<K,V,U> + (null, batchFor(parallelismThreshold), 0, 0, table, + transformer, action).invoke(); + } + + /** + * Returns a non-null result from applying the given search + * function on each key, or null if none. Upon success, + * further element processing is suppressed and the results of + * any other parallel invocations of the search function are + * ignored. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param searchFunction a function returning a non-null + * result on success, else null + * @return a non-null result from applying the given search + * function on each key, or null if none + * @since 1.8 + */ + public <U> U searchKeys(long parallelismThreshold, + Function<? super K, ? 
extends U> searchFunction) { + if (searchFunction == null) throw new NullPointerException(); + return new SearchKeysTask<K,V,U> + (null, batchFor(parallelismThreshold), 0, 0, table, + searchFunction, new AtomicReference<U>()).invoke(); + } + + /** + * Returns the result of accumulating all keys using the given + * reducer to combine values, or null if none. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param reducer a commutative associative combining function + * @return the result of accumulating all keys using the given + * reducer to combine values, or null if none + * @since 1.8 + */ + public K reduceKeys(long parallelismThreshold, + BiFunction<? super K, ? super K, ? extends K> reducer) { + if (reducer == null) throw new NullPointerException(); + return new ReduceKeysTask<K,V> + (null, batchFor(parallelismThreshold), 0, 0, table, + null, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all keys using the given reducer to combine values, or + * null if none. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element, or null if there is no transformation (in + * which case it is not combined) + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all keys + * @since 1.8 + */ + public <U> U reduceKeys(long parallelismThreshold, + Function<? super K, ? extends U> transformer, + BiFunction<? super U, ? super U, ? 
extends U> reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceKeysTask<K,V,U> + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all keys using the given reducer to combine values, and + * the given basis as an identity value. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all keys + * @since 1.8 + */ + public double reduceKeysToDouble(long parallelismThreshold, + ToDoubleFunction<? super K> transformer, + double basis, + DoubleBinaryOperator reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceKeysToDoubleTask<K,V> + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all keys using the given reducer to combine values, and + * the given basis as an identity value. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all keys + * @since 1.8 + */ + public long reduceKeysToLong(long parallelismThreshold, + ToLongFunction<? 
super K> transformer, + long basis, + LongBinaryOperator reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceKeysToLongTask<K,V> + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all keys using the given reducer to combine values, and + * the given basis as an identity value. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all keys + * @since 1.8 + */ + public int reduceKeysToInt(long parallelismThreshold, + ToIntFunction<? super K> transformer, + int basis, + IntBinaryOperator reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceKeysToIntTask<K,V> + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, basis, reducer).invoke(); + } + + /** + * Performs the given action for each value. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param action the action + * @since 1.8 + */ + public void forEachValue(long parallelismThreshold, + Consumer<? super V> action) { + if (action == null) + throw new NullPointerException(); + new ForEachValueTask<K,V> + (null, batchFor(parallelismThreshold), 0, 0, table, + action).invoke(); + } + + /** + * Performs the given action for each non-null transformation + * of each value. 
+ * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element, or null if there is no transformation (in + * which case the action is not applied) + * @param action the action + * @since 1.8 + */ + public <U> void forEachValue(long parallelismThreshold, + Function<? super V, ? extends U> transformer, + Consumer<? super U> action) { + if (transformer == null || action == null) + throw new NullPointerException(); + new ForEachTransformedValueTask<K,V,U> + (null, batchFor(parallelismThreshold), 0, 0, table, + transformer, action).invoke(); + } + + /** + * Returns a non-null result from applying the given search + * function on each value, or null if none. Upon success, + * further element processing is suppressed and the results of + * any other parallel invocations of the search function are + * ignored. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param searchFunction a function returning a non-null + * result on success, else null + * @return a non-null result from applying the given search + * function on each value, or null if none + * @since 1.8 + */ + public <U> U searchValues(long parallelismThreshold, + Function<? super V, ? extends U> searchFunction) { + if (searchFunction == null) throw new NullPointerException(); + return new SearchValuesTask<K,V,U> + (null, batchFor(parallelismThreshold), 0, 0, table, + searchFunction, new AtomicReference<U>()).invoke(); + } + + /** + * Returns the result of accumulating all values using the + * given reducer to combine values, or null if none. 
+ * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param reducer a commutative associative combining function + * @return the result of accumulating all values + * @since 1.8 + */ + public V reduceValues(long parallelismThreshold, + BiFunction<? super V, ? super V, ? extends V> reducer) { + if (reducer == null) throw new NullPointerException(); + return new ReduceValuesTask<K,V> + (null, batchFor(parallelismThreshold), 0, 0, table, + null, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all values using the given reducer to combine values, or + * null if none. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element, or null if there is no transformation (in + * which case it is not combined) + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all values + * @since 1.8 + */ + public <U> U reduceValues(long parallelismThreshold, + Function<? super V, ? extends U> transformer, + BiFunction<? super U, ? super U, ? extends U> reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceValuesTask<K,V,U> + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all values using the given reducer to combine values, + * and the given basis as an identity value. 
+ * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all values + * @since 1.8 + */ + public double reduceValuesToDouble(long parallelismThreshold, + ToDoubleFunction<? super V> transformer, + double basis, + DoubleBinaryOperator reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceValuesToDoubleTask<K,V> + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all values using the given reducer to combine values, + * and the given basis as an identity value. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all values + * @since 1.8 + */ + public long reduceValuesToLong(long parallelismThreshold, + ToLongFunction<? 
super V> transformer, + long basis, + LongBinaryOperator reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceValuesToLongTask<K,V> + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all values using the given reducer to combine values, + * and the given basis as an identity value. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all values + * @since 1.8 + */ + public int reduceValuesToInt(long parallelismThreshold, + ToIntFunction<? super V> transformer, + int basis, + IntBinaryOperator reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceValuesToIntTask<K,V> + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, basis, reducer).invoke(); + } + + /** + * Performs the given action for each entry. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param action the action + * @since 1.8 + */ + public void forEachEntry(long parallelismThreshold, + Consumer<? super Map.Entry<K,V>> action) { + if (action == null) throw new NullPointerException(); + new ForEachEntryTask<K,V>(null, batchFor(parallelismThreshold), 0, 0, table, + action).invoke(); + } + + /** + * Performs the given action for each non-null transformation + * of each entry. 
+ * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element, or null if there is no transformation (in + * which case the action is not applied) + * @param action the action + * @since 1.8 + */ + public <U> void forEachEntry(long parallelismThreshold, + Function<Map.Entry<K,V>, ? extends U> transformer, + Consumer<? super U> action) { + if (transformer == null || action == null) + throw new NullPointerException(); + new ForEachTransformedEntryTask<K,V,U> + (null, batchFor(parallelismThreshold), 0, 0, table, + transformer, action).invoke(); + } + + /** + * Returns a non-null result from applying the given search + * function on each entry, or null if none. Upon success, + * further element processing is suppressed and the results of + * any other parallel invocations of the search function are + * ignored. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param searchFunction a function returning a non-null + * result on success, else null + * @return a non-null result from applying the given search + * function on each entry, or null if none + * @since 1.8 + */ + public <U> U searchEntries(long parallelismThreshold, + Function<Map.Entry<K,V>, ? extends U> searchFunction) { + if (searchFunction == null) throw new NullPointerException(); + return new SearchEntriesTask<K,V,U> + (null, batchFor(parallelismThreshold), 0, 0, table, + searchFunction, new AtomicReference<U>()).invoke(); + } + + /** + * Returns the result of accumulating all entries using the + * given reducer to combine values, or null if none. 
+ * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param reducer a commutative associative combining function + * @return the result of accumulating all entries + * @since 1.8 + */ + public Map.Entry<K,V> reduceEntries(long parallelismThreshold, + BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) { + if (reducer == null) throw new NullPointerException(); + return new ReduceEntriesTask<K,V> + (null, batchFor(parallelismThreshold), 0, 0, table, + null, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all entries using the given reducer to combine values, + * or null if none. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element, or null if there is no transformation (in + * which case it is not combined) + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all entries + * @since 1.8 + */ + public <U> U reduceEntries(long parallelismThreshold, + Function<Map.Entry<K,V>, ? extends U> transformer, + BiFunction<? super U, ? super U, ? extends U> reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceEntriesTask<K,V,U> + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all entries using the given reducer to combine values, + * and the given basis as an identity value. 
+ * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all entries + * @since 1.8 + */ + public double reduceEntriesToDouble(long parallelismThreshold, + ToDoubleFunction<Map.Entry<K,V>> transformer, + double basis, + DoubleBinaryOperator reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceEntriesToDoubleTask<K,V> + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all entries using the given reducer to combine values, + * and the given basis as an identity value. 
+ * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all entries + * @since 1.8 + */ + public long reduceEntriesToLong(long parallelismThreshold, + ToLongFunction<Map.Entry<K,V>> transformer, + long basis, + LongBinaryOperator reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceEntriesToLongTask<K,V> + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all entries using the given reducer to combine values, + * and the given basis as an identity value. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all entries + * @since 1.8 + */ + public int reduceEntriesToInt(long parallelismThreshold, + ToIntFunction<Map.Entry<K,V>> transformer, + int basis, + IntBinaryOperator reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceEntriesToIntTask<K,V> + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, basis, reducer).invoke(); + } + + + /* ----------------Views -------------- */ + + /** + * Base class for views. 
+ */ + abstract static class CollectionView<K,V,E> + implements Collection<E>, java.io.Serializable { + private static final long serialVersionUID = 7249069246763182397L; + final ConcurrentHashMap<K,V> map; + CollectionView(ConcurrentHashMap<K,V> map) { this.map = map; } + + /** + * Returns the map backing this view. + * + * @return the map backing this view + */ + public ConcurrentHashMap<K,V> getMap() { return map; } + + /** + * Removes all of the elements from this view, by removing all + * the mappings from the map backing this view. + */ + public final void clear() { map.clear(); } + public final int size() { return map.size(); } + public final boolean isEmpty() { return map.isEmpty(); } + + // implementations below rely on concrete classes supplying these + // abstract methods + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. + */ + public abstract Iterator<E> iterator(); + public abstract boolean contains(Object o); + public abstract boolean remove(Object o); + + private static final String oomeMsg = "Required array size too large"; + + public final Object[] toArray() { + long sz = map.mappingCount(); + if (sz > MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + int n = (int)sz; + Object[] r = new Object[n]; + int i = 0; + for (E e : this) { + if (i == n) { + if (n >= MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) + n = MAX_ARRAY_SIZE; + else + n += (n >>> 1) + 1; + r = Arrays.copyOf(r, n); + } + r[i++] = e; + } + return (i == n) ? 
r : Arrays.copyOf(r, i); + } + + public final <T> T[] toArray(T[] a) { + long sz = map.mappingCount(); + if (sz > MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + int m = (int)sz; + T[] r = (a.length >= m) ? a : + (T[])java.lang.reflect.Array + .newInstance(a.getClass().getComponentType(), m); + int n = r.length; + int i = 0; + for (E e : this) { + if (i == n) { + if (n >= MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) + n = MAX_ARRAY_SIZE; + else + n += (n >>> 1) + 1; + r = Arrays.copyOf(r, n); + } + r[i++] = (T)e; + } + if (a == r && i < n) { + r[i] = null; // null-terminate + return r; + } + return (i == n) ? r : Arrays.copyOf(r, i); + } + + /** + * Returns a string representation of this collection. + * The string representation consists of the string representations + * of the collection's elements in the order they are returned by + * its iterator, enclosed in square brackets ({@code "[]"}). + * Adjacent elements are separated by the characters {@code ", "} + * (comma and space). Elements are converted to strings as by + * {@link String#valueOf(Object)}. + * + * @return a string representation of this collection + */ + public final String toString() { + StringBuilder sb = new StringBuilder(); + sb.append('['); + Iterator<E> it = iterator(); + if (it.hasNext()) { + for (;;) { + Object e = it.next(); + sb.append(e == this ? 
"(this Collection)" : e); + if (!it.hasNext()) + break; + sb.append(',').append(' '); + } + } + return sb.append(']').toString(); + } + + public final boolean containsAll(Collection<?> c) { + if (c != this) { + for (Object e : c) { + if (e == null || !contains(e)) + return false; + } + } + return true; + } + + public final boolean removeAll(Collection<?> c) { + boolean modified = false; + for (Iterator<E> it = iterator(); it.hasNext();) { + if (c.contains(it.next())) { + it.remove(); + modified = true; + } + } + return modified; + } + + public final boolean retainAll(Collection<?> c) { + boolean modified = false; + for (Iterator<E> it = iterator(); it.hasNext();) { + if (!c.contains(it.next())) { + it.remove(); + modified = true; + } + } + return modified; + } + + } + + /** + * A view of a ConcurrentHashMap as a {@link Set} of keys, in + * which additions may optionally be enabled by mapping to a + * common value. This class cannot be directly instantiated. + * See {@link #keySet() keySet()}, + * {@link #keySet(Object) keySet(V)}, + * {@link #newKeySet() newKeySet()}, + * {@link #newKeySet(int) newKeySet(int)}. + * @since 1.8 + */ + public static class KeySetView<K,V> extends CollectionView<K,V,K> + implements Set<K>, java.io.Serializable { + private static final long serialVersionUID = 7249069246763182397L; + private final V value; + KeySetView(ConcurrentHashMap<K,V> map, V value) { // non-public + super(map); + this.value = value; + } + + /** + * Returns the default mapped value for additions, + * or {@code null} if additions are not supported. 
+ * + * @return the default mapped value for additions, or {@code null} + * if not supported + */ + public V getMappedValue() { return value; } + + /** + * {@inheritDoc} + * @throws NullPointerException if the specified key is null + */ + public boolean contains(Object o) { return map.containsKey(o); } + + /** + * Removes the key from this map view, by removing the key (and its + * corresponding value) from the backing map. This method does + * nothing if the key is not in the map. + * + * @param o the key to be removed from the backing map + * @return {@code true} if the backing map contained the specified key + * @throws NullPointerException if the specified key is null + */ + public boolean remove(Object o) { return map.remove(o) != null; } + + /** + * @return an iterator over the keys of the backing map + */ + public Iterator<K> iterator() { + Node<K,V>[] t; + ConcurrentHashMap<K,V> m = map; + int f = (t = m.table) == null ? 0 : t.length; + return new KeyIterator<K,V>(t, f, 0, f, m); + } + + /** + * Adds the specified key to this set view by mapping the key to + * the default mapped value in the backing map, if defined. + * + * @param e key to be added + * @return {@code true} if this set changed as a result of the call + * @throws NullPointerException if the specified key is null + * @throws UnsupportedOperationException if no default mapped value + * for additions was provided + */ + public boolean add(K e) { + V v; + if ((v = value) == null) + throw new UnsupportedOperationException(); + return map.internalPut(e, v, true) == null; + } + + /** + * Adds all of the elements in the specified collection to this set, + * as if by calling {@link #add} on each one. 
+ * + * @param c the elements to be inserted into this set + * @return {@code true} if this set changed as a result of the call + * @throws NullPointerException if the collection or any of its + * elements are {@code null} + * @throws UnsupportedOperationException if no default mapped value + * for additions was provided + */ + public boolean addAll(Collection<? extends K> c) { + boolean added = false; + V v; + if ((v = value) == null) + throw new UnsupportedOperationException(); + for (K e : c) { + if (map.internalPut(e, v, true) == null) + added = true; + } + return added; + } + + public int hashCode() { + int h = 0; + for (K e : this) + h += e.hashCode(); + return h; + } + + public boolean equals(Object o) { + Set<?> c; + return ((o instanceof Set) && + ((c = (Set<?>)o) == this || + (containsAll(c) && c.containsAll(this)))); + } + + public Spliterator<K> spliterator() { + Node<K,V>[] t; + ConcurrentHashMap<K,V> m = map; + long n = m.sumCount(); + int f = (t = m.table) == null ? 0 : t.length; + return new KeySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n); + } + + public void forEach(Consumer<? super K> action) { + if (action == null) throw new NullPointerException(); + Node<K,V>[] t; + if ((t = map.table) != null) { + Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); + for (Node<K,V> p; (p = it.advance()) != null; ) + action.accept((K)p.key); + } + } + } + + /** + * A view of a ConcurrentHashMap as a {@link Collection} of + * values, in which additions are disabled. This class cannot be + * directly instantiated. See {@link #values()}. 
+ */ + static final class ValuesView<K,V> extends CollectionView<K,V,V> + implements Collection<V>, java.io.Serializable { + private static final long serialVersionUID = 2249069246763182397L; + ValuesView(ConcurrentHashMap<K,V> map) { super(map); } + public final boolean contains(Object o) { + return map.containsValue(o); + } + + public final boolean remove(Object o) { + if (o != null) { + for (Iterator<V> it = iterator(); it.hasNext();) { + if (o.equals(it.next())) { + it.remove(); + return true; + } + } + } + return false; + } + + public final Iterator<V> iterator() { + ConcurrentHashMap<K,V> m = map; + Node<K,V>[] t; + int f = (t = m.table) == null ? 0 : t.length; + return new ValueIterator<K,V>(t, f, 0, f, m); + } + + public final boolean add(V e) { + throw new UnsupportedOperationException(); + } + public final boolean addAll(Collection<? extends V> c) { + throw new UnsupportedOperationException(); + } + + public Spliterator<V> spliterator() { + Node<K,V>[] t; + ConcurrentHashMap<K,V> m = map; + long n = m.sumCount(); + int f = (t = m.table) == null ? 0 : t.length; + return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n); + } + + public void forEach(Consumer<? super V> action) { + if (action == null) throw new NullPointerException(); + Node<K,V>[] t; + if ((t = map.table) != null) { + Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); + for (Node<K,V> p; (p = it.advance()) != null; ) + action.accept(p.val); + } + } + } + + /** + * A view of a ConcurrentHashMap as a {@link Set} of (key, value) + * entries. This class cannot be directly instantiated. See + * {@link #entrySet()}. 
+ */ + static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>> + implements Set<Map.Entry<K,V>>, java.io.Serializable { + private static final long serialVersionUID = 2249069246763182397L; + EntrySetView(ConcurrentHashMap<K,V> map) { super(map); } + + public boolean contains(Object o) { + Object k, v, r; Map.Entry<?,?> e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry<?,?>)o).getKey()) != null && + (r = map.get(k)) != null && + (v = e.getValue()) != null && + (v == r || v.equals(r))); + } + + public boolean remove(Object o) { + Object k, v; Map.Entry<?,?> e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry<?,?>)o).getKey()) != null && + (v = e.getValue()) != null && + map.remove(k, v)); + } + + /** + * @return an iterator over the entries of the backing map + */ + public Iterator<Map.Entry<K,V>> iterator() { + ConcurrentHashMap<K,V> m = map; + Node<K,V>[] t; + int f = (t = m.table) == null ? 0 : t.length; + return new EntryIterator<K,V>(t, f, 0, f, m); + } + + public boolean add(Entry<K,V> e) { + return map.internalPut(e.getKey(), e.getValue(), false) == null; + } + + public boolean addAll(Collection<? extends Entry<K,V>> c) { + boolean added = false; + for (Entry<K,V> e : c) { + if (add(e)) + added = true; + } + return added; + } + + public final int hashCode() { + int h = 0; + Node<K,V>[] t; + if ((t = map.table) != null) { + Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); + for (Node<K,V> p; (p = it.advance()) != null; ) { + h += p.hashCode(); + } + } + return h; + } + + public final boolean equals(Object o) { + Set<?> c; + return ((o instanceof Set) && + ((c = (Set<?>)o) == this || + (containsAll(c) && c.containsAll(this)))); + } + + public Spliterator<Map.Entry<K,V>> spliterator() { + Node<K,V>[] t; + ConcurrentHashMap<K,V> m = map; + long n = m.sumCount(); + int f = (t = m.table) == null ? 0 : t.length; + return new EntrySpliterator<K,V>(t, f, 0, f, n < 0L ? 
0L : n, m); + } + + public void forEach(Consumer<? super Map.Entry<K,V>> action) { + if (action == null) throw new NullPointerException(); + Node<K,V>[] t; + if ((t = map.table) != null) { + Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); + for (Node<K,V> p; (p = it.advance()) != null; ) + action.accept(new MapEntry<K,V>((K)p.key, p.val, map)); + } + } + + } + + // ------------------------------------------------------- + + /** + * Base class for bulk tasks. Repeats some fields and code from + * class Traverser, because we need to subclass CountedCompleter. + */ + abstract static class BulkTask<K,V,R> extends CountedCompleter<R> { + Node<K,V>[] tab; // same as Traverser + Node<K,V> next; + int index; + int baseIndex; + int baseLimit; + final int baseSize; + int batch; // split control + + BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t) { + super(par); + this.batch = b; + this.index = this.baseIndex = i; + if ((this.tab = t) == null) + this.baseSize = this.baseLimit = 0; + else if (par == null) + this.baseSize = this.baseLimit = t.length; + else { + this.baseLimit = f; + this.baseSize = par.baseSize; + } + } + + /** + * Same as Traverser version + */ + final Node<K,V> advance() { + Node<K,V> e; + if ((e = next) != null) + e = e.next; + for (;;) { + Node<K,V>[] t; int i, n; Object ek; + if (e != null) + return next = e; + if (baseIndex >= baseLimit || (t = tab) == null || + (n = t.length) <= (i = index) || i < 0) + return next = null; + if ((e = tabAt(t, index)) != null && e.hash < 0) { + if ((ek = e.key) instanceof TreeBin) + e = ((TreeBin<K,V>)ek).first; + else { + tab = (Node<K,V>[])ek; + e = null; + continue; + } + } + if ((index += baseSize) >= n) + index = ++baseIndex; } } - - // Read the keys and values, and put the mappings in the table - for (;;) { - K key = (K) s.readObject(); - V value = (V) s.readObject(); - if (key == null) - break; - put(key, value); + } + + /* + * Task classes. 
Coded in a regular but ugly format/style to + * simplify checks that each variant differs in the right way from + * others. The null screenings exist because compilers cannot tell + * that we've already null-checked task arguments, so we force + * simplest hoisted bypass to help avoid convoluted traps. + */ + + static final class ForEachKeyTask<K,V> + extends BulkTask<K,V,Void> { + final Consumer<? super K> action; + ForEachKeyTask + (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, + Consumer<? super K> action) { + super(p, b, i, f, t); + this.action = action; + } + public final void compute() { + final Consumer<? super K> action; + if ((action = this.action) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + new ForEachKeyTask<K,V> + (this, batch >>>= 1, baseLimit = h, f, tab, + action).fork(); + } + for (Node<K,V> p; (p = advance()) != null;) + action.accept((K)p.key); + propagateCompletion(); + } + } + } + + static final class ForEachValueTask<K,V> + extends BulkTask<K,V,Void> { + final Consumer<? super V> action; + ForEachValueTask + (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, + Consumer<? super V> action) { + super(p, b, i, f, t); + this.action = action; + } + public final void compute() { + final Consumer<? super V> action; + if ((action = this.action) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + new ForEachValueTask<K,V> + (this, batch >>>= 1, baseLimit = h, f, tab, + action).fork(); + } + for (Node<K,V> p; (p = advance()) != null;) + action.accept(p.val); + propagateCompletion(); + } + } + } + + static final class ForEachEntryTask<K,V> + extends BulkTask<K,V,Void> { + final Consumer<? super Entry<K,V>> action; + ForEachEntryTask + (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, + Consumer<? 
super Entry<K,V>> action) { + super(p, b, i, f, t); + this.action = action; + } + public final void compute() { + final Consumer<? super Entry<K,V>> action; + if ((action = this.action) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + new ForEachEntryTask<K,V> + (this, batch >>>= 1, baseLimit = h, f, tab, + action).fork(); + } + for (Node<K,V> p; (p = advance()) != null; ) + action.accept(p); + propagateCompletion(); + } + } + } + + static final class ForEachMappingTask<K,V> + extends BulkTask<K,V,Void> { + final BiConsumer<? super K, ? super V> action; + ForEachMappingTask + (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, + BiConsumer<? super K,? super V> action) { + super(p, b, i, f, t); + this.action = action; + } + public final void compute() { + final BiConsumer<? super K, ? super V> action; + if ((action = this.action) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + new ForEachMappingTask<K,V> + (this, batch >>>= 1, baseLimit = h, f, tab, + action).fork(); + } + for (Node<K,V> p; (p = advance()) != null; ) + action.accept((K)p.key, p.val); + propagateCompletion(); + } + } + } + + static final class ForEachTransformedKeyTask<K,V,U> + extends BulkTask<K,V,Void> { + final Function<? super K, ? extends U> transformer; + final Consumer<? super U> action; + ForEachTransformedKeyTask + (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, + Function<? super K, ? extends U> transformer, Consumer<? super U> action) { + super(p, b, i, f, t); + this.transformer = transformer; this.action = action; + } + public final void compute() { + final Function<? super K, ? extends U> transformer; + final Consumer<? 
super U> action; + if ((transformer = this.transformer) != null && + (action = this.action) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + new ForEachTransformedKeyTask<K,V,U> + (this, batch >>>= 1, baseLimit = h, f, tab, + transformer, action).fork(); + } + for (Node<K,V> p; (p = advance()) != null; ) { + U u; + if ((u = transformer.apply((K)p.key)) != null) + action.accept(u); + } + propagateCompletion(); + } + } + } + + static final class ForEachTransformedValueTask<K,V,U> + extends BulkTask<K,V,Void> { + final Function<? super V, ? extends U> transformer; + final Consumer<? super U> action; + ForEachTransformedValueTask + (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, + Function<? super V, ? extends U> transformer, Consumer<? super U> action) { + super(p, b, i, f, t); + this.transformer = transformer; this.action = action; + } + public final void compute() { + final Function<? super V, ? extends U> transformer; + final Consumer<? super U> action; + if ((transformer = this.transformer) != null && + (action = this.action) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + new ForEachTransformedValueTask<K,V,U> + (this, batch >>>= 1, baseLimit = h, f, tab, + transformer, action).fork(); + } + for (Node<K,V> p; (p = advance()) != null; ) { + U u; + if ((u = transformer.apply(p.val)) != null) + action.accept(u); + } + propagateCompletion(); + } + } + } + + static final class ForEachTransformedEntryTask<K,V,U> + extends BulkTask<K,V,Void> { + final Function<Map.Entry<K,V>, ? extends U> transformer; + final Consumer<? super U> action; + ForEachTransformedEntryTask + (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, + Function<Map.Entry<K,V>, ? extends U> transformer, Consumer<? 
super U> action) { + super(p, b, i, f, t); + this.transformer = transformer; this.action = action; + } + public final void compute() { + final Function<Map.Entry<K,V>, ? extends U> transformer; + final Consumer<? super U> action; + if ((transformer = this.transformer) != null && + (action = this.action) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + new ForEachTransformedEntryTask<K,V,U> + (this, batch >>>= 1, baseLimit = h, f, tab, + transformer, action).fork(); + } + for (Node<K,V> p; (p = advance()) != null; ) { + U u; + if ((u = transformer.apply(p)) != null) + action.accept(u); + } + propagateCompletion(); + } + } + } + + static final class ForEachTransformedMappingTask<K,V,U> + extends BulkTask<K,V,Void> { + final BiFunction<? super K, ? super V, ? extends U> transformer; + final Consumer<? super U> action; + ForEachTransformedMappingTask + (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, + BiFunction<? super K, ? super V, ? extends U> transformer, + Consumer<? super U> action) { + super(p, b, i, f, t); + this.transformer = transformer; this.action = action; + } + public final void compute() { + final BiFunction<? super K, ? super V, ? extends U> transformer; + final Consumer<? super U> action; + if ((transformer = this.transformer) != null && + (action = this.action) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + new ForEachTransformedMappingTask<K,V,U> + (this, batch >>>= 1, baseLimit = h, f, tab, + transformer, action).fork(); + } + for (Node<K,V> p; (p = advance()) != null; ) { + U u; + if ((u = transformer.apply((K)p.key, p.val)) != null) + action.accept(u); + } + propagateCompletion(); + } + } + } + + static final class SearchKeysTask<K,V,U> + extends BulkTask<K,V,U> { + final Function<? super K, ? 
extends U> searchFunction; + final AtomicReference<U> result; + SearchKeysTask + (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, + Function<? super K, ? extends U> searchFunction, + AtomicReference<U> result) { + super(p, b, i, f, t); + this.searchFunction = searchFunction; this.result = result; + } + public final U getRawResult() { return result.get(); } + public final void compute() { + final Function<? super K, ? extends U> searchFunction; + final AtomicReference<U> result; + if ((searchFunction = this.searchFunction) != null && + (result = this.result) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + if (result.get() != null) + return; + addToPendingCount(1); + new SearchKeysTask<K,V,U> + (this, batch >>>= 1, baseLimit = h, f, tab, + searchFunction, result).fork(); + } + while (result.get() == null) { + U u; + Node<K,V> p; + if ((p = advance()) == null) { + propagateCompletion(); + break; + } + if ((u = searchFunction.apply((K)p.key)) != null) { + if (result.compareAndSet(null, u)) + quietlyCompleteRoot(); + break; + } + } + } + } + } + + static final class SearchValuesTask<K,V,U> + extends BulkTask<K,V,U> { + final Function<? super V, ? extends U> searchFunction; + final AtomicReference<U> result; + SearchValuesTask + (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, + Function<? super V, ? extends U> searchFunction, + AtomicReference<U> result) { + super(p, b, i, f, t); + this.searchFunction = searchFunction; this.result = result; + } + public final U getRawResult() { return result.get(); } + public final void compute() { + final Function<? super V, ? 
extends U> searchFunction; + final AtomicReference<U> result; + if ((searchFunction = this.searchFunction) != null && + (result = this.result) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + if (result.get() != null) + return; + addToPendingCount(1); + new SearchValuesTask<K,V,U> + (this, batch >>>= 1, baseLimit = h, f, tab, + searchFunction, result).fork(); + } + while (result.get() == null) { + U u; + Node<K,V> p; + if ((p = advance()) == null) { + propagateCompletion(); + break; + } + if ((u = searchFunction.apply(p.val)) != null) { + if (result.compareAndSet(null, u)) + quietlyCompleteRoot(); + break; + } + } + } + } + } + + static final class SearchEntriesTask<K,V,U> + extends BulkTask<K,V,U> { + final Function<Entry<K,V>, ? extends U> searchFunction; + final AtomicReference<U> result; + SearchEntriesTask + (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, + Function<Entry<K,V>, ? extends U> searchFunction, + AtomicReference<U> result) { + super(p, b, i, f, t); + this.searchFunction = searchFunction; this.result = result; + } + public final U getRawResult() { return result.get(); } + public final void compute() { + final Function<Entry<K,V>, ? 
extends U> searchFunction; + final AtomicReference<U> result; + if ((searchFunction = this.searchFunction) != null && + (result = this.result) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + if (result.get() != null) + return; + addToPendingCount(1); + new SearchEntriesTask<K,V,U> + (this, batch >>>= 1, baseLimit = h, f, tab, + searchFunction, result).fork(); + } + while (result.get() == null) { + U u; + Node<K,V> p; + if ((p = advance()) == null) { + propagateCompletion(); + break; + } + if ((u = searchFunction.apply(p)) != null) { + if (result.compareAndSet(null, u)) + quietlyCompleteRoot(); + return; + } + } + } + } + } + + static final class SearchMappingsTask<K,V,U> + extends BulkTask<K,V,U> { + final BiFunction<? super K, ? super V, ? extends U> searchFunction; + final AtomicReference<U> result; + SearchMappingsTask + (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, + BiFunction<? super K, ? super V, ? extends U> searchFunction, + AtomicReference<U> result) { + super(p, b, i, f, t); + this.searchFunction = searchFunction; this.result = result; + } + public final U getRawResult() { return result.get(); } + public final void compute() { + final BiFunction<? super K, ? super V, ? 
extends U> searchFunction; + final AtomicReference<U> result; + if ((searchFunction = this.searchFunction) != null && + (result = this.result) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + if (result.get() != null) + return; + addToPendingCount(1); + new SearchMappingsTask<K,V,U> + (this, batch >>>= 1, baseLimit = h, f, tab, + searchFunction, result).fork(); + } + while (result.get() == null) { + U u; + Node<K,V> p; + if ((p = advance()) == null) { + propagateCompletion(); + break; + } + if ((u = searchFunction.apply((K)p.key, p.val)) != null) { + if (result.compareAndSet(null, u)) + quietlyCompleteRoot(); + break; + } + } + } + } + } + + static final class ReduceKeysTask<K,V> + extends BulkTask<K,V,K> { + final BiFunction<? super K, ? super K, ? extends K> reducer; + K result; + ReduceKeysTask<K,V> rights, nextRight; + ReduceKeysTask + (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, + ReduceKeysTask<K,V> nextRight, + BiFunction<? super K, ? super K, ? extends K> reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.reducer = reducer; + } + public final K getRawResult() { return result; } + public final void compute() { + final BiFunction<? super K, ? super K, ? extends K> reducer; + if ((reducer = this.reducer) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new ReduceKeysTask<K,V> + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, reducer)).fork(); + } + K r = null; + for (Node<K,V> p; (p = advance()) != null; ) { + K u = (K)p.key; + r = (r == null) ? u : u == null ? r : reducer.apply(r, u); + } + result = r; + CountedCompleter<?> c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + ReduceKeysTask<K,V> + t = (ReduceKeysTask<K,V>)c, + s = t.rights; + while (s != null) { + K tr, sr; + if ((sr = s.result) != null) + t.result = (((tr = t.result) == null) ? 
sr : + reducer.apply(tr, sr)); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class ReduceValuesTask<K,V> + extends BulkTask<K,V,V> { + final BiFunction<? super V, ? super V, ? extends V> reducer; + V result; + ReduceValuesTask<K,V> rights, nextRight; + ReduceValuesTask + (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, + ReduceValuesTask<K,V> nextRight, + BiFunction<? super V, ? super V, ? extends V> reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.reducer = reducer; + } + public final V getRawResult() { return result; } + public final void compute() { + final BiFunction<? super V, ? super V, ? extends V> reducer; + if ((reducer = this.reducer) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new ReduceValuesTask<K,V> + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, reducer)).fork(); + } + V r = null; + for (Node<K,V> p; (p = advance()) != null; ) { + V v = p.val; + r = (r == null) ? v : reducer.apply(r, v); + } + result = r; + CountedCompleter<?> c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + ReduceValuesTask<K,V> + t = (ReduceValuesTask<K,V>)c, + s = t.rights; + while (s != null) { + V tr, sr; + if ((sr = s.result) != null) + t.result = (((tr = t.result) == null) ? sr : + reducer.apply(tr, sr)); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class ReduceEntriesTask<K,V> + extends BulkTask<K,V,Map.Entry<K,V>> { + final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer; + Map.Entry<K,V> result; + ReduceEntriesTask<K,V> rights, nextRight; + ReduceEntriesTask + (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, + ReduceEntriesTask<K,V> nextRight, + BiFunction<Entry<K,V>, Map.Entry<K,V>, ? 
extends Map.Entry<K,V>> reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.reducer = reducer; + } + public final Map.Entry<K,V> getRawResult() { return result; } + public final void compute() { + final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer; + if ((reducer = this.reducer) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new ReduceEntriesTask<K,V> + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, reducer)).fork(); + } + Map.Entry<K,V> r = null; + for (Node<K,V> p; (p = advance()) != null; ) + r = (r == null) ? p : reducer.apply(r, p); + result = r; + CountedCompleter<?> c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + ReduceEntriesTask<K,V> + t = (ReduceEntriesTask<K,V>)c, + s = t.rights; + while (s != null) { + Map.Entry<K,V> tr, sr; + if ((sr = s.result) != null) + t.result = (((tr = t.result) == null) ? sr : + reducer.apply(tr, sr)); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class MapReduceKeysTask<K,V,U> + extends BulkTask<K,V,U> { + final Function<? super K, ? extends U> transformer; + final BiFunction<? super U, ? super U, ? extends U> reducer; + U result; + MapReduceKeysTask<K,V,U> rights, nextRight; + MapReduceKeysTask + (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, + MapReduceKeysTask<K,V,U> nextRight, + Function<? super K, ? extends U> transformer, + BiFunction<? super U, ? super U, ? extends U> reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.reducer = reducer; + } + public final U getRawResult() { return result; } + public final void compute() { + final Function<? super K, ? extends U> transformer; + final BiFunction<? super U, ? super U, ? 
extends U> reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceKeysTask<K,V,U> + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, reducer)).fork(); + } + U r = null; + for (Node<K,V> p; (p = advance()) != null; ) { + U u; + if ((u = transformer.apply((K)p.key)) != null) + r = (r == null) ? u : reducer.apply(r, u); + } + result = r; + CountedCompleter<?> c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceKeysTask<K,V,U> + t = (MapReduceKeysTask<K,V,U>)c, + s = t.rights; + while (s != null) { + U tr, sr; + if ((sr = s.result) != null) + t.result = (((tr = t.result) == null) ? sr : + reducer.apply(tr, sr)); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class MapReduceValuesTask<K,V,U> + extends BulkTask<K,V,U> { + final Function<? super V, ? extends U> transformer; + final BiFunction<? super U, ? super U, ? extends U> reducer; + U result; + MapReduceValuesTask<K,V,U> rights, nextRight; + MapReduceValuesTask + (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, + MapReduceValuesTask<K,V,U> nextRight, + Function<? super V, ? extends U> transformer, + BiFunction<? super U, ? super U, ? extends U> reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.reducer = reducer; + } + public final U getRawResult() { return result; } + public final void compute() { + final Function<? super V, ? extends U> transformer; + final BiFunction<? super U, ? super U, ? 
extends U> reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceValuesTask<K,V,U> + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, reducer)).fork(); + } + U r = null; + for (Node<K,V> p; (p = advance()) != null; ) { + U u; + if ((u = transformer.apply(p.val)) != null) + r = (r == null) ? u : reducer.apply(r, u); + } + result = r; + CountedCompleter<?> c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceValuesTask<K,V,U> + t = (MapReduceValuesTask<K,V,U>)c, + s = t.rights; + while (s != null) { + U tr, sr; + if ((sr = s.result) != null) + t.result = (((tr = t.result) == null) ? sr : + reducer.apply(tr, sr)); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class MapReduceEntriesTask<K,V,U> + extends BulkTask<K,V,U> { + final Function<Map.Entry<K,V>, ? extends U> transformer; + final BiFunction<? super U, ? super U, ? extends U> reducer; + U result; + MapReduceEntriesTask<K,V,U> rights, nextRight; + MapReduceEntriesTask + (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, + MapReduceEntriesTask<K,V,U> nextRight, + Function<Map.Entry<K,V>, ? extends U> transformer, + BiFunction<? super U, ? super U, ? extends U> reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.reducer = reducer; + } + public final U getRawResult() { return result; } + public final void compute() { + final Function<Map.Entry<K,V>, ? extends U> transformer; + final BiFunction<? super U, ? super U, ? 
extends U> reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceEntriesTask<K,V,U> + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, reducer)).fork(); + } + U r = null; + for (Node<K,V> p; (p = advance()) != null; ) { + U u; + if ((u = transformer.apply(p)) != null) + r = (r == null) ? u : reducer.apply(r, u); + } + result = r; + CountedCompleter<?> c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceEntriesTask<K,V,U> + t = (MapReduceEntriesTask<K,V,U>)c, + s = t.rights; + while (s != null) { + U tr, sr; + if ((sr = s.result) != null) + t.result = (((tr = t.result) == null) ? sr : + reducer.apply(tr, sr)); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class MapReduceMappingsTask<K,V,U> + extends BulkTask<K,V,U> { + final BiFunction<? super K, ? super V, ? extends U> transformer; + final BiFunction<? super U, ? super U, ? extends U> reducer; + U result; + MapReduceMappingsTask<K,V,U> rights, nextRight; + MapReduceMappingsTask + (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, + MapReduceMappingsTask<K,V,U> nextRight, + BiFunction<? super K, ? super V, ? extends U> transformer, + BiFunction<? super U, ? super U, ? extends U> reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.reducer = reducer; + } + public final U getRawResult() { return result; } + public final void compute() { + final BiFunction<? super K, ? super V, ? extends U> transformer; + final BiFunction<? super U, ? super U, ? 
extends U> reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceMappingsTask<K,V,U> + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, reducer)).fork(); + } + U r = null; + for (Node<K,V> p; (p = advance()) != null; ) { + U u; + if ((u = transformer.apply((K)p.key, p.val)) != null) + r = (r == null) ? u : reducer.apply(r, u); + } + result = r; + CountedCompleter<?> c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceMappingsTask<K,V,U> + t = (MapReduceMappingsTask<K,V,U>)c, + s = t.rights; + while (s != null) { + U tr, sr; + if ((sr = s.result) != null) + t.result = (((tr = t.result) == null) ? sr : + reducer.apply(tr, sr)); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class MapReduceKeysToDoubleTask<K,V> + extends BulkTask<K,V,Double> { + final ToDoubleFunction<? super K> transformer; + final DoubleBinaryOperator reducer; + final double basis; + double result; + MapReduceKeysToDoubleTask<K,V> rights, nextRight; + MapReduceKeysToDoubleTask + (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, + MapReduceKeysToDoubleTask<K,V> nextRight, + ToDoubleFunction<? super K> transformer, + double basis, + DoubleBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Double getRawResult() { return result; } + public final void compute() { + final ToDoubleFunction<? 
super K> transformer; + final DoubleBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + double r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceKeysToDoubleTask<K,V> + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node<K,V> p; (p = advance()) != null; ) + r = reducer.applyAsDouble(r, transformer.applyAsDouble((K)p.key)); + result = r; + CountedCompleter<?> c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceKeysToDoubleTask<K,V> + t = (MapReduceKeysToDoubleTask<K,V>)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsDouble(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class MapReduceValuesToDoubleTask<K,V> + extends BulkTask<K,V,Double> { + final ToDoubleFunction<? super V> transformer; + final DoubleBinaryOperator reducer; + final double basis; + double result; + MapReduceValuesToDoubleTask<K,V> rights, nextRight; + MapReduceValuesToDoubleTask + (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, + MapReduceValuesToDoubleTask<K,V> nextRight, + ToDoubleFunction<? super V> transformer, + double basis, + DoubleBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Double getRawResult() { return result; } + public final void compute() { + final ToDoubleFunction<? 
super V> transformer; + final DoubleBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + double r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceValuesToDoubleTask<K,V> + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node<K,V> p; (p = advance()) != null; ) + r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.val)); + result = r; + CountedCompleter<?> c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceValuesToDoubleTask<K,V> + t = (MapReduceValuesToDoubleTask<K,V>)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsDouble(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class MapReduceEntriesToDoubleTask<K,V> + extends BulkTask<K,V,Double> { + final ToDoubleFunction<Map.Entry<K,V>> transformer; + final DoubleBinaryOperator reducer; + final double basis; + double result; + MapReduceEntriesToDoubleTask<K,V> rights, nextRight; + MapReduceEntriesToDoubleTask + (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, + MapReduceEntriesToDoubleTask<K,V> nextRight, + ToDoubleFunction<Map.Entry<K,V>> transformer, + double basis, + DoubleBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Double getRawResult() { return result; } + public final void compute() { + final ToDoubleFunction<Map.Entry<K,V>> transformer; + final DoubleBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + double r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceEntriesToDoubleTask<K,V> + (this, batch >>>= 1, 
baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node<K,V> p; (p = advance()) != null; ) + r = reducer.applyAsDouble(r, transformer.applyAsDouble(p)); + result = r; + CountedCompleter<?> c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceEntriesToDoubleTask<K,V> + t = (MapReduceEntriesToDoubleTask<K,V>)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsDouble(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class MapReduceMappingsToDoubleTask<K,V> + extends BulkTask<K,V,Double> { + final ToDoubleBiFunction<? super K, ? super V> transformer; + final DoubleBinaryOperator reducer; + final double basis; + double result; + MapReduceMappingsToDoubleTask<K,V> rights, nextRight; + MapReduceMappingsToDoubleTask + (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, + MapReduceMappingsToDoubleTask<K,V> nextRight, + ToDoubleBiFunction<? super K, ? super V> transformer, + double basis, + DoubleBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Double getRawResult() { return result; } + public final void compute() { + final ToDoubleBiFunction<? super K, ? 
super V> transformer; + final DoubleBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + double r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceMappingsToDoubleTask<K,V> + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node<K,V> p; (p = advance()) != null; ) + r = reducer.applyAsDouble(r, transformer.applyAsDouble((K)p.key, p.val)); + result = r; + CountedCompleter<?> c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceMappingsToDoubleTask<K,V> + t = (MapReduceMappingsToDoubleTask<K,V>)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsDouble(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class MapReduceKeysToLongTask<K,V> + extends BulkTask<K,V,Long> { + final ToLongFunction<? super K> transformer; + final LongBinaryOperator reducer; + final long basis; + long result; + MapReduceKeysToLongTask<K,V> rights, nextRight; + MapReduceKeysToLongTask + (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, + MapReduceKeysToLongTask<K,V> nextRight, + ToLongFunction<? super K> transformer, + long basis, + LongBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Long getRawResult() { return result; } + public final void compute() { + final ToLongFunction<? 
super K> transformer; + final LongBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + long r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceKeysToLongTask<K,V> + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node<K,V> p; (p = advance()) != null; ) + r = reducer.applyAsLong(r, transformer.applyAsLong((K)p.key)); + result = r; + CountedCompleter<?> c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceKeysToLongTask<K,V> + t = (MapReduceKeysToLongTask<K,V>)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsLong(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class MapReduceValuesToLongTask<K,V> + extends BulkTask<K,V,Long> { + final ToLongFunction<? super V> transformer; + final LongBinaryOperator reducer; + final long basis; + long result; + MapReduceValuesToLongTask<K,V> rights, nextRight; + MapReduceValuesToLongTask + (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, + MapReduceValuesToLongTask<K,V> nextRight, + ToLongFunction<? super V> transformer, + long basis, + LongBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Long getRawResult() { return result; } + public final void compute() { + final ToLongFunction<? 
super V> transformer; + final LongBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + long r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceValuesToLongTask<K,V> + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node<K,V> p; (p = advance()) != null; ) + r = reducer.applyAsLong(r, transformer.applyAsLong(p.val)); + result = r; + CountedCompleter<?> c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceValuesToLongTask<K,V> + t = (MapReduceValuesToLongTask<K,V>)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsLong(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class MapReduceEntriesToLongTask<K,V> + extends BulkTask<K,V,Long> { + final ToLongFunction<Map.Entry<K,V>> transformer; + final LongBinaryOperator reducer; + final long basis; + long result; + MapReduceEntriesToLongTask<K,V> rights, nextRight; + MapReduceEntriesToLongTask + (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, + MapReduceEntriesToLongTask<K,V> nextRight, + ToLongFunction<Map.Entry<K,V>> transformer, + long basis, + LongBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Long getRawResult() { return result; } + public final void compute() { + final ToLongFunction<Map.Entry<K,V>> transformer; + final LongBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + long r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceEntriesToLongTask<K,V> + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, 
reducer)).fork(); + } + for (Node<K,V> p; (p = advance()) != null; ) + r = reducer.applyAsLong(r, transformer.applyAsLong(p)); + result = r; + CountedCompleter<?> c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceEntriesToLongTask<K,V> + t = (MapReduceEntriesToLongTask<K,V>)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsLong(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class MapReduceMappingsToLongTask<K,V> + extends BulkTask<K,V,Long> { + final ToLongBiFunction<? super K, ? super V> transformer; + final LongBinaryOperator reducer; + final long basis; + long result; + MapReduceMappingsToLongTask<K,V> rights, nextRight; + MapReduceMappingsToLongTask + (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, + MapReduceMappingsToLongTask<K,V> nextRight, + ToLongBiFunction<? super K, ? super V> transformer, + long basis, + LongBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Long getRawResult() { return result; } + public final void compute() { + final ToLongBiFunction<? super K, ? 
super V> transformer; + final LongBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + long r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceMappingsToLongTask<K,V> + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node<K,V> p; (p = advance()) != null; ) + r = reducer.applyAsLong(r, transformer.applyAsLong((K)p.key, p.val)); + result = r; + CountedCompleter<?> c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceMappingsToLongTask<K,V> + t = (MapReduceMappingsToLongTask<K,V>)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsLong(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class MapReduceKeysToIntTask<K,V> + extends BulkTask<K,V,Integer> { + final ToIntFunction<? super K> transformer; + final IntBinaryOperator reducer; + final int basis; + int result; + MapReduceKeysToIntTask<K,V> rights, nextRight; + MapReduceKeysToIntTask + (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, + MapReduceKeysToIntTask<K,V> nextRight, + ToIntFunction<? super K> transformer, + int basis, + IntBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Integer getRawResult() { return result; } + public final void compute() { + final ToIntFunction<? 
super K> transformer; + final IntBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + int r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceKeysToIntTask<K,V> + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node<K,V> p; (p = advance()) != null; ) + r = reducer.applyAsInt(r, transformer.applyAsInt((K)p.key)); + result = r; + CountedCompleter<?> c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceKeysToIntTask<K,V> + t = (MapReduceKeysToIntTask<K,V>)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsInt(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class MapReduceValuesToIntTask<K,V> + extends BulkTask<K,V,Integer> { + final ToIntFunction<? super V> transformer; + final IntBinaryOperator reducer; + final int basis; + int result; + MapReduceValuesToIntTask<K,V> rights, nextRight; + MapReduceValuesToIntTask + (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, + MapReduceValuesToIntTask<K,V> nextRight, + ToIntFunction<? super V> transformer, + int basis, + IntBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Integer getRawResult() { return result; } + public final void compute() { + final ToIntFunction<? 
super V> transformer; + final IntBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + int r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceValuesToIntTask<K,V> + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node<K,V> p; (p = advance()) != null; ) + r = reducer.applyAsInt(r, transformer.applyAsInt(p.val)); + result = r; + CountedCompleter<?> c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceValuesToIntTask<K,V> + t = (MapReduceValuesToIntTask<K,V>)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsInt(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class MapReduceEntriesToIntTask<K,V> + extends BulkTask<K,V,Integer> { + final ToIntFunction<Map.Entry<K,V>> transformer; + final IntBinaryOperator reducer; + final int basis; + int result; + MapReduceEntriesToIntTask<K,V> rights, nextRight; + MapReduceEntriesToIntTask + (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, + MapReduceEntriesToIntTask<K,V> nextRight, + ToIntFunction<Map.Entry<K,V>> transformer, + int basis, + IntBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Integer getRawResult() { return result; } + public final void compute() { + final ToIntFunction<Map.Entry<K,V>> transformer; + final IntBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + int r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceEntriesToIntTask<K,V> + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + 
for (Node<K,V> p; (p = advance()) != null; ) + r = reducer.applyAsInt(r, transformer.applyAsInt(p)); + result = r; + CountedCompleter<?> c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceEntriesToIntTask<K,V> + t = (MapReduceEntriesToIntTask<K,V>)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsInt(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class MapReduceMappingsToIntTask<K,V> + extends BulkTask<K,V,Integer> { + final ToIntBiFunction<? super K, ? super V> transformer; + final IntBinaryOperator reducer; + final int basis; + int result; + MapReduceMappingsToIntTask<K,V> rights, nextRight; + MapReduceMappingsToIntTask + (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, + MapReduceMappingsToIntTask<K,V> nextRight, + ToIntBiFunction<? super K, ? super V> transformer, + int basis, + IntBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Integer getRawResult() { return result; } + public final void compute() { + final ToIntBiFunction<? super K, ? 
super V> transformer; + final IntBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + int r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceMappingsToIntTask<K,V> + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node<K,V> p; (p = advance()) != null; ) + r = reducer.applyAsInt(r, transformer.applyAsInt((K)p.key, p.val)); + result = r; + CountedCompleter<?> c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceMappingsToIntTask<K,V> + t = (MapReduceMappingsToIntTask<K,V>)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsInt(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } } } // Unsafe mechanics - private static final sun.misc.Unsafe UNSAFE; - private static final long SBASE; - private static final int SSHIFT; - private static final long TBASE; - private static final int TSHIFT; - private static final long HASHSEED_OFFSET; - private static final long SEGSHIFT_OFFSET; - private static final long SEGMASK_OFFSET; - private static final long SEGMENTS_OFFSET; + private static final sun.misc.Unsafe U; + private static final long SIZECTL; + private static final long TRANSFERINDEX; + private static final long TRANSFERORIGIN; + private static final long BASECOUNT; + private static final long CELLSBUSY; + private static final long CELLVALUE; + private static final long ABASE; + private static final int ASHIFT; static { - int ss, ts; try { - UNSAFE = sun.misc.Unsafe.getUnsafe(); - Class<?> tc = HashEntry[].class; - Class<?> sc = Segment[].class; - TBASE = UNSAFE.arrayBaseOffset(tc); - SBASE = UNSAFE.arrayBaseOffset(sc); - ts = UNSAFE.arrayIndexScale(tc); - ss = UNSAFE.arrayIndexScale(sc); - HASHSEED_OFFSET = UNSAFE.objectFieldOffset( - ConcurrentHashMap.class.getDeclaredField("hashSeed")); - SEGSHIFT_OFFSET = 
UNSAFE.objectFieldOffset( - ConcurrentHashMap.class.getDeclaredField("segmentShift")); - SEGMASK_OFFSET = UNSAFE.objectFieldOffset( - ConcurrentHashMap.class.getDeclaredField("segmentMask")); - SEGMENTS_OFFSET = UNSAFE.objectFieldOffset( - ConcurrentHashMap.class.getDeclaredField("segments")); + U = sun.misc.Unsafe.getUnsafe(); + Class<?> k = ConcurrentHashMap.class; + SIZECTL = U.objectFieldOffset + (k.getDeclaredField("sizeCtl")); + TRANSFERINDEX = U.objectFieldOffset + (k.getDeclaredField("transferIndex")); + TRANSFERORIGIN = U.objectFieldOffset + (k.getDeclaredField("transferOrigin")); + BASECOUNT = U.objectFieldOffset + (k.getDeclaredField("baseCount")); + CELLSBUSY = U.objectFieldOffset + (k.getDeclaredField("cellsBusy")); + Class<?> ck = Cell.class; + CELLVALUE = U.objectFieldOffset + (ck.getDeclaredField("value")); + Class<?> sc = Node[].class; + ABASE = U.arrayBaseOffset(sc); + int scale = U.arrayIndexScale(sc); + if ((scale & (scale - 1)) != 0) + throw new Error("data type scale not a power of two"); + ASHIFT = 31 - Integer.numberOfLeadingZeros(scale); } catch (Exception e) { throw new Error(e); } - if ((ss & (ss-1)) != 0 || (ts & (ts-1)) != 0) - throw new Error("data type scale not a power of two"); - SSHIFT = 31 - Integer.numberOfLeadingZeros(ss); - TSHIFT = 31 - Integer.numberOfLeadingZeros(ts); } - }
--- a/src/share/classes/java/util/spi/LocaleServiceProvider.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/util/spi/LocaleServiceProvider.java Wed Jun 05 13:10:11 2013 -0300 @@ -128,6 +128,14 @@ * installed SPI providers, and "JRE" represents the locale sensitive services * in the Java Runtime Environment, the locale sensitive services in the SPI * providers are looked up first. + * <p> + * There are two other possible locale sensitive service providers, i.e., "CLDR" + * which is a provider based on Unicode Consortium's + * <a href="http://cldr.unicode.org/">CLDR Project</a>, and "HOST" which is a + * provider that reflects the user's custom settings in the underlying operating + * system. These two providers may not be available, depending on the Java Runtime + * Environment implementation. Specifying "JRE,SPI" is identical to the default + * behavior, which is compatible with the prior releases. * * @since 1.6 */
--- a/src/share/classes/java/util/stream/DoubleStream.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/util/stream/DoubleStream.java Wed Jun 05 13:10:11 2013 -0300 @@ -603,7 +603,7 @@ /** * Returns an {@link OptionalDouble} describing the first element of this * stream (in the encounter order), or an empty {@code OptionalDouble} if - * the stream is empty. If the stream has no encounter order, than any + * the stream is empty. If the stream has no encounter order, then any * element may be returned. * * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
--- a/src/share/classes/java/util/stream/IntStream.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/util/stream/IntStream.java Wed Jun 05 13:10:11 2013 -0300 @@ -588,7 +588,7 @@ /** * Returns an {@link OptionalInt} describing the first element of this * stream (in the encounter order), or an empty {@code OptionalInt} if the - * stream is empty. If the stream has no encounter order, than any element + * stream is empty. If the stream has no encounter order, then any element * may be returned. * * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
--- a/src/share/classes/java/util/stream/LongStream.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/util/stream/LongStream.java Wed Jun 05 13:10:11 2013 -0300 @@ -588,7 +588,7 @@ /** * Returns an {@link OptionalLong} describing the first element of this * stream (in the encounter order), or an empty {@code OptionalLong} if the - * stream is empty. If the stream has no encounter order, than any element + * stream is empty. If the stream has no encounter order, then any element * may be returned. * * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
--- a/src/share/classes/java/util/stream/Stream.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/util/stream/Stream.java Wed Jun 05 13:10:11 2013 -0300 @@ -754,7 +754,7 @@ /** * Returns an {@link Optional} describing the first element of this stream * (in the encounter order), or an empty {@code Optional} if the stream is - * empty. If the stream has no encounter order, than any element may be + * empty. If the stream has no encounter order, then any element may be * returned. * * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
--- a/src/share/classes/java/util/stream/StreamBuilder.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/util/stream/StreamBuilder.java Wed Jun 05 13:10:11 2013 -0300 @@ -38,7 +38,7 @@ * <p>A {@code StreamBuilder} has a lifecycle, where it starts in a building * phase, during which elements can be added, and then transitions to a built * phase, after which elements may not be added. The built phase begins - * when the {@link #build()}} method is called, which creates an ordered + * when the {@link #build()} method is called, which creates an ordered * {@code Stream} whose elements are the elements that were added to the stream * builder, in the order they were added. * @@ -98,7 +98,7 @@ * <p>A stream builder has a lifecycle, where it starts in a building * phase, during which elements can be added, and then transitions to a * built phase, after which elements may not be added. The built phase - * begins when the {@link #build()}} method is called, which creates an + * begins when the {@link #build()} method is called, which creates an * ordered stream whose elements are the elements that were added to the * stream builder, in the order they were added. * @@ -155,7 +155,7 @@ * <p>A stream builder has a lifecycle, where it starts in a building * phase, during which elements can be added, and then transitions to a * built phase, after which elements may not be added. The built phase - * begins when the {@link #build()}} method is called, which creates an + * begins when the {@link #build()} method is called, which creates an * ordered stream whose elements are the elements that were added to the * stream builder, in the order they were added. * @@ -209,6 +209,13 @@ /** * A mutable builder for a {@code DoubleStream}. * + * <p>A stream builder has a lifecycle, where it starts in a building + * phase, during which elements can be added, and then transitions to a + * built phase, after which elements may not be added. 
The built phase + * begins when the {@link #build()} method is called, which creates an + * ordered stream whose elements are the elements that were added to the + * stream builder, in the order they were added. + * * @see LongStream#builder() * @since 1.8 */ @@ -217,13 +224,6 @@ /** * Adds an element to the stream being built. * - * <p>A stream builder has a lifecycle, where it starts in a building - * phase, during which elements can be added, and then transitions to a - * built phase, after which elements may not be added. The built phase - * begins when the {@link #build()}} method is called, which creates an - * ordered stream whose elements are the elements that were added to the - * stream builder, in the order they were added. - * * @throws IllegalStateException if the builder has already transitioned * to the built state */
--- a/src/share/classes/java/util/stream/StreamSupport.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/util/stream/StreamSupport.java Wed Jun 05 13:10:11 2013 -0300 @@ -41,7 +41,11 @@ * * @since 1.8 */ -public class StreamSupport { +public final class StreamSupport { + + // Suppresses default constructor, ensuring non-instantiability. + private StreamSupport() {} + /** * Creates a new sequential {@code Stream} from a {@code Spliterator}. * @@ -50,7 +54,7 @@ * * <p>It is strongly recommended the spliterator report a characteristic of * {@code IMMUTABLE} or {@code CONCURRENT}, or be - * <a href="Spliterator.html#binding">late-binding</a>. Otherwise, + * <a href="../Spliterator.html#binding">late-binding</a>. Otherwise, * {@link #stream(Supplier, int)} should be used to * reduce the scope of potential interference with the source. See * <a href="package-summary.html#Non-Interference">Non-Interference</a> for @@ -75,7 +79,7 @@ * * <p>It is strongly recommended the spliterator report a characteristic of * {@code IMMUTABLE} or {@code CONCURRENT}, or be - * <a href="Spliterator.html#binding">late-binding</a>. Otherwise, + * <a href="../Spliterator.html#binding">late-binding</a>. Otherwise, * {@link #stream(Supplier, int)} should be used to * reduce the scope of potential interference with the source. See * <a href="package-summary.html#Non-Interference">Non-Interference</a> for @@ -102,7 +106,7 @@ * * <p>For spliterators that report a characteristic of {@code IMMUTABLE} * or {@code CONCURRENT}, or that are - * <a href="Spliterator.html#binding">late-binding</a>, it is likely + * <a href="../Spliterator.html#binding">late-binding</a>, it is likely * more efficient to use {@link #stream(java.util.Spliterator)} instead. 
* The use of a {@code Supplier} in this form provides a level of * indirection that reduces the scope of potential interference with the @@ -138,7 +142,7 @@ * * <p>For spliterators that report a characteristic of {@code IMMUTABLE} * or {@code CONCURRENT}, or that are - * <a href="Spliterator.html#binding">late-binding</a>, it is likely + * <a href="../Spliterator.html#binding">late-binding</a>, it is likely * more efficient to use {@link #stream(Spliterator)} instead. * The use of a {@code Supplier} in this form provides a level of * indirection that reduces the scope of potential interference with the @@ -172,7 +176,7 @@ * * <p>It is strongly recommended the spliterator report a characteristic of * {@code IMMUTABLE} or {@code CONCURRENT}, or be - * <a href="Spliterator.html#binding">late-binding</a>. Otherwise, + * <a href="../Spliterator.html#binding">late-binding</a>. Otherwise, * {@link #stream(Supplier, int)}} should be used to * reduce the scope of potential interference with the source. See * <a href="package-summary.html#Non-Interference">Non-Interference</a> for @@ -195,7 +199,7 @@ * * <p>It is strongly recommended the spliterator report a characteristic of * {@code IMMUTABLE} or {@code CONCURRENT}, or be - * <a href="Spliterator.html#binding">late-binding</a>. Otherwise, + * <a href="../Spliterator.html#binding">late-binding</a>. Otherwise, * {@link #stream(Supplier, int)}} should be used to * reduce the scope of potential interference with the source. See * <a href="package-summary.html#Non-Interference">Non-Interference</a> for @@ -220,7 +224,7 @@ * * <p>For spliterators that report a characteristic of {@code IMMUTABLE} * or {@code CONCURRENT}, or that are - * <a href="Spliterator.html#binding">late-binding</a>, it is likely + * <a href="../Spliterator.html#binding">late-binding</a>, it is likely * more efficient to use {@link #intStream(Spliterator.OfInt)} instead. 
* The use of a {@code Supplier} in this form provides a level of * indirection that reduces the scope of potential interference with the @@ -254,7 +258,7 @@ * * <p>For spliterators that report a characteristic of {@code IMMUTABLE} * or {@code CONCURRENT}, or that are - * <a href="Spliterator.html#binding">late-binding</a>, it is likely + * <a href="../Spliterator.html#binding">late-binding</a>, it is likely * more efficient to use {@link #intStream(Spliterator.OfInt)} instead. * The use of a {@code Supplier} in this form provides a level of * indirection that reduces the scope of potential interference with the @@ -286,7 +290,7 @@ * * <p>It is strongly recommended the spliterator report a characteristic of * {@code IMMUTABLE} or {@code CONCURRENT}, or be - * <a href="Spliterator.html#binding">late-binding</a>. Otherwise, + * <a href="../Spliterator.html#binding">late-binding</a>. Otherwise, * {@link #stream(Supplier, int)} should be used to * reduce the scope of potential interference with the source. See * <a href="package-summary.html#Non-Interference">Non-Interference</a> for @@ -310,7 +314,7 @@ * * <p>It is strongly recommended the spliterator report a characteristic of * {@code IMMUTABLE} or {@code CONCURRENT}, or be - * <a href="Spliterator.html#binding">late-binding</a>. Otherwise, + * <a href="../Spliterator.html#binding">late-binding</a>. Otherwise, * {@link #stream(Supplier, int)} should be used to * reduce the scope of potential interference with the source. See * <a href="package-summary.html#Non-Interference">Non-Interference</a> for @@ -335,7 +339,7 @@ * * <p>For spliterators that report a characteristic of {@code IMMUTABLE} * or {@code CONCURRENT}, or that are - * <a href="Spliterator.html#binding">late-binding</a>, it is likely + * <a href="../Spliterator.html#binding">late-binding</a>, it is likely * more efficient to use {@link #longStream(Spliterator.OfLong)} instead. 
* The use of a {@code Supplier} in this form provides a level of * indirection that reduces the scope of potential interference with the @@ -369,7 +373,7 @@ * * <p>For spliterators that report a characteristic of {@code IMMUTABLE} * or {@code CONCURRENT}, or that are - * <a href="Spliterator.html#binding">late-binding</a>, it is likely + * <a href="../Spliterator.html#binding">late-binding</a>, it is likely * more efficient to use {@link #longStream(Spliterator.OfLong)} instead. * The use of a {@code Supplier} in this form provides a level of * indirection that reduces the scope of potential interference with the @@ -402,7 +406,7 @@ * * <p>It is strongly recommended the spliterator report a characteristic of * {@code IMMUTABLE} or {@code CONCURRENT}, or be - * <a href="Spliterator.html#binding">late-binding</a>. Otherwise, + * <a href="../Spliterator.html#binding">late-binding</a>. Otherwise, * {@link #stream(Supplier, int)} should be used to * reduce the scope of potential interference with the source. See * <a href="package-summary.html#Non-Interference">Non-Interference</a> for @@ -426,7 +430,7 @@ * * <p>It is strongly recommended the spliterator report a characteristic of * {@code IMMUTABLE} or {@code CONCURRENT}, or be - * <a href="Spliterator.html#binding">late-binding</a>. Otherwise, + * <a href="../Spliterator.html#binding">late-binding</a>. Otherwise, * {@link #stream(Supplier, int)} should be used to * reduce the scope of potential interference with the source. See * <a href="package-summary.html#Non-Interference">Non-Interference</a> for @@ -451,7 +455,7 @@ * <p> * For spliterators that report a characteristic of {@code IMMUTABLE} * or {@code CONCURRENT}, or that are - * <a href="Spliterator.html#binding">late-binding</a>, it is likely + * <a href="../Spliterator.html#binding">late-binding</a>, it is likely * more efficient to use {@link #doubleStream(Spliterator.OfDouble)} instead. 
* The use of a {@code Supplier} in this form provides a level of * indirection that reduces the scope of potential interference with the @@ -485,7 +489,7 @@ * * <p>For spliterators that report a characteristic of {@code IMMUTABLE} * or {@code CONCURRENT}, or that are - * <a href="Spliterator.html#binding">late-binding</a>, it is likely + * <a href="../Spliterator.html#binding">late-binding</a>, it is likely * more efficient to use {@link #doubleStream(Spliterator.OfDouble)} instead. * The use of a {@code Supplier} in this form provides a level of * indirection that reduces the scope of potential interference with the
--- a/src/share/classes/java/util/zip/ZipConstants.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/util/zip/ZipConstants.java Wed Jun 05 13:10:11 2013 -0300 @@ -69,6 +69,14 @@ static final int EXTLEN = 12; // uncompressed size /* + * Extra field header ID + */ + static final int EXTID_ZIP64 = 0x0001; // Zip64 + static final int EXTID_NTFS = 0x000a; // NTFS + static final int EXTID_UNIX = 0x000d; // UNIX + static final int EXTID_EXTT = 0x5455; // Info-ZIP Extended Timestamp + + /* * Central directory (CEN) header field offsets */ static final int CENVEM = 4; // version made by
--- a/src/share/classes/java/util/zip/ZipEntry.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/util/zip/ZipEntry.java Wed Jun 05 13:10:11 2013 -0300 @@ -25,8 +25,6 @@ package java.util.zip; -import java.util.Date; - /** * This class is used to represent a ZIP file entry. * @@ -35,7 +33,7 @@ public class ZipEntry implements ZipConstants, Cloneable { String name; // entry name - long time = -1; // modification time (in DOS time) + long mtime = -1; // last modification time long crc = -1; // crc-32 of entry data long size = -1; // uncompressed size of entry data long csize = -1; // compressed size of entry data @@ -79,7 +77,7 @@ */ public ZipEntry(ZipEntry e) { name = e.name; - time = e.time; + mtime = e.mtime; crc = e.crc; size = e.size; csize = e.csize; @@ -89,7 +87,7 @@ comment = e.comment; } - /* + /** * Creates a new un-initialized zip entry */ ZipEntry() {} @@ -103,22 +101,26 @@ } /** - * Sets the modification time of the entry. - * @param time the entry modification time in number of milliseconds - * since the epoch + * Sets the last modification time of the entry. + * + * @param time the last modification time of the entry in milliseconds since the epoch + * @see #getTime() */ public void setTime(long time) { - this.time = javaToDosTime(time); + this.mtime = time; } /** - * Returns the modification time of the entry, or -1 if not specified. - * @return the modification time of the entry, or -1 if not specified + * Returns the last modification time of the entry. + * <p> The last modification time may come from zip entry's extensible + * data field {@code NTFS} or {@code Info-ZIP Extended Timestamp}, if + * the entry is read from {@link ZipInputStream} or {@link ZipFile}. + * + * @return the last modification time of the entry, or -1 if not specified + * @see #setTime(long) */ public long getTime() { - return time != -1 ? 
dosToJavaTime(time) : -1; + return mtime; } /** @@ -277,35 +279,6 @@ return getName(); } - /* - * Converts DOS time to Java time (number of milliseconds since epoch). - */ - private static long dosToJavaTime(long dtime) { - @SuppressWarnings("deprecation") // Use of date constructor. - Date d = new Date((int)(((dtime >> 25) & 0x7f) + 80), - (int)(((dtime >> 21) & 0x0f) - 1), - (int)((dtime >> 16) & 0x1f), - (int)((dtime >> 11) & 0x1f), - (int)((dtime >> 5) & 0x3f), - (int)((dtime << 1) & 0x3e)); - return d.getTime(); - } - - /* - * Converts Java time to DOS time. - */ - @SuppressWarnings("deprecation") // Use of date methods - private static long javaToDosTime(long time) { - Date d = new Date(time); - int year = d.getYear() + 1900; - if (year < 1980) { - return (1 << 21) | (1 << 16); - } - return (year - 1980) << 25 | (d.getMonth() + 1) << 21 | - d.getDate() << 16 | d.getHours() << 11 | d.getMinutes() << 5 | - d.getSeconds() >> 1; - } - /** * Returns the hash code value for this entry. */
--- a/src/share/classes/java/util/zip/ZipFile.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/util/zip/ZipFile.java Wed Jun 05 13:10:11 2013 -0300 @@ -46,6 +46,7 @@ import java.util.stream.StreamSupport; import static java.util.zip.ZipConstants64.*; +import static java.util.zip.ZipUtils.*; /** * This class is used to read entries from a zip file. @@ -564,12 +565,44 @@ e.name = zc.toString(bname, bname.length); } } - e.time = getEntryTime(jzentry); e.crc = getEntryCrc(jzentry); e.size = getEntrySize(jzentry); e. csize = getEntryCSize(jzentry); e.method = getEntryMethod(jzentry); e.extra = getEntryBytes(jzentry, JZENTRY_EXTRA); + if (e.extra != null) { + byte[] extra = e.extra; + int len = e.extra.length; + int off = 0; + while (off + 4 < len) { + int pos = off; + int tag = get16(extra, pos); + int sz = get16(extra, pos + 2); + pos += 4; + if (pos + sz > len) // invalid data + break; + switch (tag) { + case EXTID_NTFS: + pos += 4; // reserved 4 bytes + if (get16(extra, pos) != 0x0001 || get16(extra, pos + 2) != 24) + break; + e.mtime = winToJavaTime(get64(extra, pos + 4)); + break; + case EXTID_EXTT: + int flag = Byte.toUnsignedInt(extra[pos++]); + if ((flag & 0x1) != 0) { + e.mtime = unixToJavaTime(get32(extra, pos)); + pos += 4; + } + break; + default: // unknown tag + } + off += (sz + 4); + } + } + if (e.mtime == -1) { + e.mtime = dosToJavaTime(getEntryTime(jzentry)); + } byte[] bcomm = getEntryBytes(jzentry, JZENTRY_COMMENT); if (bcomm == null) { e.comment = null;
--- a/src/share/classes/java/util/zip/ZipInputStream.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/util/zip/ZipInputStream.java Wed Jun 05 13:10:11 2013 -0300 @@ -32,6 +32,7 @@ import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; import static java.util.zip.ZipConstants64.*; +import static java.util.zip.ZipUtils.*; /** * This class implements an input stream filter for reading files in the @@ -302,7 +303,7 @@ throw new ZipException("encrypted ZIP entry not supported"); } e.method = get16(tmpbuf, LOCHOW); - e.time = get32(tmpbuf, LOCTIM); + e.mtime = dosToJavaTime(get32(tmpbuf, LOCTIM)); if ((flag & 8) == 8) { /* "Data Descriptor" present */ if (e.method != DEFLATED) { @@ -316,32 +317,51 @@ } len = get16(tmpbuf, LOCEXT); if (len > 0) { - byte[] bb = new byte[len]; - readFully(bb, 0, len); - e.setExtra(bb); + byte[] extra = new byte[len]; + readFully(extra, 0, len); + e.setExtra(extra); // extra fields are in "HeaderID(2)DataSize(2)Data... format - if (e.csize == ZIP64_MAGICVAL || e.size == ZIP64_MAGICVAL) { - int off = 0; - while (off + 4 < len) { - int sz = get16(bb, off + 2); - if (get16(bb, off) == ZIP64_EXTID) { - off += 4; - // LOC extra zip64 entry MUST include BOTH original and - // compressed file size fields - if (sz < 16 || (off + sz) > len ) { - // Invalid zip64 extra fields, simply skip. Even it's - // rare, it's possible the entry size happens to be - // the magic value and it "accidnetly" has some bytes - // in extra match the id. - return e; - } - e.size = get64(bb, off); - e.csize = get64(bb, off + 8); + int off = 0; + while (off + 4 < len) { + int pos = off; + int tag = get16(extra, pos); + int sz = get16(extra, pos + 2); + pos += 4; + if (pos + sz > len) // invalid data + break; + switch (tag) { + case EXTID_ZIP64 : + // LOC extra zip64 entry MUST include BOTH original and + // compressed file size fields. + // + // If invalid zip64 extra fields, simply skip. 
Even it's + // rare, it's possible the entry size happens to be + // the magic value and it "accidently" has some bytes + // in extra match the id. + if (sz >= 16 && (pos + sz) <= len ) { + e.size = get64(extra, pos); + e.csize = get64(extra, pos + 8); + } + break; + case EXTID_NTFS: + pos += 4; // reserved 4 bytes + if (get16(extra, pos) != 0x0001 || get16(extra, pos + 2) != 24) break; + // override the loc field, NTFS time has 'microsecond' granularity + e.mtime = winToJavaTime(get64(extra, pos + 4)); + break; + case EXTID_EXTT: + int flag = Byte.toUnsignedInt(extra[pos++]); + if ((flag & 0x1) != 0) { + e.mtime = unixToJavaTime(get32(extra, pos)); + pos += 4; } - off += (sz + 4); + break; + default: // unknown tag } + off += (sz + 4); } + } return e; } @@ -430,27 +450,4 @@ } } - /* - * Fetches unsigned 16-bit value from byte array at specified offset. - * The bytes are assumed to be in Intel (little-endian) byte order. - */ - private static final int get16(byte b[], int off) { - return Byte.toUnsignedInt(b[off]) | (Byte.toUnsignedInt(b[off+1]) << 8); - } - - /* - * Fetches unsigned 32-bit value from byte array at specified offset. - * The bytes are assumed to be in Intel (little-endian) byte order. - */ - private static final long get32(byte b[], int off) { - return (get16(b, off) | ((long)get16(b, off+2) << 16)) & 0xffffffffL; - } - - /* - * Fetches signed 64-bit value from byte array at specified offset. - * The bytes are assumed to be in Intel (little-endian) byte order. - */ - private static final long get64(byte b[], int off) { - return get32(b, off) | (get32(b, off+4) << 32); - } }
--- a/src/share/classes/java/util/zip/ZipOutputStream.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/java/util/zip/ZipOutputStream.java Wed Jun 05 13:10:11 2013 -0300 @@ -32,6 +32,7 @@ import java.util.Vector; import java.util.HashSet; import static java.util.zip.ZipConstants64.*; +import static java.util.zip.ZipUtils.*; /** * This class implements an output stream filter for writing files in the @@ -190,7 +191,7 @@ if (current != null) { closeEntry(); // close previous entry } - if (e.time == -1) { + if (e.mtime == -1) { e.setTime(System.currentTimeMillis()); } if (e.method == -1) { @@ -382,16 +383,25 @@ private void writeLOC(XEntry xentry) throws IOException { ZipEntry e = xentry.entry; int flag = e.flag; + boolean hasZip64 = false; int elen = (e.extra != null) ? e.extra.length : 0; - boolean hasZip64 = false; - + int eoff = 0; + boolean foundEXTT = false; // if EXTT already present + // do nothing. + while (eoff + 4 < elen) { + int tag = get16(e.extra, eoff); + int sz = get16(e.extra, eoff + 2); + if (tag == EXTID_EXTT) { + foundEXTT = true; + } + eoff += (4 + sz); + } writeInt(LOCSIG); // LOC header signature - if ((flag & 8) == 8) { writeShort(version(e)); // version needed to extract writeShort(flag); // general purpose bit flag writeShort(e.method); // compression method - writeInt(e.time); // last modification time + writeInt(javaToDosTime(e.mtime)); // last modification time // store size, uncompressed size, and crc-32 in data descriptor // immediately following compressed entry data @@ -407,7 +417,7 @@ } writeShort(flag); // general purpose bit flag writeShort(e.method); // compression method - writeInt(e.time); // last modification time + writeInt(javaToDosTime(e.mtime)); // last modification time writeInt(e.crc); // crc-32 if (hasZip64) { writeInt(ZIP64_MAGICVAL); @@ -420,6 +430,8 @@ } byte[] nameBytes = zc.getBytes(e.name); writeShort(nameBytes.length); + if (!foundEXTT) + elen += 9; // use Info-ZIP's ext time in extra writeShort(elen); 
writeBytes(nameBytes, 0, nameBytes.length); if (hasZip64) { @@ -428,6 +440,12 @@ writeLong(e.size); writeLong(e.csize); } + if (!foundEXTT) { + writeShort(EXTID_EXTT); + writeShort(5); // size for the following data block + writeByte(0x1); // flags byte, mtime only + writeInt(javaToUnixTime(e.mtime)); + } if (e.extra != null) { writeBytes(e.extra, 0, e.extra.length); } @@ -457,25 +475,25 @@ ZipEntry e = xentry.entry; int flag = e.flag; int version = version(e); - long csize = e.csize; long size = e.size; long offset = xentry.offset; - int e64len = 0; + int elenZIP64 = 0; boolean hasZip64 = false; + if (e.csize >= ZIP64_MAGICVAL) { csize = ZIP64_MAGICVAL; - e64len += 8; // csize(8) + elenZIP64 += 8; // csize(8) hasZip64 = true; } if (e.size >= ZIP64_MAGICVAL) { size = ZIP64_MAGICVAL; // size(8) - e64len += 8; + elenZIP64 += 8; hasZip64 = true; } if (xentry.offset >= ZIP64_MAGICVAL) { offset = ZIP64_MAGICVAL; - e64len += 8; // offset(8) + elenZIP64 += 8; // offset(8) hasZip64 = true; } writeInt(CENSIG); // CEN header signature @@ -488,18 +506,32 @@ } writeShort(flag); // general purpose bit flag writeShort(e.method); // compression method - writeInt(e.time); // last modification time + writeInt(javaToDosTime(e.mtime)); // last modification time writeInt(e.crc); // crc-32 writeInt(csize); // compressed size writeInt(size); // uncompressed size byte[] nameBytes = zc.getBytes(e.name); writeShort(nameBytes.length); + + int elen = (e.extra != null) ? e.extra.length : 0; + int eoff = 0; + boolean foundEXTT = false; // if EXTT already present + // do nothing. + while (eoff + 4 < elen) { + int tag = get16(e.extra, eoff); + int sz = get16(e.extra, eoff + 2); + if (tag == EXTID_EXTT) { + foundEXTT = true; + } + eoff += (4 + sz); + } if (hasZip64) { // + headid(2) + datasize(2) - writeShort(e64len + 4 + (e.extra != null ? e.extra.length : 0)); - } else { - writeShort(e.extra != null ? 
e.extra.length : 0); + elen += (elenZIP64 + 4); } + if (!foundEXTT) + elen += 9; // Info-ZIP's Extended Timestamp + writeShort(elen); byte[] commentBytes; if (e.comment != null) { commentBytes = zc.getBytes(e.comment); @@ -515,7 +547,7 @@ writeBytes(nameBytes, 0, nameBytes.length); if (hasZip64) { writeShort(ZIP64_EXTID);// Zip64 extra - writeShort(e64len); + writeShort(elenZIP64); if (size == ZIP64_MAGICVAL) writeLong(e.size); if (csize == ZIP64_MAGICVAL) @@ -523,6 +555,12 @@ if (offset == ZIP64_MAGICVAL) writeLong(xentry.offset); } + if (!foundEXTT) { + writeShort(EXTID_EXTT); + writeShort(5); + writeByte(0x1); // flags byte + writeInt(javaToUnixTime(e.mtime)); + } if (e.extra != null) { writeBytes(e.extra, 0, e.extra.length); } @@ -589,6 +627,15 @@ } /* + * Writes a 8-bit byte to the output stream. + */ + private void writeByte(int v) throws IOException { + OutputStream out = this.out; + out.write(v & 0xff); + written += 1; + } + + /* * Writes a 16-bit short to the output stream in little-endian byte order. */ private void writeShort(int v) throws IOException {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/classes/java/util/zip/ZipUtils.java Wed Jun 05 13:10:11 2013 -0300 @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package java.util.zip; + +import java.util.Date; +import java.util.concurrent.TimeUnit; + +class ZipUtils { + + // used to adjust values between Windows and java epoch + private static final long WINDOWS_EPOCH_IN_MICROSECONDS = -11644473600000000L; + + /** + * Converts Windows time (in microseconds, UTC/GMT) time to Java time. + */ + public static final long winToJavaTime(long wtime) { + return TimeUnit.MILLISECONDS.convert( + wtime / 10 + WINDOWS_EPOCH_IN_MICROSECONDS, TimeUnit.MICROSECONDS); + } + + /** + * Converts Java time to Windows time. 
+ */ + public static final long javaToWinTime(long time) { + return (TimeUnit.MICROSECONDS.convert(time, TimeUnit.MILLISECONDS) + - WINDOWS_EPOCH_IN_MICROSECONDS) * 10; + } + + /** + * Converts "standard Unix time"(in seconds, UTC/GMT) to Java time + */ + public static final long unixToJavaTime(long utime) { + return TimeUnit.MILLISECONDS.convert(utime, TimeUnit.SECONDS); + } + + /** + * Converts Java time to "standard Unix time". + */ + public static final long javaToUnixTime(long time) { + return TimeUnit.SECONDS.convert(time, TimeUnit.MILLISECONDS); + } + + /** + * Converts DOS time to Java time (number of milliseconds since epoch). + */ + public static long dosToJavaTime(long dtime) { + @SuppressWarnings("deprecation") // Use of date constructor. + Date d = new Date((int)(((dtime >> 25) & 0x7f) + 80), + (int)(((dtime >> 21) & 0x0f) - 1), + (int)((dtime >> 16) & 0x1f), + (int)((dtime >> 11) & 0x1f), + (int)((dtime >> 5) & 0x3f), + (int)((dtime << 1) & 0x3e)); + return d.getTime(); + } + + /** + * Converts Java time to DOS time. + */ + @SuppressWarnings("deprecation") // Use of date methods + public static long javaToDosTime(long time) { + Date d = new Date(time); + int year = d.getYear() + 1900; + if (year < 1980) { + return (1 << 21) | (1 << 16); + } + return (year - 1980) << 25 | (d.getMonth() + 1) << 21 | + d.getDate() << 16 | d.getHours() << 11 | d.getMinutes() << 5 | + d.getSeconds() >> 1; + } + + + /** + * Fetches unsigned 16-bit value from byte array at specified offset. + * The bytes are assumed to be in Intel (little-endian) byte order. + */ + public static final int get16(byte b[], int off) { + return Byte.toUnsignedInt(b[off]) | (Byte.toUnsignedInt(b[off+1]) << 8); + } + + /** + * Fetches unsigned 32-bit value from byte array at specified offset. + * The bytes are assumed to be in Intel (little-endian) byte order. 
+ */ + public static final long get32(byte b[], int off) { + return (get16(b, off) | ((long)get16(b, off+2) << 16)) & 0xffffffffL; + } + + /** + * Fetches signed 64-bit value from byte array at specified offset. + * The bytes are assumed to be in Intel (little-endian) byte order. + */ + public static final long get64(byte b[], int off) { + return get32(b, off) | (get32(b, off+4) << 32); + } + +}
--- a/src/share/classes/javax/crypto/Cipher.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/javax/crypto/Cipher.java Wed Jun 05 13:10:11 2013 -0300 @@ -1158,6 +1158,9 @@ * determined from the given key, or if the given key has a keysize that * exceeds the maximum allowable keysize (as determined from the * configured jurisdiction policy files). + * @throws UnsupportedOperationException if {@code opmode} is + * {@code WRAP_MODE} or {@code UNWRAP_MODE} but the mode is not implemented + * by the underlying {@code CipherSpi}. */ public final void init(int opmode, Key key) throws InvalidKeyException { init(opmode, key, JceSecurity.RANDOM); } @@ -1208,6 +1211,9 @@ * determined from the given key, or if the given key has a keysize that * exceeds the maximum allowable keysize (as determined from the * configured jurisdiction policy files). + * @throws UnsupportedOperationException if {@code opmode} is + * {@code WRAP_MODE} or {@code UNWRAP_MODE} but the mode is not implemented + * by the underlying {@code CipherSpi}. */ public final void init(int opmode, Key key, SecureRandom random) throws InvalidKeyException @@ -1285,6 +1291,9 @@ * algorithm parameters imply a cryptographic strength that would exceed * the legal limits (as determined from the configured jurisdiction * policy files). + * @throws UnsupportedOperationException if {@code opmode} is + * {@code WRAP_MODE} or {@code UNWRAP_MODE} but the mode is not implemented + * by the underlying {@code CipherSpi}. */ public final void init(int opmode, Key key, AlgorithmParameterSpec params) throws InvalidKeyException, InvalidAlgorithmParameterException @@ -1343,6 +1352,9 @@ * algorithm parameters imply a cryptographic strength that would exceed * the legal limits (as determined from the configured jurisdiction * policy files). + * @throws UnsupportedOperationException if {@code opmode} is + * {@code WRAP_MODE} or {@code UNWRAP_MODE} but the mode is not implemented + * by the underlying {@code CipherSpi}. 
*/ public final void init(int opmode, Key key, AlgorithmParameterSpec params, SecureRandom random) @@ -1416,6 +1428,9 @@ * algorithm parameters imply a cryptographic strength that would exceed * the legal limits (as determined from the configured jurisdiction * policy files). + * @throws UnsupportedOperationException if (@code opmode} is + * {@code WRAP_MODE} or {@code UNWRAP_MODE} but the mode is not implemented + * by the underlying {@code CipherSpi}. */ public final void init(int opmode, Key key, AlgorithmParameters params) throws InvalidKeyException, InvalidAlgorithmParameterException @@ -1474,6 +1489,9 @@ * algorithm parameters imply a cryptographic strength that would exceed * the legal limits (as determined from the configured jurisdiction * policy files). + * @throws UnsupportedOperationException if (@code opmode} is + * {@code WRAP_MODE} or {@code UNWRAP_MODE} but the mode is not implemented + * by the underlying {@code CipherSpi}. */ public final void init(int opmode, Key key, AlgorithmParameters params, SecureRandom random) @@ -1552,6 +1570,9 @@ * in the given certificate has a keysize that exceeds the maximum * allowable keysize (as determined by the configured jurisdiction policy * files). + * @throws UnsupportedOperationException if (@code opmode} is + * {@code WRAP_MODE} or {@code UNWRAP_MODE} but the mode is not implemented + * by the underlying {@code CipherSpi}. */ public final void init(int opmode, Certificate certificate) throws InvalidKeyException @@ -1619,6 +1640,9 @@ * in the given certificate has a keysize that exceeds the maximum * allowable keysize (as determined by the configured jurisdiction policy * files). + * @throws UnsupportedOperationException if (@code opmode} is + * {@code WRAP_MODE} or {@code UNWRAP_MODE} but the mode is not implemented + * by the underlying {@code CipherSpi}. 
*/ public final void init(int opmode, Certificate certificate, SecureRandom random) @@ -2410,6 +2434,9 @@ * @exception InvalidKeyException if it is impossible or unsafe to * wrap the key with this cipher (e.g., a hardware protected key is * being passed to a software-only cipher). + * + * @throws UnsupportedOperationException if the corresponding method in the + * {@code CipherSpi} is not supported. */ public final byte[] wrap(Key key) throws IllegalBlockSizeException, InvalidKeyException { @@ -2451,6 +2478,9 @@ * @exception InvalidKeyException if <code>wrappedKey</code> does not * represent a wrapped key of type <code>wrappedKeyType</code> for * the <code>wrappedKeyAlgorithm</code>. + * + * @throws UnsupportedOperationException if the corresponding method in the + * {@code CipherSpi} is not supported. */ public final Key unwrap(byte[] wrappedKey, String wrappedKeyAlgorithm,
--- a/src/share/classes/javax/crypto/CipherInputStream.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/javax/crypto/CipherInputStream.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2007, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -86,6 +86,8 @@ private int ostart = 0; // the offset pointing to the last "new" byte private int ofinish = 0; + // stream status + private boolean closed = false; /** * private convenience function. @@ -293,14 +295,17 @@ * @since JCE1.2 */ public void close() throws IOException { + if (closed) { + return; + } + + closed = true; input.close(); try { // throw away the unprocessed data cipher.doFinal(); } - catch (BadPaddingException ex) { - } - catch (IllegalBlockSizeException ex) { + catch (BadPaddingException | IllegalBlockSizeException ex) { } ostart = 0; ofinish = 0;
--- a/src/share/classes/javax/crypto/CipherOutputStream.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/javax/crypto/CipherOutputStream.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2007, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -74,6 +74,9 @@ // the buffer holding data ready to be written out private byte[] obuffer; + // stream status + private boolean closed = false; + /** * * Constructs a CipherOutputStream from an OutputStream and a @@ -198,11 +201,14 @@ * @since JCE1.2 */ public void close() throws IOException { + if (closed) { + return; + } + + closed = true; try { obuffer = cipher.doFinal(); - } catch (IllegalBlockSizeException e) { - obuffer = null; - } catch (BadPaddingException e) { + } catch (IllegalBlockSizeException | BadPaddingException e) { obuffer = null; } try {
--- a/src/share/classes/javax/crypto/CipherSpi.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/javax/crypto/CipherSpi.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -347,6 +347,9 @@ * initializing this cipher, or requires * algorithm parameters that cannot be * determined from the given key. + * @throws UnsupportedOperationException if {@code opmode} is + * {@code WRAP_MODE} or {@code UNWRAP_MODE} is not implemented + * by the cipher. */ protected abstract void engineInit(int opmode, Key key, SecureRandom random) @@ -399,6 +402,9 @@ * parameters are inappropriate for this cipher, * or if this cipher requires * algorithm parameters and <code>params</code> is null. + * @throws UnsupportedOperationException if {@code opmode} is + * {@code WRAP_MODE} or {@code UNWRAP_MODE} is not implemented + * by the cipher. */ protected abstract void engineInit(int opmode, Key key, AlgorithmParameterSpec params, @@ -452,6 +458,9 @@ * parameters are inappropriate for this cipher, * or if this cipher requires * algorithm parameters and <code>params</code> is null. + * @throws UnsupportedOperationException if {@code opmode} is + * {@code WRAP_MODE} or {@code UNWRAP_MODE} is not implemented + * by the cipher. */ protected abstract void engineInit(int opmode, Key key, AlgorithmParameters params, @@ -863,6 +872,8 @@ * @exception InvalidKeyException if it is impossible or unsafe to * wrap the key with this cipher (e.g., a hardware protected key is * being passed to a software-only cipher). + * + * @throws UnsupportedOperationException if this method is not supported. 
*/ protected byte[] engineWrap(Key key) throws IllegalBlockSizeException, InvalidKeyException @@ -899,6 +910,8 @@ * @exception InvalidKeyException if <code>wrappedKey</code> does not * represent a wrapped key of type <code>wrappedKeyType</code> for * the <code>wrappedKeyAlgorithm</code>. + * + * @throws UnsupportedOperationException if this method is not supported. */ protected Key engineUnwrap(byte[] wrappedKey, String wrappedKeyAlgorithm,
--- a/src/share/classes/javax/swing/JToolTip.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/javax/swing/JToolTip.java Wed Jun 05 13:10:11 2013 -0300 @@ -31,6 +31,7 @@ import java.io.ObjectOutputStream; import java.io.ObjectInputStream; import java.io.IOException; +import java.util.Objects; /** @@ -128,6 +129,11 @@ String oldValue = this.tipText; this.tipText = tipText; firePropertyChange("tiptext", oldValue, tipText); + + if (!Objects.equals(oldValue, tipText)) { + revalidate(); + repaint(); + } } /**
--- a/src/share/classes/javax/swing/text/View.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/javax/swing/text/View.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1174,6 +1174,7 @@ // formed by added elements (i.e. they will be updated // by initialization. index0 = Math.max(index0, 0); + index1 = getViewIndex(elem.getDocument().getLength(), Position.Bias.Forward); for (int i = index0; i <= index1; i++) { if (! ((i >= hole0) && (i <= hole1))) { v = getView(i);
--- a/src/share/classes/javax/swing/text/html/parser/Parser.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/javax/swing/text/html/parser/Parser.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1980,11 +1980,12 @@ void parseScript() throws IOException { char[] charsToAdd = new char[SCRIPT_END_TAG.length]; + boolean insideComment = false; /* Here, ch should be the first character after <script> */ while (true) { int i = 0; - while (i < SCRIPT_END_TAG.length + while (!insideComment && i < SCRIPT_END_TAG.length && (SCRIPT_END_TAG[i] == ch || SCRIPT_END_TAG_UPPER_CASE[i] == ch)) { charsToAdd[i] = (char) ch; @@ -2025,6 +2026,13 @@ break; default: addString(ch); + String str = new String(getChars(0, strpos)); + if (!insideComment && str.endsWith(START_COMMENT)) { + insideComment = true; + } + if (insideComment && str.endsWith(END_COMMENT)) { + insideComment = false; + } ch = readCh(); break; } // switch
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/classes/sun/management/DiagnosticCommandArgumentInfo.java Wed Jun 05 13:10:11 2013 -0300 @@ -0,0 +1,159 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package sun.management; + +/** + * Diagnostic Command Argument information. It contains the description + * of one parameter of the diagnostic command. A parameter can either be an + * option or an argument. Options are identified by the option name while + * arguments are identified by their position in the command line. 
The generic + * syntax of a diagnostic command is: + * <blockquote> + * <command name> [<option>=<value>] [<argument_value>] + * </blockquote> + * Example: + * <blockquote> + * command_name option1=value1 option2=value argumentA argumentB argumentC + * </blockquote> + * In this command line, the diagnostic command receives five parameters, two + * options named {@code option1} and {@code option2}, and three arguments. + * argumentA's position is 0, argumentB's position is 1 and argumentC's + * position is 2. + * + * @since 8 + */ + +class DiagnosticCommandArgumentInfo { + private final String name; + private final String description; + private final String type; + private final String defaultValue; + private final boolean mandatory; + private final boolean option; + private final boolean multiple; + private final int position; + + /** + * Returns the argument name. + * + * @return the argument name + */ + String getName() { + return name; + } + + /** + * Returns the argument description. + * + * @return the argument description + */ + String getDescription() { + return description; + } + + /** + * Returns the argument type. + * + * @return the argument type + */ + String getType() { + return type; + } + + /** + * Returns the default value as a String if a default value + * is defined, null otherwise. + * + * @return the default value as a String if a default value + * is defined, null otherwise. + */ + String getDefault() { + return defaultValue; + } + + /** + * Returns {@code true} if the argument is mandatory, + * {@code false} otherwise. + * + * @return {@code true} if the argument is mandatory, + * {@code false} otherwise + */ + boolean isMandatory() { + return mandatory; + } + + /** + * Returns {@code true} if the argument is an option, + * {@code false} otherwise. 
Options have to be specified using the + * <key>=<value> syntax on the command line, while other + * arguments are specified with a single <value> field and are + * identified by their position on command line. + * + * @return {@code true} if the argument is an option, + * {@code false} otherwise + */ + boolean isOption() { + return option; + } + + /** + * Returns {@code true} if the argument can be specified multiple times, + * {@code false} otherwise. + * + * @return {@code true} if the argument can be specified multiple times, + * {@code false} otherwise + */ + boolean isMultiple() { + return multiple; + } + + /** + * Returns the expected position of this argument if it is not an option, + * -1 otherwise. Argument position if defined from left to right, + * starting at zero and ignoring the diagnostic command name and + * options. + * + * @return the expected position of this argument if it is not an option, + * -1 otherwise. + */ + int getPosition() { + return position; + } + + DiagnosticCommandArgumentInfo(String name, String description, + String type, String defaultValue, + boolean mandatory, boolean option, + boolean multiple, int position) { + this.name = name; + this.description = description; + this.type = type; + this.defaultValue = defaultValue; + this.mandatory = mandatory; + this.option = option; + this.multiple = multiple; + this.position = position; + } +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/classes/sun/management/DiagnosticCommandImpl.java Wed Jun 05 13:10:11 2013 -0300 @@ -0,0 +1,380 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package sun.management; + +import com.sun.management.DiagnosticCommandMBean; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.security.Permission; +import java.util.*; +import javax.management.*; + +/** + * Implementation class for the diagnostic commands subsystem. 
+ * + * @since 8 + */ +class DiagnosticCommandImpl extends NotificationEmitterSupport + implements DiagnosticCommandMBean { + + private final VMManagement jvm; + private volatile Map<String, Wrapper> wrappers = null; + private static final String strClassName = "".getClass().getName(); + private static final String strArrayClassName = String[].class.getName(); + private final boolean isSupported; + + @Override + public Object getAttribute(String attribute) throws AttributeNotFoundException, + MBeanException, ReflectionException { + throw new AttributeNotFoundException(attribute); + } + + @Override + public void setAttribute(Attribute attribute) throws AttributeNotFoundException, + InvalidAttributeValueException, MBeanException, ReflectionException { + throw new AttributeNotFoundException(attribute.getName()); + } + + @Override + public AttributeList getAttributes(String[] attributes) { + return new AttributeList(); + } + + @Override + public AttributeList setAttributes(AttributeList attributes) { + return new AttributeList(); + } + + private class Wrapper { + + String name; + String cmd; + DiagnosticCommandInfo info; + Permission permission; + + Wrapper(String name, String cmd, DiagnosticCommandInfo info) + throws InstantiationException { + this.name = name; + this.cmd = cmd; + this.info = info; + this.permission = null; + Exception cause = null; + if (info.getPermissionClass() != null) { + try { + Class c = Class.forName(info.getPermissionClass()); + if (info.getPermissionAction() == null) { + try { + Constructor constructor = c.getConstructor(String.class); + permission = (Permission) constructor.newInstance(info.getPermissionName()); + + } catch (InstantiationException | IllegalAccessException + | IllegalArgumentException | InvocationTargetException + | NoSuchMethodException | SecurityException ex) { + cause = ex; + } + } + if (permission == null) { + try { + Constructor constructor = c.getConstructor(String.class, String.class); + permission = (Permission) 
constructor.newInstance( + info.getPermissionName(), + info.getPermissionAction()); + } catch (InstantiationException | IllegalAccessException + | IllegalArgumentException | InvocationTargetException + | NoSuchMethodException | SecurityException ex) { + cause = ex; + } + } + } catch (ClassNotFoundException ex) { } + if (permission == null) { + InstantiationException iex = + new InstantiationException("Unable to instantiate required permission"); + iex.initCause(cause); + } + } + } + + public String execute(String[] args) { + if (permission != null) { + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(permission); + } + } + if(args == null) { + return executeDiagnosticCommand(cmd); + } else { + StringBuilder sb = new StringBuilder(); + sb.append(cmd); + for(int i=0; i<args.length; i++) { + if(args[i] == null) { + throw new IllegalArgumentException("Invalid null argument"); + } + sb.append(" "); + sb.append(args[i]); + } + return executeDiagnosticCommand(sb.toString()); + } + } + } + + DiagnosticCommandImpl(VMManagement jvm) { + this.jvm = jvm; + isSupported = jvm.isRemoteDiagnosticCommandsSupported(); + } + + private static class OperationInfoComparator implements Comparator<MBeanOperationInfo> { + @Override + public int compare(MBeanOperationInfo o1, MBeanOperationInfo o2) { + return o1.getName().compareTo(o2.getName()); + } + } + + @Override + public MBeanInfo getMBeanInfo() { + SortedSet<MBeanOperationInfo> operations = new TreeSet<>(new OperationInfoComparator()); + Map<String, Wrapper> wrappersmap; + if (!isSupported) { + wrappersmap = (Map<String, Wrapper>) Collections.EMPTY_MAP; + } else { + try { + String[] command = getDiagnosticCommands(); + DiagnosticCommandInfo[] info = getDiagnosticCommandInfo(command); + MBeanParameterInfo stringArgInfo[] = new MBeanParameterInfo[]{ + new MBeanParameterInfo("arguments", strArrayClassName, + "Array of Diagnostic Commands Arguments and Options") + }; + wrappersmap = new 
HashMap<>(); + for (int i = 0; i < command.length; i++) { + String name = transform(command[i]); + try { + Wrapper w = new Wrapper(name, command[i], info[i]); + wrappersmap.put(name, w); + operations.add(new MBeanOperationInfo( + w.name, + w.info.getDescription(), + (w.info.getArgumentsInfo() == null + || w.info.getArgumentsInfo().isEmpty()) + ? null : stringArgInfo, + strClassName, + MBeanOperationInfo.ACTION_INFO, + commandDescriptor(w))); + } catch (InstantiationException ex) { + // If for some reasons the creation of a diagnostic command + // wrappers fails, the diagnostic command is just ignored + // and won't appear in the DynamicMBean + } + } + } catch (IllegalArgumentException | UnsupportedOperationException e) { + wrappersmap = (Map<String, Wrapper>) Collections.EMPTY_MAP; + } + } + wrappers = Collections.unmodifiableMap(wrappersmap); + HashMap<String, Object> map = new HashMap<>(); + map.put("immutableInfo", "false"); + map.put("interfaceClassName","com.sun.management.DiagnosticCommandMBean"); + map.put("mxbean", "false"); + Descriptor desc = new ImmutableDescriptor(map); + return new MBeanInfo( + this.getClass().getName(), + "Diagnostic Commands", + null, // attributes + null, // constructors + operations.toArray(new MBeanOperationInfo[operations.size()]), // operations + getNotificationInfo(), // notifications + desc); + } + + @Override + public Object invoke(String actionName, Object[] params, String[] signature) + throws MBeanException, ReflectionException { + if (!isSupported) { + throw new UnsupportedOperationException(); + } + if (wrappers == null) { + getMBeanInfo(); + } + Wrapper w = wrappers.get(actionName); + if (w != null) { + if (w.info.getArgumentsInfo().isEmpty() + && (params == null || params.length == 0) + && (signature == null || signature.length == 0)) { + return w.execute(null); + } else if((params != null && params.length == 1) + && (signature != null && signature.length == 1 + && signature[0] != null + && 
signature[0].compareTo(strArrayClassName) == 0)) { + return w.execute((String[]) params[0]); + } + } + throw new ReflectionException(new NoSuchMethodException(actionName)); + } + + private static String transform(String name) { + StringBuilder sb = new StringBuilder(); + boolean toLower = true; + boolean toUpper = false; + for (int i = 0; i < name.length(); i++) { + char c = name.charAt(i); + if (c == '.' || c == '_') { + toLower = false; + toUpper = true; + } else { + if (toUpper) { + toUpper = false; + sb.append(Character.toUpperCase(c)); + } else if(toLower) { + sb.append(Character.toLowerCase(c)); + } else { + sb.append(c); + } + } + } + return sb.toString(); + } + + private Descriptor commandDescriptor(Wrapper w) throws IllegalArgumentException { + HashMap<String, Object> map = new HashMap<>(); + map.put("dcmd.name", w.info.getName()); + map.put("dcmd.description", w.info.getDescription()); + map.put("dcmd.vmImpact", w.info.getImpact()); + map.put("dcmd.permissionClass", w.info.getPermissionClass()); + map.put("dcmd.permissionName", w.info.getPermissionName()); + map.put("dcmd.permissionAction", w.info.getPermissionAction()); + map.put("dcmd.enabled", w.info.isEnabled()); + StringBuilder sb = new StringBuilder(); + sb.append("help "); + sb.append(w.info.getName()); + map.put("dcmd.help", executeDiagnosticCommand(sb.toString())); + if (w.info.getArgumentsInfo() != null && !w.info.getArgumentsInfo().isEmpty()) { + HashMap<String, Object> allargmap = new HashMap<>(); + for (DiagnosticCommandArgumentInfo arginfo : w.info.getArgumentsInfo()) { + HashMap<String, Object> argmap = new HashMap<>(); + argmap.put("dcmd.arg.name", arginfo.getName()); + argmap.put("dcmd.arg.type", arginfo.getType()); + argmap.put("dcmd.arg.description", arginfo.getDescription()); + argmap.put("dcmd.arg.isMandatory", arginfo.isMandatory()); + argmap.put("dcmd.arg.isMultiple", arginfo.isMultiple()); + boolean isOption = arginfo.isOption(); + argmap.put("dcmd.arg.isOption", isOption); + 
if(!isOption) { + argmap.put("dcmd.arg.position", arginfo.getPosition()); + } else { + argmap.put("dcmd.arg.position", -1); + } + allargmap.put(arginfo.getName(), new ImmutableDescriptor(argmap)); + } + map.put("dcmd.arguments", new ImmutableDescriptor(allargmap)); + } + return new ImmutableDescriptor(map); + } + + private final static String notifName = + "javax.management.Notification"; + + private final static String[] diagFramNotifTypes = { + "jmx.mbean.info.changed" + }; + + private MBeanNotificationInfo[] notifInfo = null; + + @Override + public MBeanNotificationInfo[] getNotificationInfo() { + synchronized (this) { + if (notifInfo == null) { + notifInfo = new MBeanNotificationInfo[1]; + notifInfo[0] = + new MBeanNotificationInfo(diagFramNotifTypes, + notifName, + "Diagnostic Framework Notification"); + } + } + return notifInfo; + } + + private static long seqNumber = 0; + private static long getNextSeqNumber() { + return ++seqNumber; + } + + private void createDiagnosticFrameworkNotification() { + + if (!hasListeners()) { + return; + } + ObjectName on = null; + try { + on = ObjectName.getInstance(ManagementFactoryHelper.HOTSPOT_DIAGNOSTIC_COMMAND_MBEAN_NAME); + } catch (MalformedObjectNameException e) { } + Notification notif = new Notification("jmx.mbean.info.changed", + on, + getNextSeqNumber()); + notif.setUserData(getMBeanInfo()); + sendNotification(notif); + } + + @Override + public synchronized void addNotificationListener(NotificationListener listener, + NotificationFilter filter, + Object handback) { + boolean before = hasListeners(); + super.addNotificationListener(listener, filter, handback); + boolean after = hasListeners(); + if (!before && after) { + setNotificationEnabled(true); + } + } + + @Override + public synchronized void removeNotificationListener(NotificationListener listener) + throws ListenerNotFoundException { + boolean before = hasListeners(); + super.removeNotificationListener(listener); + boolean after = hasListeners(); + if 
(before && !after) { + setNotificationEnabled(false); + } + } + + @Override + public synchronized void removeNotificationListener(NotificationListener listener, + NotificationFilter filter, + Object handback) + throws ListenerNotFoundException { + boolean before = hasListeners(); + super.removeNotificationListener(listener, filter, handback); + boolean after = hasListeners(); + if (before && !after) { + setNotificationEnabled(false); + } + } + + private native void setNotificationEnabled(boolean enabled); + private native String[] getDiagnosticCommands(); + private native DiagnosticCommandInfo[] getDiagnosticCommandInfo(String[] commands); + private native String executeDiagnosticCommand(String command); + +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/classes/sun/management/DiagnosticCommandInfo.java Wed Jun 05 13:10:11 2013 -0300 @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package sun.management; + +import java.util.List; + +/** + * Diagnostic command information. It contains the description of a + * diagnostic command. + * + * @since 8 + */ + +class DiagnosticCommandInfo { + private final String name; + private final String description; + private final String impact; + private final String permissionClass; + private final String permissionName; + private final String permissionAction; + private final boolean enabled; + private final List<DiagnosticCommandArgumentInfo> arguments; + + /** + * Returns the diagnostic command name. 
+ * + * @return the diagnostic command name + */ + String getName() { + return name; + } + + /** + * Returns the diagnostic command description. + * + * @return the diagnostic command description + */ + String getDescription() { + return description; + } + + /** + * Returns the potential impact of the diagnostic command execution + * on the Java virtual machine behavior. + * + * @return the potential impact of the diagnostic command execution + * on the Java virtual machine behavior + */ + String getImpact() { + return impact; + } + + /** + * Returns the name of the permission class required to be allowed + * to invoke the diagnostic command, or null if no permission + * is required. + * + * @return the name of the permission class name required to be allowed + * to invoke the diagnostic command, or null if no permission + * is required + */ + String getPermissionClass() { + return permissionClass; + } + + /** + * Returns the permission name required to be allowed to invoke the + * diagnostic command, or null if no permission is required. + * + * @return the permission name required to be allowed to invoke the + * diagnostic command, or null if no permission is required + */ + String getPermissionName() { + return permissionName; + } + + /** + * Returns the permission action required to be allowed to invoke the + * diagnostic command, or null if no permission is required or + * if the permission has no action specified. + * + * @return the permission action required to be allowed to invoke the + * diagnostic command, or null if no permission is required or + * if the permission has no action specified + */ + String getPermissionAction() { + return permissionAction; + } + + /** + * Returns {@code true} if the diagnostic command is enabled, + * {@code false} otherwise. The enabled/disabled + * status of a diagnostic command can evolve during + * the lifetime of the Java virtual machine. 
+ * + * @return {@code true} if the diagnostic command is enabled, + * {@code false} otherwise + */ + boolean isEnabled() { + return enabled; + } + + /** + * Returns the list of the diagnostic command arguments description. + * If the diagnostic command has no arguments, it returns an empty list. + * + * @return a list of the diagnostic command arguments description + */ + List<DiagnosticCommandArgumentInfo> getArgumentsInfo() { + return arguments; + } + + DiagnosticCommandInfo(String name, String description, + String impact, String permissionClass, + String permissionName, String permissionAction, + boolean enabled, + List<DiagnosticCommandArgumentInfo> arguments) + { + this.name = name; + this.description = description; + this.impact = impact; + this.permissionClass = permissionClass; + this.permissionName = permissionName; + this.permissionAction = permissionAction; + this.enabled = enabled; + this.arguments = arguments; + } +}
--- a/src/share/classes/sun/management/ManagementFactoryHelper.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/sun/management/ManagementFactoryHelper.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ import java.lang.management.*; +import javax.management.DynamicMBean; import javax.management.InstanceAlreadyExistsException; import javax.management.InstanceNotFoundException; import javax.management.MBeanServer; @@ -42,7 +43,9 @@ import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.List; +import com.sun.management.DiagnosticCommandMBean; import com.sun.management.OSMBeanFactory; import com.sun.management.HotSpotDiagnosticMXBean; @@ -263,6 +266,7 @@ private static HotspotThread hsThreadMBean = null; private static HotspotCompilation hsCompileMBean = null; private static HotspotMemory hsMemoryMBean = null; + private static DiagnosticCommandImpl hsDiagCommandMBean = null; public static synchronized HotSpotDiagnosticMXBean getDiagnosticMXBean() { if (hsDiagMBean == null) { @@ -311,6 +315,14 @@ return hsMemoryMBean; } + public static synchronized DiagnosticCommandMBean getDiagnosticCommandMBean() { + // Remote Diagnostic Commands may not be supported + if (hsDiagCommandMBean == null && jvm.isRemoteDiagnosticCommandsSupported()) { + hsDiagCommandMBean = new DiagnosticCommandImpl(jvm); + } + return hsDiagCommandMBean; + } + /** * This method is for testing only. 
*/ @@ -365,6 +377,18 @@ private final static String HOTSPOT_THREAD_MBEAN_NAME = "sun.management:type=HotspotThreading"; + final static String HOTSPOT_DIAGNOSTIC_COMMAND_MBEAN_NAME = + "com.sun.management:type=DiagnosticCommand"; + + public static HashMap<ObjectName, DynamicMBean> getPlatformDynamicMBeans() { + HashMap<ObjectName, DynamicMBean> map = new HashMap<>(); + DiagnosticCommandMBean diagMBean = getDiagnosticCommandMBean(); + if (diagMBean != null) { + map.put(Util.newObjectName(HOTSPOT_DIAGNOSTIC_COMMAND_MBEAN_NAME), diagMBean); + } + return map; + } + static void registerInternalMBeans(MBeanServer mbs) { // register all internal MBeans if not registered // No exception is thrown if a MBean with that object name
--- a/src/share/classes/sun/management/VMManagement.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/sun/management/VMManagement.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -46,6 +46,7 @@ public boolean isThreadAllocatedMemorySupported(); public boolean isThreadAllocatedMemoryEnabled(); public boolean isGcNotificationSupported(); + public boolean isRemoteDiagnosticCommandsSupported(); // Class Loading Subsystem public long getTotalClassCount();
--- a/src/share/classes/sun/management/VMManagementImpl.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/sun/management/VMManagementImpl.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -57,6 +57,7 @@ private static boolean synchronizerUsageSupport; private static boolean threadAllocatedMemorySupport; private static boolean gcNotificationSupport; + private static boolean remoteDiagnosticCommandsSupport; static { @@ -106,6 +107,10 @@ return gcNotificationSupport; } + public boolean isRemoteDiagnosticCommandsSupported() { + return remoteDiagnosticCommandsSupport; + } + public native boolean isThreadContentionMonitoringEnabled(); public native boolean isThreadCpuTimeEnabled(); public native boolean isThreadAllocatedMemoryEnabled();
--- a/src/share/classes/sun/management/jdp/JdpPacketWriter.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/sun/management/jdp/JdpPacketWriter.java Wed Jun 05 13:10:11 2013 -0300 @@ -60,9 +60,12 @@ */ public void addEntry(String entry) throws IOException { - pkt.writeShort(entry.length()); - byte[] b = entry.getBytes("UTF-8"); - pkt.write(b); + /* DataOutputStream.writeUTF() do essentially + * the same as: + * pkt.writeShort(entry.getBytes("UTF-8").length); + * pkt.write(entry.getBytes("UTF-8")); + */ + pkt.writeUTF(entry); } /**
--- a/src/share/classes/sun/misc/Contended.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/sun/misc/Contended.java Wed Jun 05 13:10:11 2013 -0300 @@ -31,7 +31,42 @@ import java.lang.annotation.Target; /** - * This annotation marks classes and fields as considered to be contended. + * <p>An annotation expressing that objects and/or their fields are + * expected to encounter memory contention, generally in the form of + * "false sharing". This annotation serves as a hint that such objects + * and fields should reside in locations isolated from those of other + * objects or fields. Susceptibility to memory contention is a + * property of the intended usages of objects and fields, not their + * types or qualifiers. The effects of this annotation will nearly + * always add significant space overhead to objects. The use of + * {@code @Contended} is warranted only when the performance impact of + * this time/space tradeoff is intrinsically worthwhile; for example, + * in concurrent contexts in which each instance of the annotated + * class is often accessed by a different thread. + * + * <p>A {@code @Contended} field annotation may optionally include a + * <i>contention group</i> tag. A contention group defines a set of one + * or more fields that collectively must be isolated from all other + * contention groups. The fields in the same contention group may not be + * pairwise isolated. With no contention group tag (or with the default + * empty tag: "") each {@code @Contended} field resides in its own + * <i>distinct</i> and <i>anonymous</i> contention group. + * + * <p>When the annotation is used at the class level, the effect is + * equivalent to grouping all the declared fields not already having the + * {@code @Contended} annotation into the same anonymous group. + * With the class level annotation, implementations may choose different + * isolation techniques, such as isolating the entire object, rather than + * isolating distinct fields. 
A contention group tag has no meaning + * in a class level {@code @Contended} annotation, and is ignored. + * + * <p>The class level {@code @Contended} annotation is not inherited and has + * no effect on the fields declared in any sub-classes. The effects of all + * {@code @Contended} annotations, however, remain in force for all + * subclass instances, providing isolation of all the defined contention + * groups. Contention group tags are not inherited, and the same tag used + * in a superclass and subclass, represent distinct contention groups. + * * @since 1.8 */ @Retention(RetentionPolicy.RUNTIME) @@ -39,7 +74,10 @@ public @interface Contended { /** - Defines the contention group tag. + * The (optional) contention group tag. + * This tag is only meaningful for field level annotations. + * + * @return contention group tag. */ String value() default ""; }
--- a/src/share/classes/sun/misc/Hashing.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/sun/misc/Hashing.java Wed Jun 05 13:10:11 2013 -0300 @@ -24,7 +24,7 @@ */ package sun.misc; -import java.util.Random; +import java.util.concurrent.ThreadLocalRandom; /** * Hashing utilities. @@ -207,28 +207,16 @@ } /** - * Holds references to things that can't be initialized until after VM - * is fully booted. + * Return a non-zero 32-bit pseudo random value. The {@code instance} object + * may be used as part of the value. + * + * @param instance an object to use if desired in choosing value. + * @return a non-zero 32-bit pseudo random value. */ - private static class Holder { - - /** - * Used for generating per-instance hash seeds. - * - * We try to improve upon the default seeding. - */ - static final Random SEED_MAKER = new Random( - Double.doubleToRawLongBits(Math.random()) - ^ System.identityHashCode(Hashing.class) - ^ System.currentTimeMillis() - ^ System.nanoTime() - ^ Runtime.getRuntime().freeMemory()); - } - public static int randomHashSeed(Object instance) { int seed; if (sun.misc.VM.isBooted()) { - seed = Holder.SEED_MAKER.nextInt(); + seed = ThreadLocalRandom.current().nextInt(); } else { // lower quality "random" seed value--still better than zero and not // not practically reversible.
--- a/src/share/classes/sun/net/www/protocol/http/HttpURLConnection.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/sun/net/www/protocol/http/HttpURLConnection.java Wed Jun 05 13:10:11 2013 -0300 @@ -3158,6 +3158,7 @@ private boolean marked = false; private int inCache = 0; private int markCount = 0; + private boolean closed; // false public HttpInputStream (InputStream is) { super (is); @@ -3233,8 +3234,14 @@ } } + private void ensureOpen() throws IOException { + if (closed) + throw new IOException("stream is closed"); + } + @Override public int read() throws IOException { + ensureOpen(); try { byte[] b = new byte[1]; int ret = read(b); @@ -3254,6 +3261,7 @@ @Override public int read(byte[] b, int off, int len) throws IOException { + ensureOpen(); try { int newLen = super.read(b, off, len); int nWrite; @@ -3291,7 +3299,7 @@ @Override public long skip (long n) throws IOException { - + ensureOpen(); long remaining = n; int nr; if (skipBuffer == null) @@ -3317,6 +3325,9 @@ @Override public void close () throws IOException { + if (closed) + return; + try { if (outputStream != null) { if (read() != -1) { @@ -3332,6 +3343,7 @@ } throw ioex; } finally { + closed = true; HttpURLConnection.this.http = null; checkResponseCredentials (true); }
--- a/src/share/classes/sun/security/pkcs11/P11KeyAgreement.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/sun/security/pkcs11/P11KeyAgreement.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -330,7 +330,7 @@ // as here we always retrieve the CKA_VALUE even for tokens // that do not have that bug. byte[] keyBytes = key.getEncoded(); - byte[] newBytes = P11Util.trimZeroes(keyBytes); + byte[] newBytes = KeyUtil.trimZeroes(keyBytes); if (keyBytes != newBytes) { key = new SecretKeySpec(newBytes, algorithm); }
--- a/src/share/classes/sun/security/pkcs11/P11Signature.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/sun/security/pkcs11/P11Signature.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -41,6 +41,7 @@ import sun.security.pkcs11.wrapper.*; import static sun.security.pkcs11.wrapper.PKCS11Constants.*; +import sun.security.util.KeyUtil; /** * Signature implementation class. This class currently supports the @@ -697,8 +698,8 @@ BigInteger r = values[0].getPositiveBigInteger(); BigInteger s = values[1].getPositiveBigInteger(); // trim leading zeroes - byte[] br = P11Util.trimZeroes(r.toByteArray()); - byte[] bs = P11Util.trimZeroes(s.toByteArray()); + byte[] br = KeyUtil.trimZeroes(r.toByteArray()); + byte[] bs = KeyUtil.trimZeroes(s.toByteArray()); int k = Math.max(br.length, bs.length); // r and s each occupy half the array byte[] res = new byte[k << 1];
--- a/src/share/classes/sun/security/pkcs11/P11Util.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/sun/security/pkcs11/P11Util.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -131,20 +131,6 @@ return b; } - // trim leading (most significant) zeroes from the result - static byte[] trimZeroes(byte[] b) { - int i = 0; - while ((i < b.length - 1) && (b[i] == 0)) { - i++; - } - if (i == 0) { - return b; - } - byte[] t = new byte[b.length - i]; - System.arraycopy(b, i, t, 0, t.length); - return t; - } - public static byte[] getMagnitude(BigInteger bi) { byte[] b = bi.toByteArray(); if ((b.length > 1) && (b[0] == 0)) {
--- a/src/share/classes/sun/security/util/KeyUtil.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/sun/security/util/KeyUtil.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -200,5 +200,24 @@ // Don't bother to check against the y^q mod p if safe primes are used. } + + /** + * Trim leading (most significant) zeroes from the result. + * + * @throws NullPointerException if {@code b} is null + */ + public static byte[] trimZeroes(byte[] b) { + int i = 0; + while ((i < b.length - 1) && (b[i] == 0)) { + i++; + } + if (i == 0) { + return b; + } + byte[] t = new byte[b.length - i]; + System.arraycopy(b, i, t, 0, t.length); + return t; + } + }
--- a/src/share/classes/sun/tools/jconsole/SummaryTab.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/sun/tools/jconsole/SummaryTab.java Wed Jun 05 13:10:11 2013 -0300 @@ -360,6 +360,8 @@ Math.min(99F, elapsedCpu / (elapsedTime * 10000F * result.nCPUs)); + cpuUsage = Math.max(0F, cpuUsage); + getPlotter().addValues(result.timeStamp, Math.round(cpuUsage * Math.pow(10.0, CPU_DECIMALS))); getInfoLabel().setText(Resources.format(Messages.CPU_USAGE_FORMAT,
--- a/src/share/classes/sun/util/locale/provider/FallbackLocaleProviderAdapter.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/sun/util/locale/provider/FallbackLocaleProviderAdapter.java Wed Jun 05 13:10:11 2013 -0300 @@ -25,6 +25,11 @@ package sun.util.locale.provider; +import java.util.Collections; +import java.util.HashSet; +import java.util.Locale; +import java.util.Set; + /** * FallbackProviderAdapter implementation. * @@ -33,10 +38,32 @@ public class FallbackLocaleProviderAdapter extends JRELocaleProviderAdapter { /** + * Supported language tag set. + */ + private static final Set<String> rootTagSet = + Collections.singleton(Locale.ROOT.toLanguageTag()); + + /** + * Fallback provider only provides the ROOT locale data. + */ + private final LocaleResources rootLocaleResources = + new LocaleResources(this, Locale.ROOT); + + /** * Returns the type of this LocaleProviderAdapter */ @Override public LocaleProviderAdapter.Type getAdapterType() { return Type.FALLBACK; } + + @Override + public LocaleResources getLocaleResources(Locale locale) { + return rootLocaleResources; + } + + @Override + protected Set<String> createLanguageTagSet(String category) { + return rootTagSet; + } }
--- a/src/share/classes/sun/util/locale/provider/JRELocaleProviderAdapter.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/sun/util/locale/provider/JRELocaleProviderAdapter.java Wed Jun 05 13:10:11 2013 -0300 @@ -34,12 +34,10 @@ import java.text.spi.DateFormatSymbolsProvider; import java.text.spi.DecimalFormatSymbolsProvider; import java.text.spi.NumberFormatProvider; -import java.util.Calendar; import java.util.HashSet; import java.util.Locale; import java.util.Set; import java.util.StringTokenizer; -import java.util.TimeZone; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.spi.CalendarDataProvider;
--- a/src/share/classes/sun/util/locale/provider/LocaleProviderAdapter.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/sun/util/locale/provider/LocaleProviderAdapter.java Wed Jun 05 13:10:11 2013 -0300 @@ -120,6 +120,12 @@ private static LocaleProviderAdapter fallbackLocaleProviderAdapter = null; /** + * Default fallback adapter type, which should return something meaningful in any case. + * This is either JRE or FALLBACK. + */ + static LocaleProviderAdapter.Type defaultLocaleProviderAdapter = null; + + /** * Adapter lookup cache. */ private static ConcurrentMap<Class<? extends LocaleServiceProvider>, ConcurrentMap<Locale, LocaleProviderAdapter>> @@ -140,13 +146,19 @@ // load adapter if necessary switch (aType) { case CLDR: - cldrLocaleProviderAdapter = new CLDRLocaleProviderAdapter(); + if (cldrLocaleProviderAdapter == null) { + cldrLocaleProviderAdapter = new CLDRLocaleProviderAdapter(); + } break; case HOST: - hostLocaleProviderAdapter = new HostLocaleProviderAdapter(); + if (hostLocaleProviderAdapter == null) { + hostLocaleProviderAdapter = new HostLocaleProviderAdapter(); + } break; } - typeList.add(aType); + if (!typeList.contains(aType)) { + typeList.add(aType); + } } catch (IllegalArgumentException | UnsupportedOperationException e) { // could be caused by the user specifying wrong // provider name or format in the system property @@ -160,11 +172,15 @@ // Append FALLBACK as the last resort. fallbackLocaleProviderAdapter = new FallbackLocaleProviderAdapter(); typeList.add(Type.FALLBACK); + defaultLocaleProviderAdapter = Type.FALLBACK; + } else { + defaultLocaleProviderAdapter = Type.JRE; } } else { // Default preference list typeList.add(Type.JRE); typeList.add(Type.SPI); + defaultLocaleProviderAdapter = Type.JRE; } adapterPreference = Collections.unmodifiableList(typeList);
--- a/src/share/classes/sun/util/locale/provider/LocaleServiceProviderPool.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/classes/sun/util/locale/provider/LocaleServiceProviderPool.java Wed Jun 05 13:10:11 2013 -0300 @@ -127,32 +127,13 @@ private LocaleServiceProviderPool (final Class<? extends LocaleServiceProvider> c) { providerClass = c; - // Add the JRE Locale Data Adapter implementation. - providers.putIfAbsent(LocaleProviderAdapter.Type.JRE, - LocaleProviderAdapter.forJRE().getLocaleServiceProvider(c)); - - // Add the SPI Locale Data Adapter implementation. - LocaleProviderAdapter lda = LocaleProviderAdapter.forType(LocaleProviderAdapter.Type.SPI); - LocaleServiceProvider provider = lda.getLocaleServiceProvider(c); - if (provider != null) { - providers.putIfAbsent(LocaleProviderAdapter.Type.SPI, provider); - } - - // Add the CLDR Locale Data Adapter implementation, if needed. - lda = LocaleProviderAdapter.forType(LocaleProviderAdapter.Type.CLDR); - if (lda != null) { - provider = lda.getLocaleServiceProvider(c); - if (provider != null) { - providers.putIfAbsent(LocaleProviderAdapter.Type.CLDR, provider); - } - } - - // Add the Host Locale Data Adapter implementation, if needed. 
- lda = LocaleProviderAdapter.forType(LocaleProviderAdapter.Type.HOST); - if (lda != null) { - provider = lda.getLocaleServiceProvider(c); - if (provider != null) { - providers.putIfAbsent(LocaleProviderAdapter.Type.HOST, provider); + for (LocaleProviderAdapter.Type type : LocaleProviderAdapter.getAdapterPreference()) { + LocaleProviderAdapter lda = LocaleProviderAdapter.forType(type); + if (lda != null) { + LocaleServiceProvider provider = lda.getLocaleServiceProvider(c); + if (provider != null) { + providers.putIfAbsent(type, provider); + } } } } @@ -246,7 +227,8 @@ */ boolean hasProviders() { return providers.size() != 1 || - providers.get(LocaleProviderAdapter.Type.JRE) == null; + (providers.get(LocaleProviderAdapter.Type.JRE) == null && + providers.get(LocaleProviderAdapter.Type.FALLBACK) == null); } /** @@ -296,9 +278,8 @@ // Check whether JRE is the sole locale data provider or not, // and directly call it if it is. if (!hasProviders()) { - return getter.getObject( - (P)providers.get(LocaleProviderAdapter.Type.JRE), - locale, key, params); + return getter.getObject((P)providers.get(LocaleProviderAdapter.defaultLocaleProviderAdapter), + locale, key, params); } List<Locale> lookupLocales = getLookupLocales(locale);
--- a/src/share/demo/nio/zipfs/src/com/sun/nio/zipfs/ZipFileSystem.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/demo/nio/zipfs/src/com/sun/nio/zipfs/ZipFileSystem.java Wed Jun 05 13:10:11 2013 -0300 @@ -1818,7 +1818,7 @@ Entry(byte[] name) { name(name); - this.mtime = System.currentTimeMillis(); + this.mtime = this.ctime = this.atime = System.currentTimeMillis(); this.crc = 0; this.size = 0; this.csize = 0; @@ -1912,17 +1912,18 @@ { int written = CENHDR; int version0 = version(); - long csize0 = csize; long size0 = size; long locoff0 = locoff; int elen64 = 0; // extra for ZIP64 int elenNTFS = 0; // extra for NTFS (a/c/mtime) int elenEXTT = 0; // extra for Extended Timestamp + boolean foundExtraTime = false; // if time stamp NTFS, EXTT present // confirm size/length int nlen = (name != null) ? name.length : 0; int elen = (extra != null) ? extra.length : 0; + int eoff = 0; int clen = (comment != null) ? comment.length : 0; if (csize >= ZIP64_MINVAL) { csize0 = ZIP64_MINVAL; @@ -1936,14 +1937,24 @@ locoff0 = ZIP64_MINVAL; elen64 += 8; // offset(8) } - if (elen64 != 0) + if (elen64 != 0) { elen64 += 4; // header and data sz 4 bytes + } - if (atime != -1) { - if (isWindows) // use NTFS + while (eoff + 4 < elen) { + int tag = SH(extra, eoff); + int sz = SH(extra, eoff + 2); + if (tag == EXTID_EXTT || tag == EXTID_NTFS) { + foundExtraTime = true; + } + eoff += (4 + sz); + } + if (!foundExtraTime) { + if (isWindows) { // use NTFS elenNTFS = 36; // total 36 bytes - else // Extended Timestamp otherwise + } else { // Extended Timestamp otherwise elenEXTT = 9; // only mtime in cen + } } writeInt(os, CENSIG); // CEN header signature if (elen64 != 0) { @@ -2092,11 +2103,13 @@ { writeInt(os, LOCSIG); // LOC header signature int version = version(); - int nlen = (name != null) ? name.length : 0; int elen = (extra != null) ? 
extra.length : 0; + boolean foundExtraTime = false; // if extra timestamp present + int eoff = 0; int elen64 = 0; int elenEXTT = 0; + int elenNTFS = 0; if ((flag & FLAG_DATADESCR) != 0) { writeShort(os, version()); // version needed to extract writeShort(os, flag); // general purpose bit flag @@ -2128,14 +2141,27 @@ writeInt(os, size); // uncompressed size } } - if (atime != -1 && !isWindows) { // on unix use "ext time" - if (ctime == -1) - elenEXTT = 13; - else - elenEXTT = 17; + while (eoff + 4 < elen) { + int tag = SH(extra, eoff); + int sz = SH(extra, eoff + 2); + if (tag == EXTID_EXTT || tag == EXTID_NTFS) { + foundExtraTime = true; + } + eoff += (4 + sz); + } + if (!foundExtraTime) { + if (isWindows) { + elenNTFS = 36; // NTFS, total 36 bytes + } else { // on unix use "ext time" + elenEXTT = 9; + if (atime != -1) + elenEXTT += 4; + if (ctime != -1) + elenEXTT += 4; + } } writeShort(os, name.length); - writeShort(os, elen + elen64 + elenEXTT); + writeShort(os, elen + elen64 + elenNTFS + elenEXTT); writeBytes(os, name); if (elen64 != 0) { writeShort(os, EXTID_ZIP64); @@ -2143,15 +2169,28 @@ writeLong(os, size); writeLong(os, csize); } + if (elenNTFS != 0) { + writeShort(os, EXTID_NTFS); + writeShort(os, elenNTFS - 4); + writeInt(os, 0); // reserved + writeShort(os, 0x0001); // NTFS attr tag + writeShort(os, 24); + writeLong(os, javaToWinTime(mtime)); + writeLong(os, javaToWinTime(atime)); + writeLong(os, javaToWinTime(ctime)); + } if (elenEXTT != 0) { writeShort(os, EXTID_EXTT); writeShort(os, elenEXTT - 4);// size for the folowing data block - if (ctime == -1) - os.write(0x3); // mtime and atime - else - os.write(0x7); // mtime, atime and ctime + int fbyte = 0x1; + if (atime != -1) // mtime and atime + fbyte |= 0x2; + if (ctime != -1) // mtime, atime and ctime + fbyte |= 0x4; + os.write(fbyte); // flags byte writeInt(os, javaToUnixTime(mtime)); - writeInt(os, javaToUnixTime(atime)); + if (atime != -1) + writeInt(os, javaToUnixTime(atime)); if (ctime != -1) 
writeInt(os, javaToUnixTime(ctime)); }
--- a/src/share/demo/nio/zipfs/src/com/sun/nio/zipfs/ZipInfo.java Wed May 29 13:22:58 2013 -0300 +++ b/src/share/demo/nio/zipfs/src/com/sun/nio/zipfs/ZipInfo.java Wed Jun 05 13:10:11 2013 -0300 @@ -214,7 +214,7 @@ winToJavaTime(LL(extra, off + 24))); break; case EXTID_EXTT: - print(" ->Inof-ZIP Extended Timestamp: flag=%x%n",extra[off]); + print(" ->Info-ZIP Extended Timestamp: flag=%x%n",extra[off]); pos = off + 1 ; while (pos + 4 <= off + sz) { print(" *%tc%n", @@ -223,6 +223,7 @@ } break; default: + print(" ->[tag=%x, size=%d]%n", tag, sz); } off += sz; }
--- a/src/share/javavm/export/jmm.h Wed May 29 13:22:58 2013 -0300 +++ b/src/share/javavm/export/jmm.h Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -49,7 +49,8 @@ JMM_VERSION_1_1 = 0x20010100, // JDK 6 JMM_VERSION_1_2 = 0x20010200, // JDK 7 JMM_VERSION_1_2_1 = 0x20010201, // JDK 7 GA - JMM_VERSION = 0x20010202 + JMM_VERSION_1_2_2 = 0x20010202, + JMM_VERSION = 0x20010203 }; typedef struct { @@ -62,7 +63,8 @@ unsigned int isObjectMonitorUsageSupported : 1; unsigned int isSynchronizerUsageSupported : 1; unsigned int isThreadAllocatedMemorySupported : 1; - unsigned int : 23; + unsigned int isRemoteDiagnosticCommandsSupported : 1; + unsigned int : 22; } jmmOptionalSupport; typedef enum { @@ -190,21 +192,27 @@ } jmmGCStat; typedef struct { - const char* name; - const char* description; - const char* impact; - int num_arguments; - jboolean enabled; + const char* name; /* Name of the diagnostic command */ + const char* description; /* Short description */ + const char* impact; /* Impact on the JVM */ + const char* permission_class; /* Class name of the required permission if any */ + const char* permission_name; /* Permission name of the required permission if any */ + const char* permission_action; /* Action name of the required permission if any*/ + int num_arguments; /* Number of supported options or arguments */ + jboolean enabled; /* True if the diagnostic command can be invoked, false otherwise*/ } dcmdInfo; typedef struct { - const char* name; - const char* description; - const char* type; - const char* default_string; - jboolean mandatory; - jboolean option; - int position; + const char* name; /* Option/Argument name*/ + const char* description; /* Short 
description */ + const char* type; /* Type: STRING, BOOLEAN, etc. */ + const char* default_string; /* Default value in a parsable string */ + jboolean mandatory; /* True if the option/argument is mandatory */ + jboolean option; /* True if it is an option, false if it is an argument */ + /* (see diagnosticFramework.hpp for option/argument definitions) */ + jboolean multiple; /* True is the option can be specified several time */ + int position; /* Expected position for this argument (this field is */ + /* meaningless for options) */ } dcmdArgInfo; typedef struct jmmInterface_1_ { @@ -327,6 +335,9 @@ jstring (JNICALL *ExecuteDiagnosticCommand) (JNIEnv *env, jstring command); + void (JNICALL *SetDiagnosticFrameworkNotificationEnabled) + (JNIEnv *env, + jboolean enabled); } JmmInterface; #ifdef __cplusplus
--- a/src/share/native/sun/java2d/cmm/lcms/cmscgats.c Wed May 29 13:22:58 2013 -0300 +++ b/src/share/native/sun/java2d/cmm/lcms/cmscgats.c Wed Jun 05 13:10:11 2013 -0300 @@ -634,6 +634,8 @@ cmsFloat64Number dnum = 0.0; int sign = 1; + if (Buffer == NULL) return 0.0; + if (*Buffer == '-' || *Buffer == '+') { sign = (*Buffer == '-') ? -1 : 1;
--- a/src/share/native/sun/java2d/cmm/lcms/cmslut.c Wed May 29 13:22:58 2013 -0300 +++ b/src/share/native/sun/java2d/cmm/lcms/cmslut.c Wed Jun 05 13:10:11 2013 -0300 @@ -1021,6 +1021,8 @@ mpe = cmsStageAllocToneCurves(ContextID, 3, LabTable); cmsFreeToneCurveTriple(LabTable); + if (mpe == NULL) return NULL; + mpe ->Implements = cmsSigLabV2toV4; return mpe; }
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/native/sun/management/DiagnosticCommandImpl.c Wed Jun 05 13:10:11 2013 -0300 @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include <jni.h> +#include "management.h" +#include "sun_management_DiagnosticCommandImpl.h" + +JNIEXPORT void JNICALL Java_sun_management_DiagnosticCommandImpl_setNotificationEnabled +(JNIEnv *env, jobject dummy, jboolean enabled) { + if(jmm_version > JMM_VERSION_1_2_2) { + jmm_interface->SetDiagnosticFrameworkNotificationEnabled(env, enabled); + } else { + JNU_ThrowByName(env, "java/lang/UnsupportedOperationException", + "JMX interface to diagnostic framework notifications is not supported by this VM"); + } +} + +JNIEXPORT jobjectArray JNICALL +Java_sun_management_DiagnosticCommandImpl_getDiagnosticCommands + (JNIEnv *env, jobject dummy) +{ + return jmm_interface->GetDiagnosticCommands(env); +} + +jobject getDiagnosticCommandArgumentInfoArray(JNIEnv *env, jstring command, + int num_arg) { + int i; + jobject obj; + jobjectArray result; + dcmdArgInfo* dcmd_arg_info_array; + jclass dcmdArgInfoCls; + jclass arraysCls; + jmethodID mid; + jobject resultList; + + dcmd_arg_info_array = (dcmdArgInfo*) malloc(num_arg * sizeof(dcmdArgInfo)); + if (dcmd_arg_info_array == NULL) { + return NULL; + } + jmm_interface->GetDiagnosticCommandArgumentsInfo(env, command, + dcmd_arg_info_array); + dcmdArgInfoCls = (*env)->FindClass(env, + "sun/management/DiagnosticCommandArgumentInfo"); + result = (*env)->NewObjectArray(env, num_arg, dcmdArgInfoCls, NULL); + if (result == NULL) { + free(dcmd_arg_info_array); + return NULL; + } + for (i=0; i<num_arg; i++) { + obj = JNU_NewObjectByName(env, + "sun/management/DiagnosticCommandArgumentInfo", + "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;ZZZI)V", + (*env)->NewStringUTF(env,dcmd_arg_info_array[i].name), + (*env)->NewStringUTF(env,dcmd_arg_info_array[i].description), + (*env)->NewStringUTF(env,dcmd_arg_info_array[i].type), + dcmd_arg_info_array[i].default_string == NULL ? 
NULL: + (*env)->NewStringUTF(env, dcmd_arg_info_array[i].default_string), + dcmd_arg_info_array[i].mandatory, + dcmd_arg_info_array[i].option, + dcmd_arg_info_array[i].multiple, + dcmd_arg_info_array[i].position); + if (obj == NULL) { + free(dcmd_arg_info_array); + return NULL; + } + (*env)->SetObjectArrayElement(env, result, i, obj); + } + free(dcmd_arg_info_array); + arraysCls = (*env)->FindClass(env, "java/util/Arrays"); + mid = (*env)->GetStaticMethodID(env, arraysCls, + "asList", "([Ljava/lang/Object;)Ljava/util/List;"); + resultList = (*env)->CallStaticObjectMethod(env, arraysCls, mid, result); + return resultList; +} + +/* Throws IllegalArgumentException if at least one of the diagnostic command + * passed in argument is not supported by the JVM + */ +JNIEXPORT jobjectArray JNICALL +Java_sun_management_DiagnosticCommandImpl_getDiagnosticCommandInfo +(JNIEnv *env, jobject dummy, jobjectArray commands) +{ + int i; + jclass dcmdInfoCls; + jobject result; + jobjectArray args; + jobject obj; + jmmOptionalSupport mos; + jint ret = jmm_interface->GetOptionalSupport(env, &mos); + jsize num_commands; + dcmdInfo* dcmd_info_array; + + if (commands == NULL) { + JNU_ThrowNullPointerException(env, "Invalid String Array"); + return NULL; + } + num_commands = (*env)->GetArrayLength(env, commands); + dcmd_info_array = (dcmdInfo*) malloc(num_commands * + sizeof(dcmdInfo)); + if (dcmd_info_array == NULL) { + JNU_ThrowOutOfMemoryError(env, NULL); + } + jmm_interface->GetDiagnosticCommandInfo(env, commands, dcmd_info_array); + dcmdInfoCls = (*env)->FindClass(env, + "sun/management/DiagnosticCommandInfo"); + result = (*env)->NewObjectArray(env, num_commands, dcmdInfoCls, NULL); + if (result == NULL) { + free(dcmd_info_array); + JNU_ThrowOutOfMemoryError(env, 0); + } + for (i=0; i<num_commands; i++) { + args = getDiagnosticCommandArgumentInfoArray(env, + (*env)->GetObjectArrayElement(env,commands,i), + dcmd_info_array[i].num_arguments); + if (args == NULL) { + 
free(dcmd_info_array); + JNU_ThrowOutOfMemoryError(env, 0); + } + obj = JNU_NewObjectByName(env, + "sun/management/DiagnosticCommandInfo", + "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;ZLjava/util/List;)V", + (*env)->NewStringUTF(env,dcmd_info_array[i].name), + (*env)->NewStringUTF(env,dcmd_info_array[i].description), + (*env)->NewStringUTF(env,dcmd_info_array[i].impact), + dcmd_info_array[i].permission_class==NULL?NULL:(*env)->NewStringUTF(env,dcmd_info_array[i].permission_class), + dcmd_info_array[i].permission_name==NULL?NULL:(*env)->NewStringUTF(env,dcmd_info_array[i].permission_name), + dcmd_info_array[i].permission_action==NULL?NULL:(*env)->NewStringUTF(env,dcmd_info_array[i].permission_action), + dcmd_info_array[i].enabled, + args); + if (obj == NULL) { + free(dcmd_info_array); + JNU_ThrowOutOfMemoryError(env, 0); + } + (*env)->SetObjectArrayElement(env, result, i, obj); + } + free(dcmd_info_array); + return result; +} + +/* Throws IllegalArgumentException if the diagnostic command + * passed in argument is not supported by the JVM + */ +JNIEXPORT jstring JNICALL +Java_sun_management_DiagnosticCommandImpl_executeDiagnosticCommand +(JNIEnv *env, jobject dummy, jstring command) { + return jmm_interface->ExecuteDiagnosticCommand(env, command); +}
--- a/src/share/native/sun/management/VMManagementImpl.c Wed May 29 13:22:58 2013 -0300 +++ b/src/share/native/sun/management/VMManagementImpl.c Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ */ #include <jni.h> +#include <stdlib.h> #include "jvm.h" #include "management.h" #include "sun_management_VMManagementImpl.h" @@ -96,6 +97,9 @@ value = mos.isThreadAllocatedMemorySupported; setStaticBooleanField(env, cls, "threadAllocatedMemorySupport", value); + value = mos.isRemoteDiagnosticCommandsSupported; + setStaticBooleanField(env, cls, "remoteDiagnosticCommandsSupport", value); + if ((jmm_version > JMM_VERSION_1_2) || (jmm_version == JMM_VERSION_1_2 && ((jmm_version&0xFF) >= 1))) { setStaticBooleanField(env, cls, "gcNotificationSupport", JNI_TRUE);
--- a/src/solaris/bin/java_md_solinux.c Wed May 29 13:22:58 2013 -0300 +++ b/src/solaris/bin/java_md_solinux.c Wed Jun 05 13:10:11 2013 -0300 @@ -649,9 +649,9 @@ && (dmpath == NULL) /* data model specific variables not set */ #endif /* __solaris__ */ ) { - + JLI_MemFree(newargv); + JLI_MemFree(new_runpath); return; - } } @@ -935,7 +935,7 @@ char buf[PATH_MAX+1]; int len = readlink(self, buf, PATH_MAX); if (len >= 0) { - buf[len] = '\0'; /* readlink doesn't nul terminate */ + buf[len] = '\0'; /* readlink(2) doesn't NUL terminate */ exec_path = JLI_StringDup(buf); } }
--- a/src/solaris/classes/sun/awt/X11/MotifDnDConstants.java Wed May 29 13:22:58 2013 -0300 +++ b/src/solaris/classes/sun/awt/X11/MotifDnDConstants.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2008, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -190,7 +190,7 @@ try { Native.putLong(data, motifWindow); - XToolkit.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); + XErrorHandlerUtil.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); XlibWrapper.XChangeProperty(XToolkit.getDisplay(), defaultRootWindow, XA_MOTIF_DRAG_WINDOW.getAtom(), @@ -198,10 +198,10 @@ XConstants.PropModeReplace, data, 1); - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); - if (XToolkit.saved_error != null && - XToolkit.saved_error.get_error_code() != XConstants.Success) { + if ((XErrorHandlerUtil.saved_error != null) && + (XErrorHandlerUtil.saved_error.get_error_code() != XConstants.Success)) { throw new XException("Cannot write motif drag window handle."); } @@ -394,7 +394,7 @@ } } - XToolkit.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); + XErrorHandlerUtil.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); XlibWrapper.XChangeProperty(XToolkit.getDisplay(), motifWindow, XA_MOTIF_DRAG_TARGETS.getAtom(), @@ -402,15 +402,15 @@ XConstants.PropModeReplace, data, tableSize); - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); - if (XToolkit.saved_error != null && - XToolkit.saved_error.get_error_code() != XConstants.Success) { + if ((XErrorHandlerUtil.saved_error != null) && + (XErrorHandlerUtil.saved_error.get_error_code() != XConstants.Success)) { // Create a new motif 
window and retry. motifWindow = createMotifWindow(); - XToolkit.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); + XErrorHandlerUtil.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); XlibWrapper.XChangeProperty(XToolkit.getDisplay(), motifWindow, XA_MOTIF_DRAG_TARGETS.getAtom(), @@ -418,10 +418,10 @@ XConstants.PropModeReplace, data, tableSize); - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); - if (XToolkit.saved_error != null && - XToolkit.saved_error.get_error_code() != XConstants.Success) { + if ((XErrorHandlerUtil.saved_error != null) && + (XErrorHandlerUtil.saved_error.get_error_code() != XConstants.Success)) { throw new XException("Cannot write motif drag targets property."); } } @@ -534,16 +534,16 @@ // CARD32 icc_handle unsafe.putInt(structData + 4, (int)XA_MOTIF_ATOM_0.getAtom()); - XToolkit.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); + XErrorHandlerUtil.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); XlibWrapper.XChangeProperty(XToolkit.getDisplay(), window, XA_MOTIF_ATOM_0.getAtom(), XA_MOTIF_DRAG_INITIATOR_INFO.getAtom(), 8, XConstants.PropModeReplace, structData, MOTIF_INITIATOR_INFO_SIZE); - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); - if (XToolkit.saved_error != null && - XToolkit.saved_error.get_error_code() != XConstants.Success) { + if ((XErrorHandlerUtil.saved_error != null) && + (XErrorHandlerUtil.saved_error.get_error_code() != XConstants.Success)) { throw new XException("Cannot write drag initiator info"); } } finally { @@ -567,16 +567,16 @@ unsafe.putShort(data + 10, (short)0); /* pad */ unsafe.putInt(data + 12, dataSize); - XToolkit.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); + XErrorHandlerUtil.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); 
XlibWrapper.XChangeProperty(XToolkit.getDisplay(), window, XA_MOTIF_DRAG_RECEIVER_INFO.getAtom(), XA_MOTIF_DRAG_RECEIVER_INFO.getAtom(), 8, XConstants.PropModeReplace, data, dataSize); - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); - if (XToolkit.saved_error != null && - XToolkit.saved_error.get_error_code() != XConstants.Success) { + if ((XErrorHandlerUtil.saved_error != null) && + (XErrorHandlerUtil.saved_error.get_error_code() != XConstants.Success)) { throw new XException("Cannot write Motif receiver info property"); } } finally {
--- a/src/solaris/classes/sun/awt/X11/MotifDnDDropTargetProtocol.java Wed May 29 13:22:58 2013 -0300 +++ b/src/solaris/classes/sun/awt/X11/MotifDnDDropTargetProtocol.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -162,16 +162,16 @@ unsafe.putInt(data + 12, dataSize); } - XToolkit.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); + XErrorHandlerUtil.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); XlibWrapper.XChangeProperty(XToolkit.getDisplay(), embedder, MotifDnDConstants.XA_MOTIF_DRAG_RECEIVER_INFO.getAtom(), MotifDnDConstants.XA_MOTIF_DRAG_RECEIVER_INFO.getAtom(), 8, XConstants.PropModeReplace, data, dataSize); - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); - if (XToolkit.saved_error != null && - XToolkit.saved_error.get_error_code() != XConstants.Success) { + if ((XErrorHandlerUtil.saved_error != null) && + (XErrorHandlerUtil.saved_error.get_error_code() != XConstants.Success)) { throw new XException("Cannot write Motif receiver info property"); } } finally { @@ -236,16 +236,16 @@ unsafe.putInt(data + 4, tproxy); - XToolkit.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); + XErrorHandlerUtil.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); XlibWrapper.XChangeProperty(XToolkit.getDisplay(), embedder, MotifDnDConstants.XA_MOTIF_DRAG_RECEIVER_INFO.getAtom(), MotifDnDConstants.XA_MOTIF_DRAG_RECEIVER_INFO.getAtom(), 8, XConstants.PropModeReplace, data, dataSize); - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); - if (XToolkit.saved_error != null && - 
XToolkit.saved_error.get_error_code() != XConstants.Success) { + if ((XErrorHandlerUtil.saved_error != null) && + (XErrorHandlerUtil.saved_error.get_error_code() != XConstants.Success)) { throw new XException("Cannot write Motif receiver info property"); } } @@ -412,15 +412,15 @@ */ XWindowAttributes wattr = new XWindowAttributes(); try { - XToolkit.WITH_XERROR_HANDLER(XErrorHandler.IgnoreBadWindowHandler.getInstance()); + XErrorHandlerUtil.WITH_XERROR_HANDLER(XErrorHandler.IgnoreBadWindowHandler.getInstance()); int status = XlibWrapper.XGetWindowAttributes(XToolkit.getDisplay(), source_win, wattr.pData); - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); - if (status == 0 || - (XToolkit.saved_error != null && - XToolkit.saved_error.get_error_code() != XConstants.Success)) { + if ((status == 0) || + ((XErrorHandlerUtil.saved_error != null) && + (XErrorHandlerUtil.saved_error.get_error_code() != XConstants.Success))) { throw new XException("XGetWindowAttributes failed"); } @@ -429,15 +429,15 @@ wattr.dispose(); } - XToolkit.WITH_XERROR_HANDLER(XErrorHandler.IgnoreBadWindowHandler.getInstance()); + XErrorHandlerUtil.WITH_XERROR_HANDLER(XErrorHandler.IgnoreBadWindowHandler.getInstance()); XlibWrapper.XSelectInput(XToolkit.getDisplay(), source_win, source_win_mask | XConstants.StructureNotifyMask); - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); - if (XToolkit.saved_error != null && - XToolkit.saved_error.get_error_code() != XConstants.Success) { + if ((XErrorHandlerUtil.saved_error != null) && + (XErrorHandlerUtil.saved_error.get_error_code() != XConstants.Success)) { throw new XException("XSelectInput failed"); } @@ -1024,10 +1024,10 @@ if (sourceWindow != 0) { XToolkit.awtLock(); try { - XToolkit.WITH_XERROR_HANDLER(XErrorHandler.IgnoreBadWindowHandler.getInstance()); + XErrorHandlerUtil.WITH_XERROR_HANDLER(XErrorHandler.IgnoreBadWindowHandler.getInstance()); 
XlibWrapper.XSelectInput(XToolkit.getDisplay(), sourceWindow, sourceWindowMask); - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); } finally { XToolkit.awtUnlock(); }
--- a/src/solaris/classes/sun/awt/X11/WindowPropertyGetter.java Wed May 29 13:22:58 2013 -0300 +++ b/src/solaris/classes/sun/awt/X11/WindowPropertyGetter.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2008, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -99,7 +99,7 @@ } if (errorHandler != null) { - XToolkit.WITH_XERROR_HANDLER(errorHandler); + XErrorHandlerUtil.WITH_XERROR_HANDLER(errorHandler); } Native.putLong(data, 0); int status = XlibWrapper.XGetWindowProperty(XToolkit.getDisplay(), window, property.getAtom(), @@ -112,7 +112,7 @@ } if (errorHandler != null) { - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); } return status; } finally {
--- a/src/solaris/classes/sun/awt/X11/XConstants.java Wed May 29 13:22:58 2013 -0300 +++ b/src/solaris/classes/sun/awt/X11/XConstants.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2009, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -674,4 +674,9 @@ public static final long XkbModifierMapMask = (1L<<2); public static final long XkbVirtualModsMask = (1L<<6); //server map + /***************************************************************** + * X SHARED MEMORY EXTENSION FUNCTIONS + *****************************************************************/ + + public static final int X_ShmAttach = 1; }
--- a/src/solaris/classes/sun/awt/X11/XDnDDragSourceProtocol.java Wed May 29 13:22:58 2013 -0300 +++ b/src/solaris/classes/sun/awt/X11/XDnDDragSourceProtocol.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2008, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -96,14 +96,14 @@ action_count++; } - XToolkit.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); + XErrorHandlerUtil.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); XDnDConstants.XA_XdndActionList.setAtomData(window, XAtom.XA_ATOM, data, action_count); - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); - if (XToolkit.saved_error != null && - XToolkit.saved_error.get_error_code() != XConstants.Success) { + if ((XErrorHandlerUtil.saved_error) != null && + (XErrorHandlerUtil.saved_error.get_error_code() != XConstants.Success)) { cleanup(); throw new XException("Cannot write XdndActionList property"); } @@ -117,14 +117,14 @@ try { Native.put(data, formats); - XToolkit.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); + XErrorHandlerUtil.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); XDnDConstants.XA_XdndTypeList.setAtomData(window, XAtom.XA_ATOM, data, formats.length); - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); - if (XToolkit.saved_error != null && - XToolkit.saved_error.get_error_code() != XConstants.Success) { + if ((XErrorHandlerUtil.saved_error != null) && + (XErrorHandlerUtil.saved_error.get_error_code() != XConstants.Success)) { cleanup(); throw new XException("Cannot write XdndActionList property"); }
--- a/src/solaris/classes/sun/awt/X11/XDnDDropTargetProtocol.java Wed May 29 13:22:58 2013 -0300 +++ b/src/solaris/classes/sun/awt/X11/XDnDDropTargetProtocol.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2008, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -88,12 +88,12 @@ try { Native.putLong(data, 0, XDnDConstants.XDND_PROTOCOL_VERSION); - XToolkit.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); + XErrorHandlerUtil.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); XDnDConstants.XA_XdndAware.setAtomData(window, XAtom.XA_ATOM, data, 1); - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); - if (XToolkit.saved_error != null && - XToolkit.saved_error.get_error_code() != XConstants.Success) { + if ((XErrorHandlerUtil.saved_error != null) && + (XErrorHandlerUtil.saved_error.get_error_code() != XConstants.Success)) { throw new XException("Cannot write XdndAware property"); } } finally { @@ -205,54 +205,50 @@ /* The proxy window must have the XdndAware set, as XDnD protocol prescribes to check the proxy window for XdndAware. 
*/ - XToolkit.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); + XErrorHandlerUtil.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); XDnDConstants.XA_XdndAware.setAtomData(newProxy, XAtom.XA_ATOM, data, 1); - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); - if (XToolkit.saved_error != null && - XToolkit.saved_error.get_error_code() != - XConstants.Success) { + if ((XErrorHandlerUtil.saved_error != null) && + (XErrorHandlerUtil.saved_error.get_error_code() != XConstants.Success)) { throw new XException("Cannot write XdndAware property"); } Native.putLong(data, 0, newProxy); /* The proxy window must have the XdndProxy set to point to itself.*/ - XToolkit.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); + XErrorHandlerUtil.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); XDnDConstants.XA_XdndProxy.setAtomData(newProxy, XAtom.XA_WINDOW, data, 1); - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); - if (XToolkit.saved_error != null && - XToolkit.saved_error.get_error_code() != - XConstants.Success) { + if ((XErrorHandlerUtil.saved_error != null) && + (XErrorHandlerUtil.saved_error.get_error_code() != XConstants.Success)) { throw new XException("Cannot write XdndProxy property"); } Native.putLong(data, 0, XDnDConstants.XDND_PROTOCOL_VERSION); - XToolkit.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); + XErrorHandlerUtil.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); XDnDConstants.XA_XdndAware.setAtomData(embedder, XAtom.XA_ATOM, data, 1); - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); - if (XToolkit.saved_error != null && - XToolkit.saved_error.get_error_code() != - XConstants.Success) { + if ((XErrorHandlerUtil.saved_error != null) && + (XErrorHandlerUtil.saved_error.get_error_code() != 
XConstants.Success)) { throw new XException("Cannot write XdndAware property"); } Native.putLong(data, 0, newProxy); - XToolkit.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); + XErrorHandlerUtil.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); XDnDConstants.XA_XdndProxy.setAtomData(embedder, XAtom.XA_WINDOW, data, 1); - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); - if (XToolkit.saved_error != null && - XToolkit.saved_error.get_error_code() != - XConstants.Success) { + if ((XErrorHandlerUtil.saved_error != null) && + (XErrorHandlerUtil.saved_error.get_error_code() != XConstants.Success)) { throw new XException("Cannot write XdndProxy property"); } } finally { @@ -278,27 +274,25 @@ try { Native.putLong(data, 0, entry.getVersion()); - XToolkit.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); + XErrorHandlerUtil.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); XDnDConstants.XA_XdndAware.setAtomData(embedder, XAtom.XA_ATOM, data, 1); - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); - if (XToolkit.saved_error != null && - XToolkit.saved_error.get_error_code() != - XConstants.Success) { + if ((XErrorHandlerUtil.saved_error != null) && + (XErrorHandlerUtil.saved_error.get_error_code() != XConstants.Success)) { throw new XException("Cannot write XdndAware property"); } Native.putLong(data, 0, (int)entry.getProxy()); - XToolkit.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); + XErrorHandlerUtil.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); XDnDConstants.XA_XdndProxy.setAtomData(embedder, XAtom.XA_WINDOW, data, 1); - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); - if (XToolkit.saved_error != null && - XToolkit.saved_error.get_error_code() != - XConstants.Success) { + if 
((XErrorHandlerUtil.saved_error != null) && + (XErrorHandlerUtil.saved_error.get_error_code() != XConstants.Success)) { throw new XException("Cannot write XdndProxy property"); } } finally { @@ -541,15 +535,15 @@ */ XWindowAttributes wattr = new XWindowAttributes(); try { - XToolkit.WITH_XERROR_HANDLER(XErrorHandler.IgnoreBadWindowHandler.getInstance()); + XErrorHandlerUtil.WITH_XERROR_HANDLER(XErrorHandler.IgnoreBadWindowHandler.getInstance()); int status = XlibWrapper.XGetWindowAttributes(XToolkit.getDisplay(), source_win, wattr.pData); - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); - if (status == 0 || - (XToolkit.saved_error != null && - XToolkit.saved_error.get_error_code() != XConstants.Success)) { + if ((status == 0) || + ((XErrorHandlerUtil.saved_error != null) && + (XErrorHandlerUtil.saved_error.get_error_code() != XConstants.Success))) { throw new XException("XGetWindowAttributes failed"); } @@ -558,15 +552,15 @@ wattr.dispose(); } - XToolkit.WITH_XERROR_HANDLER(XErrorHandler.IgnoreBadWindowHandler.getInstance()); + XErrorHandlerUtil.WITH_XERROR_HANDLER(XErrorHandler.IgnoreBadWindowHandler.getInstance()); XlibWrapper.XSelectInput(XToolkit.getDisplay(), source_win, source_win_mask | XConstants.StructureNotifyMask); - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); - if (XToolkit.saved_error != null && - XToolkit.saved_error.get_error_code() != XConstants.Success) { + if ((XErrorHandlerUtil.saved_error != null) && + (XErrorHandlerUtil.saved_error.get_error_code() != XConstants.Success)) { throw new XException("XSelectInput failed"); } @@ -963,10 +957,10 @@ if (sourceWindow != 0) { XToolkit.awtLock(); try { - XToolkit.WITH_XERROR_HANDLER(XErrorHandler.IgnoreBadWindowHandler.getInstance()); + XErrorHandlerUtil.WITH_XERROR_HANDLER(XErrorHandler.IgnoreBadWindowHandler.getInstance()); XlibWrapper.XSelectInput(XToolkit.getDisplay(), sourceWindow, sourceWindowMask); - 
XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); } finally { XToolkit.awtUnlock(); } @@ -1111,15 +1105,15 @@ XToolkit.awtLock(); try { - XToolkit.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); + XErrorHandlerUtil.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); XDnDConstants.XA_XdndTypeList.setAtomData(xclient.get_window(), XAtom.XA_ATOM, wpg.getData(), wpg.getNumberOfItems()); - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); - if (XToolkit.saved_error != null && - XToolkit.saved_error.get_error_code() != XConstants.Success) { + if ((XErrorHandlerUtil.saved_error != null) && + (XErrorHandlerUtil.saved_error.get_error_code() != XConstants.Success)) { if (logger.isLoggable(PlatformLogger.WARNING)) { logger.warning("Cannot set XdndTypeList on the proxy window"); }
--- a/src/solaris/classes/sun/awt/X11/XDragSourceProtocol.java Wed May 29 13:22:58 2013 -0300 +++ b/src/solaris/classes/sun/awt/X11/XDragSourceProtocol.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2008, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -181,15 +181,15 @@ long time) { XWindowAttributes wattr = new XWindowAttributes(); try { - XToolkit.WITH_XERROR_HANDLER(XErrorHandler.IgnoreBadWindowHandler.getInstance()); + XErrorHandlerUtil.WITH_XERROR_HANDLER(XErrorHandler.IgnoreBadWindowHandler.getInstance()); int status = XlibWrapper.XGetWindowAttributes(XToolkit.getDisplay(), targetWindow, wattr.pData); - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); - if (status == 0 || - (XToolkit.saved_error != null && - XToolkit.saved_error.get_error_code() != XConstants.Success)) { + if ((status == 0) || + ((XErrorHandlerUtil.saved_error != null) && + (XErrorHandlerUtil.saved_error.get_error_code() != XConstants.Success))) { throw new XException("XGetWindowAttributes failed"); } @@ -198,15 +198,15 @@ wattr.dispose(); } - XToolkit.WITH_XERROR_HANDLER(XErrorHandler.IgnoreBadWindowHandler.getInstance()); + XErrorHandlerUtil.WITH_XERROR_HANDLER(XErrorHandler.IgnoreBadWindowHandler.getInstance()); XlibWrapper.XSelectInput(XToolkit.getDisplay(), targetWindow, targetWindowMask | XConstants.StructureNotifyMask); - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); - if (XToolkit.saved_error != null && - XToolkit.saved_error.get_error_code() != XConstants.Success) { + if ((XErrorHandlerUtil.saved_error != null) && + (XErrorHandlerUtil.saved_error.get_error_code() != XConstants.Success)) { throw new XException("XSelectInput failed"); } @@ -214,10 +214,10 @@ } 
protected final void finalizeDrop() { - XToolkit.WITH_XERROR_HANDLER(XErrorHandler.IgnoreBadWindowHandler.getInstance()); + XErrorHandlerUtil.WITH_XERROR_HANDLER(XErrorHandler.IgnoreBadWindowHandler.getInstance()); XlibWrapper.XSelectInput(XToolkit.getDisplay(), targetWindow, targetWindowMask); - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); } public abstract boolean processProxyModeEvent(XClientMessageEvent xclient,
--- a/src/solaris/classes/sun/awt/X11/XDropTargetRegistry.java Wed May 29 13:22:58 2013 -0300 +++ b/src/solaris/classes/sun/awt/X11/XDropTargetRegistry.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2008, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -168,14 +168,14 @@ if (dest_x >= 0 && dest_y >= 0) { XWindowAttributes wattr = new XWindowAttributes(); try { - XToolkit.WITH_XERROR_HANDLER(XErrorHandler.IgnoreBadWindowHandler.getInstance()); + XErrorHandlerUtil.WITH_XERROR_HANDLER(XErrorHandler.IgnoreBadWindowHandler.getInstance()); int status = XlibWrapper.XGetWindowAttributes(XToolkit.getDisplay(), window, wattr.pData); - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); - if (status == 0 || - (XToolkit.saved_error != null && - XToolkit.saved_error.get_error_code() != XConstants.Success)) { + if ((status == 0) || + ((XErrorHandlerUtil.saved_error != null) && + (XErrorHandlerUtil.saved_error.get_error_code() != XConstants.Success))) { continue; } @@ -222,14 +222,14 @@ long event_mask = 0; XWindowAttributes wattr = new XWindowAttributes(); try { - XToolkit.WITH_XERROR_HANDLER(XErrorHandler.IgnoreBadWindowHandler.getInstance()); + XErrorHandlerUtil.WITH_XERROR_HANDLER(XErrorHandler.IgnoreBadWindowHandler.getInstance()); int status = XlibWrapper.XGetWindowAttributes(XToolkit.getDisplay(), embedder, wattr.pData); - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); - if (status == 0 || - (XToolkit.saved_error != null && - XToolkit.saved_error.get_error_code() != XConstants.Success)) { + if ((status == 0) || + ((XErrorHandlerUtil.saved_error != null) && + (XErrorHandlerUtil.saved_error.get_error_code() != XConstants.Success))) { throw new 
XException("XGetWindowAttributes failed"); } @@ -240,13 +240,13 @@ } if ((event_mask & XConstants.PropertyChangeMask) == 0) { - XToolkit.WITH_XERROR_HANDLER(XErrorHandler.IgnoreBadWindowHandler.getInstance()); + XErrorHandlerUtil.WITH_XERROR_HANDLER(XErrorHandler.IgnoreBadWindowHandler.getInstance()); XlibWrapper.XSelectInput(XToolkit.getDisplay(), embedder, event_mask | XConstants.PropertyChangeMask); - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); - if (XToolkit.saved_error != null && - XToolkit.saved_error.get_error_code() != XConstants.Success) { + if ((XErrorHandlerUtil.saved_error != null) && + (XErrorHandlerUtil.saved_error.get_error_code() != XConstants.Success)) { throw new XException("XSelectInput failed"); } } @@ -394,13 +394,13 @@ /* Restore the original event mask for the embedder. */ if ((event_mask & XConstants.PropertyChangeMask) == 0) { - XToolkit.WITH_XERROR_HANDLER(XErrorHandler.IgnoreBadWindowHandler.getInstance()); + XErrorHandlerUtil.WITH_XERROR_HANDLER(XErrorHandler.IgnoreBadWindowHandler.getInstance()); XlibWrapper.XSelectInput(XToolkit.getDisplay(), embedder, event_mask); - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); - if (XToolkit.saved_error != null && - XToolkit.saved_error.get_error_code() != XConstants.Success) { + if ((XErrorHandlerUtil.saved_error != null) && + (XErrorHandlerUtil.saved_error.get_error_code() != XConstants.Success)) { throw new XException("XSelectInput failed"); } }
--- a/src/solaris/classes/sun/awt/X11/XEmbedCanvasPeer.java Wed May 29 13:22:58 2013 -0300 +++ b/src/solaris/classes/sun/awt/X11/XEmbedCanvasPeer.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2008, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -301,15 +301,15 @@ try { XWindowAttributes wattr = new XWindowAttributes(); try { - XToolkit.WITH_XERROR_HANDLER(XErrorHandler.IgnoreBadWindowHandler.getInstance()); + XErrorHandlerUtil.WITH_XERROR_HANDLER(XErrorHandler.IgnoreBadWindowHandler.getInstance()); int status = XlibWrapper.XGetWindowAttributes(XToolkit.getDisplay(), xembed.handle, wattr.pData); - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); - if (status == 0 || - (XToolkit.saved_error != null && - XToolkit.saved_error.get_error_code() != XConstants.Success)) { + if ((status == 0) || + ((XErrorHandlerUtil.saved_error != null) && + (XErrorHandlerUtil.saved_error.get_error_code() != XConstants.Success))) { return null; }
--- a/src/solaris/classes/sun/awt/X11/XErrorHandler.java Wed May 29 13:22:58 2013 -0300 +++ b/src/solaris/classes/sun/awt/X11/XErrorHandler.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -38,7 +38,30 @@ public static class XBaseErrorHandler extends XErrorHandler { @Override public int handleError(long display, XErrorEvent err) { - return XToolkit.SAVED_ERROR_HANDLER(display, err); + return XErrorHandlerUtil.SAVED_XERROR_HANDLER(display, err); + } + } + + /** + * This is a base synthetic error handler containing a boolean flag which allows + * to show that an error is handled or not. + */ + public static class XErrorHandlerWithFlag extends XBaseErrorHandler { + private volatile boolean errorOccurred = false; + + public boolean getErrorOccurredFlag() { + return errorOccurred; + } + + /** + * Sets an internal boolean flag to a particular value. Should be always called with + * <code>false</code> value of the parameter <code>errorOccurred</code> before this + * error handler is set as current. + * @param errorOccurred <code>true</code> to indicate that an error was handled, + * <code>false</code> to reset the internal boolean flag + */ + public void setErrorOccurredFlag(boolean errorOccurred) { + this.errorOccurred = errorOccurred; } } @@ -76,4 +99,51 @@ return theInstance; } } + + /** + * This is a synthetic error handler for errors generated by the native function + * <code>XShmAttach</code>. If an error is handled, an internal boolean flag of the + * handler is set to <code>true</code>. 
+ */ + public static final class XShmAttachHandler extends XErrorHandlerWithFlag { + private XShmAttachHandler() {} + + @Override + public int handleError(long display, XErrorEvent err) { + if (err.get_minor_code() == XConstants.X_ShmAttach) { + setErrorOccurredFlag(true); + return 0; + } + return super.handleError(display, err); + } + + // Shared instance + private static XShmAttachHandler theInstance = new XShmAttachHandler(); + public static XShmAttachHandler getInstance() { + return theInstance; + } + } + + /** + * This is a synthetic error handler for <code>BadAlloc</code> errors generated by the + * native <code>glX*</code> functions. Its internal boolean flag is set to <code>true</code>, + * if an error is handled. + */ + public static final class GLXBadAllocHandler extends XErrorHandlerWithFlag { + private GLXBadAllocHandler() {} + + @Override + public int handleError(long display, XErrorEvent err) { + if (err.get_error_code() == XConstants.BadAlloc) { + setErrorOccurredFlag(true); + return 0; + } + return super.handleError(display, err); + } + + private static GLXBadAllocHandler theInstance = new GLXBadAllocHandler(); + public static GLXBadAllocHandler getInstance() { + return theInstance; + } + } }
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/solaris/classes/sun/awt/X11/XErrorHandlerUtil.java Wed Jun 05 13:10:11 2013 -0300 @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package sun.awt.X11; + +import java.security.AccessController; +import sun.awt.SunToolkit; +import sun.security.action.GetBooleanAction; +import sun.util.logging.PlatformLogger; + +/** + * This class contains code of the global toolkit error handler, exposes static + * methods which allow to set and unset synthetic error handlers. + */ +public final class XErrorHandlerUtil { + private static final PlatformLogger log = PlatformLogger.getLogger("sun.awt.X11.XErrorHandlerUtil"); + + /** + * The connection to X11 window server. 
+ */ + private static long display; + + /** + * Error handler at the moment of <code>XErrorHandlerUtil</code> initialization. + */ + private static long saved_error_handler; + + /** + * XErrorEvent being handled. + */ + static volatile XErrorEvent saved_error; + + /** + * Current error handler or null if no error handler is set. + */ + private static XErrorHandler current_error_handler; + + /** + * Value of sun.awt.noisyerrorhandler system property. + */ + private static boolean noisyAwtHandler = AccessController.doPrivileged( + new GetBooleanAction("sun.awt.noisyerrorhandler")); + + /** + * The flag indicating that <code>init</code> was called already. + */ + private static boolean initPassed; + + /** + * Guarantees that no instance of this class can be created. + */ + private XErrorHandlerUtil() {} + + /** + * Sets the toolkit global error handler, stores the connection to X11 server, which + * will be used during an error handling process. This method is called once from + * <code>awt_init_Display</code> function defined in <code>awt_GraphicsEnv.c</code> + * file immediately after the connection to X11 window server is opened. + * @param display the connection to X11 server which should be stored + */ + private static void init(long display) { + SunToolkit.awtLock(); + try { + if (!initPassed) { + XErrorHandlerUtil.display = display; + saved_error_handler = XlibWrapper.SetToolkitErrorHandler(); + initPassed = true; + } + } finally { + SunToolkit.awtUnlock(); + } + } + + /** + * Sets a synthetic error handler. Must be called with the acquired AWT lock. + * @param handler the synthetic error handler to set + */ + public static void WITH_XERROR_HANDLER(XErrorHandler handler) { + saved_error = null; + current_error_handler = handler; + } + + /** + * Unsets a current synthetic error handler. Must be called with the acquired AWT lock. 
+ */ + public static void RESTORE_XERROR_HANDLER() { + // Wait until all requests are processed by the X server + // and only then uninstall the error handler. + XSync(); + current_error_handler = null; + } + + /** + * Should be called under LOCK. + */ + public static int SAVED_XERROR_HANDLER(long display, XErrorEvent error) { + if (saved_error_handler != 0) { + // Default XErrorHandler may just terminate the process. Don't call it. + // return XlibWrapper.CallErrorHandler(saved_error_handler, display, error.pData); + } + if (log.isLoggable(PlatformLogger.FINE)) { + log.fine("Unhandled XErrorEvent: " + + "id=" + error.get_resourceid() + ", " + + "serial=" + error.get_serial() + ", " + + "ec=" + error.get_error_code() + ", " + + "rc=" + error.get_request_code() + ", " + + "mc=" + error.get_minor_code()); + } + return 0; + } + + /** + * Called from the native code when an error occurs. + */ + private static int globalErrorHandler(long display, long event_ptr) { + if (noisyAwtHandler) { + XlibWrapper.PrintXErrorEvent(display, event_ptr); + } + XErrorEvent event = new XErrorEvent(event_ptr); + saved_error = event; + try { + if (current_error_handler != null) { + return current_error_handler.handleError(display, event); + } else { + return SAVED_XERROR_HANDLER(display, event); + } + } catch (Throwable z) { + log.fine("Error in GlobalErrorHandler", z); + } + return 0; + } + + private static void XSync() { + SunToolkit.awtLock(); + try { + XlibWrapper.XSync(display, 0); + } finally { + SunToolkit.awtUnlock(); + } + } +}
--- a/src/solaris/classes/sun/awt/X11/XQueryTree.java Wed May 29 13:22:58 2013 -0300 +++ b/src/solaris/classes/sun/awt/X11/XQueryTree.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2005, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -61,7 +61,7 @@ } __executed = true; if (errorHandler != null) { - XToolkit.WITH_XERROR_HANDLER(errorHandler); + XErrorHandlerUtil.WITH_XERROR_HANDLER(errorHandler); } Native.putLong(children_ptr, 0); int status = @@ -72,7 +72,7 @@ children_ptr, nchildren_ptr ); if (errorHandler != null) { - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); } return status; } finally {
--- a/src/solaris/classes/sun/awt/X11/XToolkit.java Wed May 29 13:22:58 2013 -0300 +++ b/src/solaris/classes/sun/awt/X11/XToolkit.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -128,7 +128,6 @@ initIDs(); setBackingStoreType(); } - noisyAwtHandler = AccessController.doPrivileged(new GetBooleanAction("sun.awt.noisyerrorhandler")); } /* @@ -137,78 +136,6 @@ */ static native long getTrayIconDisplayTimeout(); - //---- ERROR HANDLER CODE ----// - - /* - * Error handler at the moment of XToolkit initialization - */ - private static long saved_error_handler; - - /* - * XErrorEvent being handled - */ - static volatile XErrorEvent saved_error; - - /* - * Current error handler or null if no error handler is set - */ - private static XErrorHandler current_error_handler; - - /* - * Value of sun.awt.noisyerrorhandler system property - */ - private static boolean noisyAwtHandler; - - public static void WITH_XERROR_HANDLER(XErrorHandler handler) { - saved_error = null; - current_error_handler = handler; - } - - public static void RESTORE_XERROR_HANDLER() { - // wait until all requests are processed by the X server - // and only then uninstall the error handler - XSync(); - current_error_handler = null; - } - - // Should be called under LOCK - public static int SAVED_ERROR_HANDLER(long display, XErrorEvent error) { - if (saved_error_handler != 0) { - // Default XErrorHandler may just terminate the process. Don't call it. 
- // return XlibWrapper.CallErrorHandler(saved_error_handler, display, error.pData); - } - if (log.isLoggable(PlatformLogger.FINE)) { - log.fine("Unhandled XErrorEvent: " + - "id=" + error.get_resourceid() + ", " + - "serial=" + error.get_serial() + ", " + - "ec=" + error.get_error_code() + ", " + - "rc=" + error.get_request_code() + ", " + - "mc=" + error.get_minor_code()); - } - return 0; - } - - // Called from the native code when an error occurs - private static int globalErrorHandler(long display, long event_ptr) { - if (noisyAwtHandler) { - XlibWrapper.PrintXErrorEvent(display, event_ptr); - } - XErrorEvent event = new XErrorEvent(event_ptr); - saved_error = event; - try { - if (current_error_handler != null) { - return current_error_handler.handleError(display, event); - } else { - return SAVED_ERROR_HANDLER(display, event); - } - } catch (Throwable z) { - log.fine("Error in GlobalErrorHandler", z); - } - return 0; - } - - //---- END OF ERROR HANDLER CODE ----// - private native static void initIDs(); native static void waitForEvents(long nextTaskTime); static Thread toolkitThread; @@ -306,8 +233,6 @@ //set system property if not yet assigned System.setProperty("sun.awt.enableExtraMouseButtons", ""+areExtraMouseButtonsEnabled); - saved_error_handler = XlibWrapper.SetToolkitErrorHandler(); - // Detect display mode changes XlibWrapper.XSelectInput(XToolkit.getDisplay(), XToolkit.getDefaultRootWindow(), XConstants.StructureNotifyMask); XToolkit.addEventDispatcher(XToolkit.getDefaultRootWindow(), new XEventDispatcher() {
--- a/src/solaris/classes/sun/awt/X11/XTranslateCoordinates.java Wed May 29 13:22:58 2013 -0300 +++ b/src/solaris/classes/sun/awt/X11/XTranslateCoordinates.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2005, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -68,7 +68,7 @@ } __executed = true; if (errorHandler != null) { - XToolkit.WITH_XERROR_HANDLER(errorHandler); + XErrorHandlerUtil.WITH_XERROR_HANDLER(errorHandler); } int status = XlibWrapper.XTranslateCoordinates(XToolkit.getDisplay(), @@ -80,7 +80,7 @@ dest_y_ptr, child_ptr ); if (errorHandler != null) { - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); } return status; } finally {
--- a/src/solaris/classes/sun/awt/X11/XWM.java Wed May 29 13:22:58 2013 -0300 +++ b/src/solaris/classes/sun/awt/X11/XWM.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2008, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -284,12 +284,12 @@ winmgr_running = false; substruct.set_event_mask(XConstants.SubstructureRedirectMask); - XToolkit.WITH_XERROR_HANDLER(detectWMHandler); + XErrorHandlerUtil.WITH_XERROR_HANDLER(detectWMHandler); XlibWrapper.XChangeWindowAttributes(XToolkit.getDisplay(), XToolkit.getDefaultRootWindow(), XConstants.CWEventMask, substruct.pData); - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); /* * If no WM is running then our selection for SubstructureRedirect @@ -632,15 +632,16 @@ XToolkit.awtLock(); try { - XToolkit.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); + XErrorHandlerUtil.WITH_XERROR_HANDLER(XErrorHandler.VerifyChangePropertyHandler.getInstance()); XlibWrapper.XChangePropertyS(XToolkit.getDisplay(), XToolkit.getDefaultRootWindow(), XA_ICEWM_WINOPTHINT.getAtom(), XA_ICEWM_WINOPTHINT.getAtom(), 8, XConstants.PropModeReplace, new String(opt)); - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); - if (XToolkit.saved_error != null && XToolkit.saved_error.get_error_code() != XConstants.Success) { + if ((XErrorHandlerUtil.saved_error != null) && + (XErrorHandlerUtil.saved_error.get_error_code() != XConstants.Success)) { log.finer("Erorr getting XA_ICEWM_WINOPTHINT property"); return false; }
--- a/src/solaris/classes/sun/awt/X11/XlibUtil.java Wed May 29 13:22:58 2013 -0300 +++ b/src/solaris/classes/sun/awt/X11/XlibUtil.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006, 2008, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -151,8 +151,8 @@ { int status = xtc.execute(XErrorHandler.IgnoreBadWindowHandler.getInstance()); if ((status != 0) && - ((XToolkit.saved_error == null) || - (XToolkit.saved_error.get_error_code() == XConstants.Success))) + ((XErrorHandlerUtil.saved_error == null) || + (XErrorHandlerUtil.saved_error.get_error_code() == XConstants.Success))) { translated = new Point(xtc.get_dest_x(), xtc.get_dest_y()); } @@ -345,13 +345,13 @@ XWindowAttributes wattr = new XWindowAttributes(); try { - XToolkit.WITH_XERROR_HANDLER(XErrorHandler.IgnoreBadWindowHandler.getInstance()); + XErrorHandlerUtil.WITH_XERROR_HANDLER(XErrorHandler.IgnoreBadWindowHandler.getInstance()); int status = XlibWrapper.XGetWindowAttributes(XToolkit.getDisplay(), window, wattr.pData); - XToolkit.RESTORE_XERROR_HANDLER(); + XErrorHandlerUtil.RESTORE_XERROR_HANDLER(); if ((status != 0) && - ((XToolkit.saved_error == null) || - (XToolkit.saved_error.get_error_code() == XConstants.Success))) + ((XErrorHandlerUtil.saved_error == null) || + (XErrorHandlerUtil.saved_error.get_error_code() == XConstants.Success))) { return wattr.get_map_state(); }
--- a/src/solaris/classes/sun/awt/X11/generator/WrapperGenerator.java Wed May 29 13:22:58 2013 -0300 +++ b/src/solaris/classes/sun/awt/X11/generator/WrapperGenerator.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2009, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -996,7 +996,7 @@ pw.println("\t\t\t}"); pw.println("\t\t\t__executed = true;"); pw.println("\t\t\tif (errorHandler != null) {"); - pw.println("\t\t\t XToolkit.WITH_XERROR_HANDLER(errorHandler);"); + pw.println("\t\t\t XErrorHandlerUtil.WITH_XERROR_HANDLER(errorHandler);"); pw.println("\t\t\t}"); iter = ft.getArguments().iterator(); while (iter.hasNext()) { @@ -1025,7 +1025,7 @@ } pw.println("\t\t\t);"); pw.println("\t\t\tif (errorHandler != null) {"); - pw.println("\t\t\t XToolkit.RESTORE_XERROR_HANDLER();"); + pw.println("\t\t\t XErrorHandlerUtil.RESTORE_XERROR_HANDLER();"); pw.println("\t\t\t}"); if (!ft.isVoid()) { pw.println("\t\t\treturn status;");
--- a/src/solaris/classes/sun/print/IPPPrintService.java Wed May 29 13:22:58 2013 -0300 +++ b/src/solaris/classes/sun/print/IPPPrintService.java Wed Jun 05 13:10:11 2013 -0300 @@ -1023,6 +1023,13 @@ // this is already supported in UnixPrintJob catList.add(Destination.class); + + // It is unfortunate that CUPS doesn't provide a way to query + // if printer supports collation but since most printers + // now supports collation and that most OS has a way + // of setting it, it is a safe assumption to just always + // include SheetCollate as supported attribute. + catList.add(SheetCollate.class); } // With the assumption that Chromaticity is equivalent to
--- a/src/solaris/native/java/net/NetworkInterface.c Wed May 29 13:22:58 2013 -0300 +++ b/src/solaris/native/java/net/NetworkInterface.c Wed Jun 05 13:10:11 2013 -0300 @@ -658,9 +658,9 @@ if (ia2Obj) { setInetAddress_addr(env, ia2Obj, htonl(((struct sockaddr_in*)addrP->brdcast)->sin_addr.s_addr)); (*env)->SetObjectField(env, ibObj, ni_ib4broadcastID, ia2Obj); - (*env)->SetShortField(env, ibObj, ni_ib4maskID, addrP->mask); } } + (*env)->SetShortField(env, ibObj, ni_ib4maskID, addrP->mask); (*env)->SetObjectArrayElement(env, bindArr, bind_index++, ibObj); } } @@ -887,15 +887,12 @@ addrP->mask = prefix; addrP->next = 0; if (family == AF_INET) { - /* - * Deal with broadcast addr & subnet mask - */ + // Deal with broadcast addr & subnet mask struct sockaddr * brdcast_to = (struct sockaddr *) ((char *) addrP + sizeof(netaddr) + addr_size); addrP->brdcast = getBroadcast(env, sock, name, brdcast_to ); - if (addrP->brdcast && (mask = getSubnet(env, sock, name)) != -1) { + if ((mask = getSubnet(env, sock, name)) != -1) addrP->mask = mask; - } } /**
--- a/src/solaris/native/sun/awt/awt_GraphicsEnv.c Wed May 29 13:22:58 2013 -0300 +++ b/src/solaris/native/sun/awt/awt_GraphicsEnv.c Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -758,6 +758,8 @@ } XSetIOErrorHandler(xioerror_handler); + JNU_CallStaticMethodByName(env, NULL, "sun/awt/X11/XErrorHandlerUtil", "init", "(J)V", + ptr_to_jlong(awt_display)); /* set awt_numScreens, and whether or not we're using Xinerama */ xineramaInit(); @@ -904,28 +906,12 @@ static jint canUseShmExt = UNSET_MITSHM; static jint canUseShmExtPixmaps = UNSET_MITSHM; -static jboolean xshmAttachFailed = JNI_FALSE; - -int J2DXErrHandler(Display *display, XErrorEvent *xerr) { - int ret = 0; - if (xerr->minor_code == X_ShmAttach) { - xshmAttachFailed = JNI_TRUE; - } else { - ret = (*xerror_saved_handler)(display, xerr); - } - return ret; -} -jboolean isXShmAttachFailed() { - return xshmAttachFailed; -} -void resetXShmAttachFailed() { - xshmAttachFailed = JNI_FALSE; -} void TryInitMITShm(JNIEnv *env, jint *shmExt, jint *shmPixmaps) { XShmSegmentInfo shminfo; int XShmMajor, XShmMinor; int a, b, c; + jboolean xShmAttachResult; AWT_LOCK(); if (canUseShmExt != UNSET_MITSHM) { @@ -963,21 +949,14 @@ } shminfo.readOnly = True; - resetXShmAttachFailed(); - /** - * The J2DXErrHandler handler will set xshmAttachFailed - * to JNI_TRUE if any Shm error has occured. - */ - EXEC_WITH_XERROR_HANDLER(J2DXErrHandler, - XShmAttach(awt_display, &shminfo)); - + xShmAttachResult = TryXShmAttach(env, awt_display, &shminfo); /** * Get rid of the id now to reduce chances of leaking * system resources. 
*/ shmctl(shminfo.shmid, IPC_RMID, 0); - if (isXShmAttachFailed() == JNI_FALSE) { + if (xShmAttachResult == JNI_TRUE) { canUseShmExt = CAN_USE_MITSHM; /* check if we can use shared pixmaps */ XShmQueryVersion(awt_display, &XShmMajor, &XShmMinor, @@ -992,6 +971,23 @@ } AWT_UNLOCK(); } + +/* + * Must be called with the acquired AWT lock. + */ +jboolean TryXShmAttach(JNIEnv *env, Display *display, XShmSegmentInfo *shminfo) { + jboolean errorOccurredFlag = JNI_FALSE; + jobject errorHandlerRef; + + /* + * XShmAttachHandler will set its internal flag to JNI_TRUE, if any Shm error occurs. + */ + EXEC_WITH_XERROR_HANDLER(env, "sun/awt/X11/XErrorHandler$XShmAttachHandler", + "()Lsun/awt/X11/XErrorHandler$XShmAttachHandler;", JNI_TRUE, + errorHandlerRef, errorOccurredFlag, + XShmAttach(display, shminfo)); + return errorOccurredFlag == JNI_FALSE ? JNI_TRUE : JNI_FALSE; +} #endif /* MITSHM */ /*
--- a/src/solaris/native/sun/awt/awt_GraphicsEnv.h Wed May 29 13:22:58 2013 -0300 +++ b/src/solaris/native/sun/awt/awt_GraphicsEnv.h Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2003, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -50,8 +50,7 @@ extern int XShmQueryExtension(); void TryInitMITShm(JNIEnv *env, jint *shmExt, jint *shmPixmaps); -void resetXShmAttachFailed(); -jboolean isXShmAttachFailed(); +jboolean TryXShmAttach(JNIEnv *env, Display *display, XShmSegmentInfo *shminfo); #endif /* MITSHM */
--- a/src/solaris/native/sun/awt/awt_InputMethod.c Wed May 29 13:22:58 2013 -0300 +++ b/src/solaris/native/sun/awt/awt_InputMethod.c Wed Jun 05 13:10:11 2013 -0300 @@ -185,7 +185,6 @@ ); #endif -#ifdef XAWT_HACK /* * This function is stolen from /src/solaris/hpi/src/system_md.c * It is used in setting the time in Java-level InputEvents @@ -197,7 +196,6 @@ gettimeofday(&t, NULL); return ((jlong)t.tv_sec) * 1000 + (jlong)(t.tv_usec/1000); } -#endif /* XAWT_HACK */ /* * Converts the wchar_t string to a multi-byte string calling wcstombs(). A @@ -546,11 +544,7 @@ "dispatchCommittedText", "(Ljava/lang/String;J)V", javastr, -#ifndef XAWT_HACK - awt_util_nowMillisUTC_offset(event->time)); -#else event->time); -#endif } break;
--- a/src/solaris/native/sun/awt/awt_util.c Wed May 29 13:22:58 2013 -0300 +++ b/src/solaris/native/sun/awt/awt_util.c Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1995, 2003, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1995, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -41,18 +41,6 @@ #include "java_awt_event_MouseWheelEvent.h" -/* - * Since X reports protocol errors asynchronously, we often need to - * install an error handler that acts like a callback. While that - * specialized handler is installed we save original handler here. - */ -XErrorHandler xerror_saved_handler; - -/* - * A place for error handler to report the error code. - */ -unsigned char xerror_code; - extern jint getModifiers(uint32_t state, jint button, jint keyCode); extern jint getButton(uint32_t button);
--- a/src/solaris/native/sun/awt/awt_util.h Wed May 29 13:22:58 2013 -0300 +++ b/src/solaris/native/sun/awt/awt_util.h Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1995, 2004, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1995, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,42 +29,47 @@ #ifndef HEADLESS #include "gdefs.h" -#define WITH_XERROR_HANDLER(f) do { \ - XSync(awt_display, False); \ - xerror_code = Success; \ - xerror_saved_handler = XSetErrorHandler(f); \ -} while (0) - -/* Convenience macro for handlers to use */ -#define XERROR_SAVE(err) do { \ - xerror_code = (err)->error_code; \ -} while (0) - -#define RESTORE_XERROR_HANDLER do { \ - XSync(awt_display, False); \ - XSetErrorHandler(xerror_saved_handler); \ -} while (0) - -#define EXEC_WITH_XERROR_HANDLER(f, code) do { \ - WITH_XERROR_HANDLER(f); \ - do { \ - code; \ - } while (0); \ - RESTORE_XERROR_HANDLER; \ +/* + * Expected types of arguments of the macro. + * (JNIEnv*, const char*, const char*, jboolean, jobject) + */ +#define WITH_XERROR_HANDLER(env, handlerClassName, getInstanceSignature, \ + handlerHasFlag, handlerRef) do { \ + handlerRef = JNU_CallStaticMethodByName(env, NULL, handlerClassName, "getInstance", \ + getInstanceSignature).l; \ + if (handlerHasFlag == JNI_TRUE) { \ + JNU_CallMethodByName(env, NULL, handlerRef, "setErrorOccurredFlag", "(Z)V", JNI_FALSE); \ + } \ + JNU_CallStaticMethodByName(env, NULL, "sun/awt/X11/XErrorHandlerUtil", "WITH_XERROR_HANDLER", \ + "(Lsun/awt/X11/XErrorHandler;)V", handlerRef); \ } while (0) /* - * Since X reports protocol errors asynchronously, we often need to - * install an error handler that acts like a callback. While that - * specialized handler is installed we save original handler here. + * Expected types of arguments of the macro. 
+ * (JNIEnv*) */ -extern XErrorHandler xerror_saved_handler; +#define RESTORE_XERROR_HANDLER(env) do { \ + JNU_CallStaticMethodByName(env, NULL, "sun/awt/X11/XErrorHandlerUtil", \ + "RESTORE_XERROR_HANDLER", "()V"); \ +} while (0) /* - * A place for error handler to report the error code. + * Expected types of arguments of the macro. + * (JNIEnv*, const char*, const char*, jboolean, jobject, jboolean, No type - C expression) */ -extern unsigned char xerror_code; - +#define EXEC_WITH_XERROR_HANDLER(env, handlerClassName, getInstanceSignature, handlerHasFlag, \ + handlerRef, errorOccurredFlag, code) do { \ + handlerRef = NULL; \ + WITH_XERROR_HANDLER(env, handlerClassName, getInstanceSignature, handlerHasFlag, handlerRef); \ + do { \ + code; \ + } while (0); \ + RESTORE_XERROR_HANDLER(env); \ + if (handlerHasFlag == JNI_TRUE) { \ + errorOccurredFlag = JNU_CallMethodByName(env, NULL, handlerRef, "getErrorOccurredFlag", \ + "()Z").z; \ + } \ +} while (0) #endif /* !HEADLESS */ #ifndef INTERSECTS
--- a/src/solaris/native/sun/java2d/opengl/GLXSurfaceData.c Wed May 29 13:22:58 2013 -0300 +++ b/src/solaris/native/sun/java2d/opengl/GLXSurfaceData.c Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -48,8 +48,6 @@ extern void OGLSD_SetNativeDimensions(JNIEnv *env, OGLSDOps *oglsdo, jint w, jint h); -jboolean surfaceCreationFailed = JNI_FALSE; - #endif /* !HEADLESS */ JNIEXPORT void JNICALL @@ -349,18 +347,6 @@ return JNI_TRUE; } -static int -GLXSD_BadAllocXErrHandler(Display *display, XErrorEvent *xerr) -{ - int ret = 0; - if (xerr->error_code == BadAlloc) { - surfaceCreationFailed = JNI_TRUE; - } else { - ret = (*xerror_saved_handler)(display, xerr); - } - return ret; -} - JNIEXPORT jboolean JNICALL Java_sun_java2d_opengl_GLXSurfaceData_initPbuffer (JNIEnv *env, jobject glxsd, @@ -376,6 +362,8 @@ int attrlist[] = {GLX_PBUFFER_WIDTH, 0, GLX_PBUFFER_HEIGHT, 0, GLX_PRESERVED_CONTENTS, GL_FALSE, 0}; + jboolean errorOccurredFlag; + jobject errorHandlerRef; J2dTraceLn3(J2D_TRACE_INFO, "GLXSurfaceData_initPbuffer: w=%d h=%d opq=%d", @@ -403,12 +391,13 @@ attrlist[1] = width; attrlist[3] = height; - surfaceCreationFailed = JNI_FALSE; - EXEC_WITH_XERROR_HANDLER( - GLXSD_BadAllocXErrHandler, - pbuffer = j2d_glXCreatePbuffer(awt_display, - glxinfo->fbconfig, attrlist)); - if ((pbuffer == 0) || surfaceCreationFailed) { + errorOccurredFlag = JNI_FALSE; + EXEC_WITH_XERROR_HANDLER(env, "sun/awt/X11/XErrorHandler$GLXBadAllocHandler", + "()Lsun/awt/X11/XErrorHandler$GLXBadAllocHandler;", JNI_TRUE, + errorHandlerRef, errorOccurredFlag, + pbuffer = j2d_glXCreatePbuffer(awt_display, glxinfo->fbconfig, attrlist)); + + if ((pbuffer == 0) || errorOccurredFlag) { 
J2dRlsTraceLn(J2D_TRACE_ERROR, "GLXSurfaceData_initPbuffer: could not create glx pbuffer"); return JNI_FALSE;
--- a/src/solaris/native/sun/java2d/x11/X11SurfaceData.c Wed May 29 13:22:58 2013 -0300 +++ b/src/solaris/native/sun/java2d/x11/X11SurfaceData.c Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -65,7 +65,6 @@ static DisposeFunc X11SD_Dispose; static GetPixmapBgFunc X11SD_GetPixmapWithBg; static ReleasePixmapBgFunc X11SD_ReleasePixmapWithBg; -extern int J2DXErrHandler(Display *display, XErrorEvent *xerr); extern AwtGraphicsConfigDataPtr getGraphicsConfigFromComponentPeer(JNIEnv *env, jobject this); extern struct X11GraphicsConfigIDs x11GraphicsConfigIDs; @@ -521,6 +520,8 @@ { XImage *img = NULL; XShmSegmentInfo *shminfo; + JNIEnv* env; + jboolean xShmAttachResult; shminfo = malloc(sizeof(XShmSegmentInfo)); if (shminfo == NULL) { @@ -559,9 +560,8 @@ shminfo->readOnly = False; - resetXShmAttachFailed(); - EXEC_WITH_XERROR_HANDLER(J2DXErrHandler, - XShmAttach(awt_display, shminfo)); + env = (JNIEnv*)JNU_GetEnv(jvm, JNI_VERSION_1_2); + xShmAttachResult = TryXShmAttach(env, awt_display, shminfo); /* * Once the XSync round trip has finished then we @@ -570,7 +570,7 @@ */ shmctl(shminfo->shmid, IPC_RMID, 0); - if (isXShmAttachFailed() == JNI_TRUE) { + if (xShmAttachResult == JNI_FALSE) { J2dRlsTraceLn1(J2D_TRACE_ERROR, "X11SD_SetupSharedSegment XShmAttach has failed: %s", strerror(errno));
--- a/src/solaris/native/sun/xawt/XlibWrapper.c Wed May 29 13:22:58 2013 -0300 +++ b/src/solaris/native/sun/xawt/XlibWrapper.c Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1264,8 +1264,8 @@ if (jvm != NULL) { env = (JNIEnv *)JNU_GetEnv(jvm, JNI_VERSION_1_2); if (env) { - return JNU_CallStaticMethodByName(env, NULL, "sun/awt/X11/XToolkit", "globalErrorHandler", "(JJ)I", - ptr_to_jlong(dpy), ptr_to_jlong(event)).i; + return JNU_CallStaticMethodByName(env, NULL, "sun/awt/X11/XErrorHandlerUtil", + "globalErrorHandler", "(JJ)I", ptr_to_jlong(dpy), ptr_to_jlong(event)).i; } } return 0;
--- a/src/windows/classes/sun/awt/windows/WPathGraphics.java Wed May 29 13:22:58 2013 -0300 +++ b/src/windows/classes/sun/awt/windows/WPathGraphics.java Wed Jun 05 13:10:11 2013 -0300 @@ -549,6 +549,8 @@ userx += xAdvance; userpos.x += xAdvance; deviceTransform.transform(userpos, devpos); + devx = devpos.x; + devy = devpos.y; } } else { super.drawString(str, x, y, font, frc, targetW);
--- a/src/windows/classes/sun/util/locale/provider/HostLocaleProviderAdapterImpl.java Wed May 29 13:22:58 2013 -0300 +++ b/src/windows/classes/sun/util/locale/provider/HostLocaleProviderAdapterImpl.java Wed Jun 05 13:10:11 2013 -0300 @@ -48,7 +48,6 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicReferenceArray; import java.util.spi.CalendarDataProvider; -import java.util.spi.CalendarNameProvider; import java.util.spi.CurrencyNameProvider; import java.util.spi.LocaleNameProvider; import sun.util.spi.CalendarProvider; @@ -364,32 +363,6 @@ }; } - public static CalendarNameProvider getCalendarNameProvider() { - return new CalendarNameProvider() { - @Override - public Locale[] getAvailableLocales() { - return getSupportedCalendarLocales(); - } - - @Override - public boolean isSupportedLocale(Locale locale) { - return isSupportedCalendarLocale(locale); - } - - @Override - public String getDisplayName(String calType, int field, int value, - int style, Locale locale) { - return null; - } - - @Override - public Map<String, Integer> getDisplayNames(String calType, - int field, int style, Locale locale) { - return null; - } - }; - } - public static CalendarProvider getCalendarProvider() { return new CalendarProvider() { @Override
--- a/src/windows/native/java/net/DualStackPlainDatagramSocketImpl.c Wed May 29 13:22:58 2013 -0300 +++ b/src/windows/native/java/net/DualStackPlainDatagramSocketImpl.c Wed Jun 05 13:10:11 2013 -0300 @@ -256,14 +256,14 @@ packetBuffer = (*env)->GetObjectField(env, dpObj, dp_bufID); packetBufferOffset = (*env)->GetIntField(env, dpObj, dp_offsetID); packetBufferLen = (*env)->GetIntField(env, dpObj, dp_bufLengthID); + /* Note: the buffer needn't be greater than 65,536 (0xFFFF) + * the max size of an IP packet. Anything bigger is truncated anyway. + */ + if (packetBufferLen > MAX_PACKET_LEN) { + packetBufferLen = MAX_PACKET_LEN; + } if (packetBufferLen > MAX_BUFFER_LEN) { - /* Note: the buffer needn't be greater than 65,536 (0xFFFF) - * the max size of an IP packet. Anything bigger is truncated anyway. - */ - if (packetBufferLen > MAX_PACKET_LEN) { - packetBufferLen = MAX_PACKET_LEN; - } fullPacket = (char *)malloc(packetBufferLen); if (!fullPacket) { JNU_ThrowOutOfMemoryError(env, "Native heap allocation failed");
--- a/src/windows/native/java/net/TwoStacksPlainDatagramSocketImpl.c Wed May 29 13:22:58 2013 -0300 +++ b/src/windows/native/java/net/TwoStacksPlainDatagramSocketImpl.c Wed Jun 05 13:10:11 2013 -0300 @@ -145,7 +145,7 @@ /* * This function returns JNI_TRUE if the datagram size exceeds the underlying * provider's ability to send to the target address. The following OS - * oddies have been observed :- + * oddities have been observed :- * * 1. On Windows 95/98 if we try to send a datagram > 12k to an application * on the same machine then the send will fail silently. @@ -218,7 +218,7 @@ /* * Step 3: On Windows 95/98 then enumerate the IP addresses on - * this machine. This is necesary because we need to check if the + * this machine. This is neccesary because we need to check if the * datagram is being sent to an application on the same machine. */ if (is95or98) { @@ -565,8 +565,8 @@ if (xp_or_later) { /* SIO_UDP_CONNRESET fixes a bug introduced in Windows 2000, which - * returns connection reset errors un connected UDP sockets (as well - * as connected sockets. The solution is to only enable this feature + * returns connection reset errors on connected UDP sockets (as well + * as connected sockets). The solution is to only enable this feature * when the socket is connected */ DWORD x1, x2; /* ignored result codes */ @@ -690,6 +690,12 @@ fd = (*env)->GetIntField(env, fdObj, IO_fd_fdID); packetBufferLen = (*env)->GetIntField(env, packet, dp_lengthID); + /* Note: the buffer needn't be greater than 65,536 (0xFFFF)... + * the maximum size of an IP packet. Anything bigger is truncated anyway. 
+ */ + if (packetBufferLen > MAX_PACKET_LEN) { + packetBufferLen = MAX_PACKET_LEN; + } if (connected) { addrp = 0; /* arg to JVM_Sendto () null in this case */ @@ -728,7 +734,7 @@ } /* When JNI-ifying the JDK's IO routines, we turned - * read's and write's of byte arrays of size greater + * reads and writes of byte arrays of size greater * than 2048 bytes into several operations of size 2048. * This saves a malloc()/memcpy()/free() for big * buffers. This is OK for file IO and TCP, but that
--- a/test/ProblemList.txt Wed May 29 13:22:58 2013 -0300 +++ b/test/ProblemList.txt Wed Jun 05 13:10:11 2013 -0300 @@ -122,9 +122,6 @@ # jdk_lang -# 8009615 -java/lang/instrument/IsModifiableClassAgent.java generic-all - # 6944188 java/lang/management/ThreadMXBean/ThreadStateTest.java generic-all @@ -137,6 +134,9 @@ # 8008200 java/lang/Class/asSubclass/BasicUnit.java generic-all +# 8015780 +java/lang/reflect/Method/GenericStringTest.java generic-all + ############################################################################ # jdk_management @@ -199,12 +199,6 @@ # 7143960 java/net/DatagramSocket/SendDatagramToBadAddress.java macosx-all -# 8014720 -java/net/ResponseCache/B6181108.java generic-all - -# 8014723 -sun/misc/URLClassPath/ClassnameCharTest.java generic-all - # 8014719 sun/net/www/http/HttpClient/ProxyTest.java generic-all @@ -236,9 +230,6 @@ # 7132677 java/nio/channels/Selector/OutOfBand.java macosx-all -# 8003895 -java/nio/channels/AsynchronousChannelGroup/Unbounded.java windows-amd64 - ############################################################################ # jdk_rmi @@ -277,6 +268,13 @@ sun/security/pkcs11/ec/ReadPKCS12.java solaris-all sun/security/pkcs11/sslecc/ClientJSSEServerJSSE.java solaris-all +# 8005247 +sun/security/pkcs11/ec/TestECDSA.java solaris-all + +# 8009438 +sun/security/pkcs11/Secmod/AddPrivateKey.java linux-all +sun/security/pkcs11/Secmod/TrustAnchors.java linux-all + # 7041639, Solaris DSA keypair generation bug (Note: jdk_util also affected) java/security/KeyPairGenerator/SolarisShortDSA.java solaris-all sun/security/tools/jarsigner/onlymanifest.sh solaris-all @@ -331,6 +329,8 @@ # Tests take too long, on sparcs see 7143279 tools/pack200/CommandLineTests.java solaris-all, macosx-all tools/pack200/Pack200Test.java solaris-all, macosx-all +# 8015666 +tools/pack200/TimeStamp.java generic-all # 8007410 tools/launcher/FXLauncherTest.java linux-all
--- a/test/com/sun/crypto/provider/Mac/HmacPBESHA1.java Wed May 29 13:22:58 2013 -0300 +++ b/test/com/sun/crypto/provider/Mac/HmacPBESHA1.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,8 +23,8 @@ /** * @test - * @bug 4893959 - * @summary basic test for HmacPBESHA1 + * @bug 4893959 8013069 + * @summary basic test for PBE MAC algorithms. * @author Valerie Peng */ import java.io.PrintStream; @@ -68,8 +68,9 @@ } Mac mac = Mac.getInstance(algo, PROVIDER); byte[] plainText = new byte[30]; - - mac.init(key); + PBEParameterSpec spec = + new PBEParameterSpec("saltValue".getBytes(), 250); + mac.init(key, spec); mac.update(plainText); byte[] value1 = mac.doFinal(); if (value1.length != length) {
--- a/test/com/sun/crypto/provider/Mac/MacClone.java Wed May 29 13:22:58 2013 -0300 +++ b/test/com/sun/crypto/provider/Mac/MacClone.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,12 +23,13 @@ /* * @test - * @bug 7087021 - * @summary MacClone + * @bug 7087021 8013069 + * @summary Clone tests for all MAC algorithms. * @author Jan Luehe */ +import java.security.spec.AlgorithmParameterSpec; import javax.crypto.*; -import javax.crypto.spec.SecretKeySpec; +import javax.crypto.spec.*; public class MacClone { @@ -39,18 +40,23 @@ KeyGenerator kgen = KeyGenerator.getInstance("DES"); SecretKey skey = kgen.generateKey(); for (String algo : algos) { - doTest(algo, skey); + doTest(algo, skey, null); } - String[] algos2 = { "HmacPBESHA1" }; + String[] algos2 = { "HmacPBESHA1", "PBEWithHmacSHA1", + "PBEWithHmacSHA224", "PBEWithHmacSHA256", + "PBEWithHmacSHA384", "PBEWithHmacSHA512" }; skey = new SecretKeySpec("whatever".getBytes(), "PBE"); + PBEParameterSpec params = + new PBEParameterSpec("1234567890".getBytes(), 500); for (String algo : algos2) { - doTest(algo, skey); + doTest(algo, skey, params); } System.out.println("Test Passed"); } - private static void doTest(String algo, SecretKey skey) throws Exception { + private static void doTest(String algo, SecretKey skey, + AlgorithmParameterSpec params) throws Exception { // // Clone an uninitialized Mac object // @@ -72,7 +78,7 @@ // Clone an initialized Mac object // mac = Mac.getInstance(algo, "SunJCE"); - mac.init(skey); + mac.init(skey, params); macClone = (Mac)mac.clone(); System.out.println(macClone.getProvider().toString()); System.out.println(macClone.getAlgorithm());
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/com/sun/crypto/provider/TLS/TestLeadingZeroes.java Wed Jun 05 13:10:11 2013 -0300 @@ -0,0 +1,420 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8014618 + * @summary Need to strip leading zeros in TlsPremasterSecret of DHKeyAgreement + * @author Pasi Eronen + */ + +import java.io.*; +import java.security.*; +import java.security.spec.*; +import java.security.interfaces.*; +import javax.crypto.*; +import javax.crypto.spec.*; +import javax.crypto.interfaces.*; +import com.sun.crypto.provider.SunJCE; + +/** + * Test that leading zeroes are stripped in TlsPremasterSecret case, + * but are left as-is in other cases. + * + * We use pre-generated keypairs, since with randomly generated keypairs, + * a leading zero happens only (roughly) 1 out of 256 cases. 
+ */ + +public class TestLeadingZeroes { + + private static final String SUNJCE = "SunJCE"; + + private TestLeadingZeroes() {} + + public static void main(String argv[]) throws Exception { + // Add JCE to the list of providers + SunJCE jce = new SunJCE(); + Security.addProvider(jce); + + TestLeadingZeroes keyAgree = new TestLeadingZeroes(); + keyAgree.run(); + System.out.println("Test Passed"); + } + + private void run() throws Exception { + + // decode pre-generated keypairs + KeyFactory kfac = KeyFactory.getInstance("DH"); + PublicKey alicePubKey = + kfac.generatePublic(new X509EncodedKeySpec(alicePubKeyEnc)); + PublicKey bobPubKey = + kfac.generatePublic(new X509EncodedKeySpec(bobPubKeyEnc)); + PrivateKey alicePrivKey = + kfac.generatePrivate(new PKCS8EncodedKeySpec(alicePrivKeyEnc)); + PrivateKey bobPrivKey = + kfac.generatePrivate(new PKCS8EncodedKeySpec(bobPrivKeyEnc)); + + // generate normal shared secret + KeyAgreement aliceKeyAgree = KeyAgreement.getInstance("DH", SUNJCE); + aliceKeyAgree.init(alicePrivKey); + aliceKeyAgree.doPhase(bobPubKey, true); + byte[] sharedSecret = aliceKeyAgree.generateSecret(); + System.out.println("shared secret:\n" + toHexString(sharedSecret)); + + // verify that leading zero is present + if (sharedSecret.length != 128) { + throw new Exception("Unexpected shared secret length"); + } + if (sharedSecret[0] != 0) { + throw new Exception("First byte is not zero as expected"); + } + + // now, test TLS premaster secret + aliceKeyAgree.init(alicePrivKey); + aliceKeyAgree.doPhase(bobPubKey, true); + byte[] tlsPremasterSecret = + aliceKeyAgree.generateSecret("TlsPremasterSecret").getEncoded(); + System.out.println( + "tls premaster secret:\n" + toHexString(tlsPremasterSecret)); + + // check that leading zero has been stripped + if (tlsPremasterSecret.length != 127) { + throw new Exception("Unexpected TLS premaster secret length"); + } + if (tlsPremasterSecret[0] == 0) { + throw new Exception("First byte is zero"); + } + for (int i = 0; 
i < tlsPremasterSecret.length; i++) { + if (tlsPremasterSecret[i] != sharedSecret[i+1]) { + throw new Exception("Shared secrets differ"); + } + } + + } + + /* + * Converts a byte to hex digit and writes to the supplied buffer + */ + private void byte2hex(byte b, StringBuffer buf) { + char[] hexChars = { '0', '1', '2', '3', '4', '5', '6', '7', '8', + '9', 'A', 'B', 'C', 'D', 'E', 'F' }; + int high = ((b & 0xf0) >> 4); + int low = (b & 0x0f); + buf.append(hexChars[high]); + buf.append(hexChars[low]); + } + + /* + * Converts a byte array to hex string + */ + private String toHexString(byte[] block) { + StringBuffer buf = new StringBuffer(); + + int len = block.length; + + for (int i = 0; i < len; i++) { + byte2hex(block[i], buf); + if (i < len-1) { + buf.append(":"); + } + } + return buf.toString(); + } + + private static final byte alicePubKeyEnc[] = { + (byte)0x30, (byte)0x82, (byte)0x01, (byte)0x24, + (byte)0x30, (byte)0x81, (byte)0x99, (byte)0x06, + (byte)0x09, (byte)0x2A, (byte)0x86, (byte)0x48, + (byte)0x86, (byte)0xF7, (byte)0x0D, (byte)0x01, + (byte)0x03, (byte)0x01, (byte)0x30, (byte)0x81, + (byte)0x8B, (byte)0x02, (byte)0x81, (byte)0x81, + (byte)0x00, (byte)0xF4, (byte)0x88, (byte)0xFD, + (byte)0x58, (byte)0x4E, (byte)0x49, (byte)0xDB, + (byte)0xCD, (byte)0x20, (byte)0xB4, (byte)0x9D, + (byte)0xE4, (byte)0x91, (byte)0x07, (byte)0x36, + (byte)0x6B, (byte)0x33, (byte)0x6C, (byte)0x38, + (byte)0x0D, (byte)0x45, (byte)0x1D, (byte)0x0F, + (byte)0x7C, (byte)0x88, (byte)0xB3, (byte)0x1C, + (byte)0x7C, (byte)0x5B, (byte)0x2D, (byte)0x8E, + (byte)0xF6, (byte)0xF3, (byte)0xC9, (byte)0x23, + (byte)0xC0, (byte)0x43, (byte)0xF0, (byte)0xA5, + (byte)0x5B, (byte)0x18, (byte)0x8D, (byte)0x8E, + (byte)0xBB, (byte)0x55, (byte)0x8C, (byte)0xB8, + (byte)0x5D, (byte)0x38, (byte)0xD3, (byte)0x34, + (byte)0xFD, (byte)0x7C, (byte)0x17, (byte)0x57, + (byte)0x43, (byte)0xA3, (byte)0x1D, (byte)0x18, + (byte)0x6C, (byte)0xDE, (byte)0x33, (byte)0x21, + (byte)0x2C, (byte)0xB5, 
(byte)0x2A, (byte)0xFF, + (byte)0x3C, (byte)0xE1, (byte)0xB1, (byte)0x29, + (byte)0x40, (byte)0x18, (byte)0x11, (byte)0x8D, + (byte)0x7C, (byte)0x84, (byte)0xA7, (byte)0x0A, + (byte)0x72, (byte)0xD6, (byte)0x86, (byte)0xC4, + (byte)0x03, (byte)0x19, (byte)0xC8, (byte)0x07, + (byte)0x29, (byte)0x7A, (byte)0xCA, (byte)0x95, + (byte)0x0C, (byte)0xD9, (byte)0x96, (byte)0x9F, + (byte)0xAB, (byte)0xD0, (byte)0x0A, (byte)0x50, + (byte)0x9B, (byte)0x02, (byte)0x46, (byte)0xD3, + (byte)0x08, (byte)0x3D, (byte)0x66, (byte)0xA4, + (byte)0x5D, (byte)0x41, (byte)0x9F, (byte)0x9C, + (byte)0x7C, (byte)0xBD, (byte)0x89, (byte)0x4B, + (byte)0x22, (byte)0x19, (byte)0x26, (byte)0xBA, + (byte)0xAB, (byte)0xA2, (byte)0x5E, (byte)0xC3, + (byte)0x55, (byte)0xE9, (byte)0x2F, (byte)0x78, + (byte)0xC7, (byte)0x02, (byte)0x01, (byte)0x02, + (byte)0x02, (byte)0x02, (byte)0x02, (byte)0x00, + (byte)0x03, (byte)0x81, (byte)0x85, (byte)0x00, + (byte)0x02, (byte)0x81, (byte)0x81, (byte)0x00, + (byte)0xEE, (byte)0xD6, (byte)0xB1, (byte)0xA3, + (byte)0xB4, (byte)0x78, (byte)0x2B, (byte)0x35, + (byte)0xEF, (byte)0xCD, (byte)0x17, (byte)0x86, + (byte)0x63, (byte)0x2B, (byte)0x97, (byte)0x0E, + (byte)0x7A, (byte)0xD1, (byte)0xFF, (byte)0x7A, + (byte)0xEB, (byte)0x57, (byte)0x61, (byte)0xA1, + (byte)0xF7, (byte)0x90, (byte)0x11, (byte)0xA7, + (byte)0x79, (byte)0x28, (byte)0x69, (byte)0xBA, + (byte)0xA7, (byte)0xB2, (byte)0x37, (byte)0x17, + (byte)0xAE, (byte)0x3C, (byte)0x92, (byte)0x89, + (byte)0x88, (byte)0xE5, (byte)0x7E, (byte)0x8E, + (byte)0xF0, (byte)0x24, (byte)0xD0, (byte)0xE1, + (byte)0xC4, (byte)0xB0, (byte)0x26, (byte)0x5A, + (byte)0x1E, (byte)0xBD, (byte)0xA0, (byte)0xCF, + (byte)0x3E, (byte)0x97, (byte)0x2A, (byte)0x13, + (byte)0x92, (byte)0x3B, (byte)0x39, (byte)0xD0, + (byte)0x1D, (byte)0xA3, (byte)0x6B, (byte)0x3E, + (byte)0xC2, (byte)0xBB, (byte)0x14, (byte)0xB6, + (byte)0xE2, (byte)0x4C, (byte)0x0E, (byte)0x5B, + (byte)0x4B, (byte)0xA4, (byte)0x9D, (byte)0xA6, + (byte)0x21, (byte)0xB0, 
(byte)0xF9, (byte)0xDE, + (byte)0x55, (byte)0xAE, (byte)0x5C, (byte)0x29, + (byte)0x0E, (byte)0xC1, (byte)0xFC, (byte)0xBA, + (byte)0x51, (byte)0xD3, (byte)0xB6, (byte)0x6D, + (byte)0x75, (byte)0x72, (byte)0xDF, (byte)0x43, + (byte)0xAB, (byte)0x94, (byte)0x21, (byte)0x6E, + (byte)0x0C, (byte)0xD1, (byte)0x93, (byte)0x54, + (byte)0x56, (byte)0x7D, (byte)0x4B, (byte)0x90, + (byte)0xF1, (byte)0x94, (byte)0x45, (byte)0xD4, + (byte)0x2A, (byte)0x71, (byte)0xA1, (byte)0xB8, + (byte)0xDD, (byte)0xAA, (byte)0x05, (byte)0xF0, + (byte)0x27, (byte)0x37, (byte)0xBD, (byte)0x44 + }; + + private static final byte alicePrivKeyEnc[] = { + (byte)0x30, (byte)0x81, (byte)0xE3, (byte)0x02, + (byte)0x01, (byte)0x00, (byte)0x30, (byte)0x81, + (byte)0x99, (byte)0x06, (byte)0x09, (byte)0x2A, + (byte)0x86, (byte)0x48, (byte)0x86, (byte)0xF7, + (byte)0x0D, (byte)0x01, (byte)0x03, (byte)0x01, + (byte)0x30, (byte)0x81, (byte)0x8B, (byte)0x02, + (byte)0x81, (byte)0x81, (byte)0x00, (byte)0xF4, + (byte)0x88, (byte)0xFD, (byte)0x58, (byte)0x4E, + (byte)0x49, (byte)0xDB, (byte)0xCD, (byte)0x20, + (byte)0xB4, (byte)0x9D, (byte)0xE4, (byte)0x91, + (byte)0x07, (byte)0x36, (byte)0x6B, (byte)0x33, + (byte)0x6C, (byte)0x38, (byte)0x0D, (byte)0x45, + (byte)0x1D, (byte)0x0F, (byte)0x7C, (byte)0x88, + (byte)0xB3, (byte)0x1C, (byte)0x7C, (byte)0x5B, + (byte)0x2D, (byte)0x8E, (byte)0xF6, (byte)0xF3, + (byte)0xC9, (byte)0x23, (byte)0xC0, (byte)0x43, + (byte)0xF0, (byte)0xA5, (byte)0x5B, (byte)0x18, + (byte)0x8D, (byte)0x8E, (byte)0xBB, (byte)0x55, + (byte)0x8C, (byte)0xB8, (byte)0x5D, (byte)0x38, + (byte)0xD3, (byte)0x34, (byte)0xFD, (byte)0x7C, + (byte)0x17, (byte)0x57, (byte)0x43, (byte)0xA3, + (byte)0x1D, (byte)0x18, (byte)0x6C, (byte)0xDE, + (byte)0x33, (byte)0x21, (byte)0x2C, (byte)0xB5, + (byte)0x2A, (byte)0xFF, (byte)0x3C, (byte)0xE1, + (byte)0xB1, (byte)0x29, (byte)0x40, (byte)0x18, + (byte)0x11, (byte)0x8D, (byte)0x7C, (byte)0x84, + (byte)0xA7, (byte)0x0A, (byte)0x72, (byte)0xD6, + (byte)0x86, 
(byte)0xC4, (byte)0x03, (byte)0x19, + (byte)0xC8, (byte)0x07, (byte)0x29, (byte)0x7A, + (byte)0xCA, (byte)0x95, (byte)0x0C, (byte)0xD9, + (byte)0x96, (byte)0x9F, (byte)0xAB, (byte)0xD0, + (byte)0x0A, (byte)0x50, (byte)0x9B, (byte)0x02, + (byte)0x46, (byte)0xD3, (byte)0x08, (byte)0x3D, + (byte)0x66, (byte)0xA4, (byte)0x5D, (byte)0x41, + (byte)0x9F, (byte)0x9C, (byte)0x7C, (byte)0xBD, + (byte)0x89, (byte)0x4B, (byte)0x22, (byte)0x19, + (byte)0x26, (byte)0xBA, (byte)0xAB, (byte)0xA2, + (byte)0x5E, (byte)0xC3, (byte)0x55, (byte)0xE9, + (byte)0x2F, (byte)0x78, (byte)0xC7, (byte)0x02, + (byte)0x01, (byte)0x02, (byte)0x02, (byte)0x02, + (byte)0x02, (byte)0x00, (byte)0x04, (byte)0x42, + (byte)0x02, (byte)0x40, (byte)0x36, (byte)0x4D, + (byte)0xD0, (byte)0x58, (byte)0x64, (byte)0x91, + (byte)0x78, (byte)0xA2, (byte)0x4B, (byte)0x79, + (byte)0x46, (byte)0xFE, (byte)0xC9, (byte)0xD9, + (byte)0xCA, (byte)0x5C, (byte)0xF9, (byte)0xFD, + (byte)0x6C, (byte)0x5D, (byte)0x76, (byte)0x3A, + (byte)0x41, (byte)0x6D, (byte)0x44, (byte)0x62, + (byte)0x75, (byte)0x93, (byte)0x81, (byte)0x93, + (byte)0x00, (byte)0x4C, (byte)0xB1, (byte)0xD8, + (byte)0x7D, (byte)0x9D, (byte)0xF3, (byte)0x16, + (byte)0x2C, (byte)0x6C, (byte)0x9F, (byte)0x7A, + (byte)0x84, (byte)0xA3, (byte)0x7A, (byte)0xC1, + (byte)0x4F, (byte)0x60, (byte)0xE3, (byte)0xB5, + (byte)0x86, (byte)0x28, (byte)0x08, (byte)0x4D, + (byte)0x94, (byte)0xB6, (byte)0x04, (byte)0x0D, + (byte)0xAC, (byte)0xBD, (byte)0x1F, (byte)0x42, + (byte)0x8F, (byte)0x1B + }; + + private static final byte bobPubKeyEnc[] = { + (byte)0x30, (byte)0x82, (byte)0x01, (byte)0x23, + (byte)0x30, (byte)0x81, (byte)0x99, (byte)0x06, + (byte)0x09, (byte)0x2A, (byte)0x86, (byte)0x48, + (byte)0x86, (byte)0xF7, (byte)0x0D, (byte)0x01, + (byte)0x03, (byte)0x01, (byte)0x30, (byte)0x81, + (byte)0x8B, (byte)0x02, (byte)0x81, (byte)0x81, + (byte)0x00, (byte)0xF4, (byte)0x88, (byte)0xFD, + (byte)0x58, (byte)0x4E, (byte)0x49, (byte)0xDB, + (byte)0xCD, (byte)0x20, 
(byte)0xB4, (byte)0x9D, + (byte)0xE4, (byte)0x91, (byte)0x07, (byte)0x36, + (byte)0x6B, (byte)0x33, (byte)0x6C, (byte)0x38, + (byte)0x0D, (byte)0x45, (byte)0x1D, (byte)0x0F, + (byte)0x7C, (byte)0x88, (byte)0xB3, (byte)0x1C, + (byte)0x7C, (byte)0x5B, (byte)0x2D, (byte)0x8E, + (byte)0xF6, (byte)0xF3, (byte)0xC9, (byte)0x23, + (byte)0xC0, (byte)0x43, (byte)0xF0, (byte)0xA5, + (byte)0x5B, (byte)0x18, (byte)0x8D, (byte)0x8E, + (byte)0xBB, (byte)0x55, (byte)0x8C, (byte)0xB8, + (byte)0x5D, (byte)0x38, (byte)0xD3, (byte)0x34, + (byte)0xFD, (byte)0x7C, (byte)0x17, (byte)0x57, + (byte)0x43, (byte)0xA3, (byte)0x1D, (byte)0x18, + (byte)0x6C, (byte)0xDE, (byte)0x33, (byte)0x21, + (byte)0x2C, (byte)0xB5, (byte)0x2A, (byte)0xFF, + (byte)0x3C, (byte)0xE1, (byte)0xB1, (byte)0x29, + (byte)0x40, (byte)0x18, (byte)0x11, (byte)0x8D, + (byte)0x7C, (byte)0x84, (byte)0xA7, (byte)0x0A, + (byte)0x72, (byte)0xD6, (byte)0x86, (byte)0xC4, + (byte)0x03, (byte)0x19, (byte)0xC8, (byte)0x07, + (byte)0x29, (byte)0x7A, (byte)0xCA, (byte)0x95, + (byte)0x0C, (byte)0xD9, (byte)0x96, (byte)0x9F, + (byte)0xAB, (byte)0xD0, (byte)0x0A, (byte)0x50, + (byte)0x9B, (byte)0x02, (byte)0x46, (byte)0xD3, + (byte)0x08, (byte)0x3D, (byte)0x66, (byte)0xA4, + (byte)0x5D, (byte)0x41, (byte)0x9F, (byte)0x9C, + (byte)0x7C, (byte)0xBD, (byte)0x89, (byte)0x4B, + (byte)0x22, (byte)0x19, (byte)0x26, (byte)0xBA, + (byte)0xAB, (byte)0xA2, (byte)0x5E, (byte)0xC3, + (byte)0x55, (byte)0xE9, (byte)0x2F, (byte)0x78, + (byte)0xC7, (byte)0x02, (byte)0x01, (byte)0x02, + (byte)0x02, (byte)0x02, (byte)0x02, (byte)0x00, + (byte)0x03, (byte)0x81, (byte)0x84, (byte)0x00, + (byte)0x02, (byte)0x81, (byte)0x80, (byte)0x2C, + (byte)0x40, (byte)0xFA, (byte)0xF6, (byte)0xA6, + (byte)0xF8, (byte)0xAC, (byte)0xC2, (byte)0x4F, + (byte)0xCD, (byte)0xC7, (byte)0x37, (byte)0x93, + (byte)0xE5, (byte)0xE4, (byte)0x5E, (byte)0x18, + (byte)0x14, (byte)0xE6, (byte)0x50, (byte)0xDA, + (byte)0x55, (byte)0x38, (byte)0x5D, (byte)0x24, + (byte)0xF5, (byte)0x42, 
(byte)0x68, (byte)0x5F, + (byte)0xF5, (byte)0x15, (byte)0xC8, (byte)0x9B, + (byte)0x5D, (byte)0x06, (byte)0x3D, (byte)0xE1, + (byte)0x52, (byte)0x2F, (byte)0x98, (byte)0xFF, + (byte)0x37, (byte)0xBB, (byte)0x75, (byte)0x48, + (byte)0x48, (byte)0xE9, (byte)0x65, (byte)0x84, + (byte)0x37, (byte)0xBB, (byte)0xB3, (byte)0xE9, + (byte)0x36, (byte)0x01, (byte)0xB4, (byte)0x6A, + (byte)0x1C, (byte)0xB2, (byte)0x11, (byte)0x82, + (byte)0xCE, (byte)0x3D, (byte)0x65, (byte)0xE5, + (byte)0x3C, (byte)0x89, (byte)0xE9, (byte)0x52, + (byte)0x19, (byte)0xBD, (byte)0x58, (byte)0xF6, + (byte)0xA2, (byte)0x03, (byte)0xA8, (byte)0xB2, + (byte)0xA5, (byte)0xDB, (byte)0xEB, (byte)0xF5, + (byte)0x94, (byte)0xF9, (byte)0x46, (byte)0xBE, + (byte)0x45, (byte)0x4C, (byte)0x65, (byte)0xD2, + (byte)0xD1, (byte)0xCF, (byte)0xFF, (byte)0xFF, + (byte)0xFA, (byte)0x38, (byte)0xF1, (byte)0x72, + (byte)0xAB, (byte)0xB9, (byte)0x14, (byte)0x4E, + (byte)0xF5, (byte)0xF0, (byte)0x7A, (byte)0x8E, + (byte)0x45, (byte)0xFD, (byte)0x5B, (byte)0xF9, + (byte)0xA2, (byte)0x97, (byte)0x1B, (byte)0xAE, + (byte)0x2C, (byte)0x7B, (byte)0x6B, (byte)0x7C, + (byte)0x98, (byte)0xFE, (byte)0x58, (byte)0xDD, + (byte)0xBE, (byte)0xF6, (byte)0x1C, (byte)0x8E, + (byte)0xD0, (byte)0xA1, (byte)0x72 + }; + + private static final byte bobPrivKeyEnc[] = { + (byte)0x30, (byte)0x81, (byte)0xE4, (byte)0x02, + (byte)0x01, (byte)0x00, (byte)0x30, (byte)0x81, + (byte)0x99, (byte)0x06, (byte)0x09, (byte)0x2A, + (byte)0x86, (byte)0x48, (byte)0x86, (byte)0xF7, + (byte)0x0D, (byte)0x01, (byte)0x03, (byte)0x01, + (byte)0x30, (byte)0x81, (byte)0x8B, (byte)0x02, + (byte)0x81, (byte)0x81, (byte)0x00, (byte)0xF4, + (byte)0x88, (byte)0xFD, (byte)0x58, (byte)0x4E, + (byte)0x49, (byte)0xDB, (byte)0xCD, (byte)0x20, + (byte)0xB4, (byte)0x9D, (byte)0xE4, (byte)0x91, + (byte)0x07, (byte)0x36, (byte)0x6B, (byte)0x33, + (byte)0x6C, (byte)0x38, (byte)0x0D, (byte)0x45, + (byte)0x1D, (byte)0x0F, (byte)0x7C, (byte)0x88, + (byte)0xB3, (byte)0x1C, 
(byte)0x7C, (byte)0x5B, + (byte)0x2D, (byte)0x8E, (byte)0xF6, (byte)0xF3, + (byte)0xC9, (byte)0x23, (byte)0xC0, (byte)0x43, + (byte)0xF0, (byte)0xA5, (byte)0x5B, (byte)0x18, + (byte)0x8D, (byte)0x8E, (byte)0xBB, (byte)0x55, + (byte)0x8C, (byte)0xB8, (byte)0x5D, (byte)0x38, + (byte)0xD3, (byte)0x34, (byte)0xFD, (byte)0x7C, + (byte)0x17, (byte)0x57, (byte)0x43, (byte)0xA3, + (byte)0x1D, (byte)0x18, (byte)0x6C, (byte)0xDE, + (byte)0x33, (byte)0x21, (byte)0x2C, (byte)0xB5, + (byte)0x2A, (byte)0xFF, (byte)0x3C, (byte)0xE1, + (byte)0xB1, (byte)0x29, (byte)0x40, (byte)0x18, + (byte)0x11, (byte)0x8D, (byte)0x7C, (byte)0x84, + (byte)0xA7, (byte)0x0A, (byte)0x72, (byte)0xD6, + (byte)0x86, (byte)0xC4, (byte)0x03, (byte)0x19, + (byte)0xC8, (byte)0x07, (byte)0x29, (byte)0x7A, + (byte)0xCA, (byte)0x95, (byte)0x0C, (byte)0xD9, + (byte)0x96, (byte)0x9F, (byte)0xAB, (byte)0xD0, + (byte)0x0A, (byte)0x50, (byte)0x9B, (byte)0x02, + (byte)0x46, (byte)0xD3, (byte)0x08, (byte)0x3D, + (byte)0x66, (byte)0xA4, (byte)0x5D, (byte)0x41, + (byte)0x9F, (byte)0x9C, (byte)0x7C, (byte)0xBD, + (byte)0x89, (byte)0x4B, (byte)0x22, (byte)0x19, + (byte)0x26, (byte)0xBA, (byte)0xAB, (byte)0xA2, + (byte)0x5E, (byte)0xC3, (byte)0x55, (byte)0xE9, + (byte)0x2F, (byte)0x78, (byte)0xC7, (byte)0x02, + (byte)0x01, (byte)0x02, (byte)0x02, (byte)0x02, + (byte)0x02, (byte)0x00, (byte)0x04, (byte)0x43, + (byte)0x02, (byte)0x41, (byte)0x00, (byte)0xE0, + (byte)0x31, (byte)0xE7, (byte)0x77, (byte)0xB8, + (byte)0xD0, (byte)0x7E, (byte)0x0A, (byte)0x9B, + (byte)0x94, (byte)0xD5, (byte)0x3D, (byte)0x33, + (byte)0x62, (byte)0x32, (byte)0x51, (byte)0xCE, + (byte)0x74, (byte)0x5C, (byte)0xA5, (byte)0x72, + (byte)0xD9, (byte)0x36, (byte)0xF3, (byte)0x8A, + (byte)0x3F, (byte)0x8B, (byte)0xC6, (byte)0xFE, + (byte)0xEF, (byte)0x94, (byte)0x8B, (byte)0x50, + (byte)0x41, (byte)0x9B, (byte)0x14, (byte)0xC8, + (byte)0xE9, (byte)0x1F, (byte)0x24, (byte)0x1F, + (byte)0x65, (byte)0x8E, (byte)0xD3, (byte)0x85, + (byte)0xD0, (byte)0x68, 
(byte)0x6C, (byte)0xF1, + (byte)0x79, (byte)0x45, (byte)0xD0, (byte)0x06, + (byte)0xA4, (byte)0xB8, (byte)0xE0, (byte)0x64, + (byte)0xF5, (byte)0x38, (byte)0x72, (byte)0x97, + (byte)0x00, (byte)0x23, (byte)0x5F + }; +} +
--- a/test/com/sun/jmx/remote/NotificationMarshalVersions/TestSerializationMismatch.java Wed May 29 13:22:58 2013 -0300 +++ b/test/com/sun/jmx/remote/NotificationMarshalVersions/TestSerializationMismatch.java Wed Jun 05 13:10:11 2013 -0300 @@ -12,7 +12,7 @@ * @bug 6937053 8005472 * * @run clean TestSerializationMismatch - * @run main TestSerializationMismatch + * @run main/othervm TestSerializationMismatch * */ public class TestSerializationMismatch {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/com/sun/management/DiagnosticCommandMBean/DcmdMBeanDoubleInvocationTest.java Wed Jun 05 13:10:11 2013 -0300 @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 7150256 + * @summary Basic Test for the DiagnosticCommandMBean + * @author Frederic Parain + * + * @run main/othervm -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=8125 DcmdMBeanDoubleInvocationTest + */ + + +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.util.logging.Level; +import java.util.logging.Logger; +import javax.management.Descriptor; +import javax.management.InstanceNotFoundException; +import javax.management.IntrospectionException; +import javax.management.MBeanInfo; +import javax.management.MBeanOperationInfo; +import javax.management.MBeanServer; +import javax.management.MalformedObjectNameException; +import javax.management.ObjectName; +import javax.management.ReflectionException; +import javax.management.*; +import javax.management.remote.*; + +public class DcmdMBeanDoubleInvocationTest { + + private static String HOTSPOT_DIAGNOSTIC_MXBEAN_NAME = + "com.sun.management:type=DiagnosticCommand"; + + public static void main(String[] args) { + MBeanServerConnection mbs = null; + try { + JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:8125/jmxrmi"); + JMXConnector connector = JMXConnectorFactory.connect(url); + mbs = connector.getMBeanServerConnection(); + } catch(Throwable t) { + t.printStackTrace(); + } + ObjectName name; + try { + name = new ObjectName(HOTSPOT_DIAGNOSTIC_MXBEAN_NAME); + MBeanInfo info = mbs.getMBeanInfo(name); + String[] helpArgs = {"-all", "\n", "VM.version"}; + Object[] dcmdArgs = {helpArgs}; + String[] signature = {String[].class.getName()}; + String result = (String) mbs.invoke(name, "help", dcmdArgs, signature); + System.out.println(result); + } catch (RuntimeMBeanException ex) { + if (ex.getCause() instanceof IllegalArgumentException) { + System.out.println("Test passed"); + return; + } else { + ex.printStackTrace(); + throw new 
RuntimeException("TEST FAILED"); + } + } catch (InstanceNotFoundException | IntrospectionException + | ReflectionException | MalformedObjectNameException + | MBeanException|IOException ex) { + ex.printStackTrace(); + throw new RuntimeException("TEST FAILED"); + } + System.out.println("Double commands have not been detected"); + throw new RuntimeException("TEST FAILED"); + } +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/com/sun/management/DiagnosticCommandMBean/DcmdMBeanInvocationTest.java Wed Jun 05 13:10:11 2013 -0300 @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 7150256 + * @summary Basic Test for the DiagnosticCommandMBean + * @author Frederic Parain + * + * @run main/othervm -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=8129 DcmdMBeanInvocationTest + */ + + +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.util.logging.Level; +import java.util.logging.Logger; +import javax.management.Descriptor; +import javax.management.InstanceNotFoundException; +import javax.management.IntrospectionException; +import javax.management.MBeanInfo; +import javax.management.MBeanOperationInfo; +import javax.management.MBeanServer; +import javax.management.MalformedObjectNameException; +import javax.management.ObjectName; +import javax.management.ReflectionException; +import javax.management.*; +import javax.management.remote.*; + +public class DcmdMBeanInvocationTest { + + private static String HOTSPOT_DIAGNOSTIC_MXBEAN_NAME = + "com.sun.management:type=DiagnosticCommand"; + + public static void main(String[] args) { + MBeanServerConnection mbs = null; + try { + JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:8129/jmxrmi"); + JMXConnector connector = JMXConnectorFactory.connect(url); + mbs = connector.getMBeanServerConnection(); + } catch(Throwable t) { + t.printStackTrace(); + } + ObjectName name; + try { + name = new ObjectName(HOTSPOT_DIAGNOSTIC_MXBEAN_NAME); + MBeanInfo info = mbs.getMBeanInfo(name); + String[] helpArgs = {"-all"}; + Object[] dcmdArgs = {helpArgs}; + String[] signature = {String[].class.getName()}; + String result = (String) mbs.invoke(name, "help", dcmdArgs, signature); + System.out.println(result); + } catch (InstanceNotFoundException | IntrospectionException + | ReflectionException | MalformedObjectNameException + | MBeanException|IOException ex) { + ex.printStackTrace(); + throw new RuntimeException("TEST FAILED"); + } + 
System.out.println("Test passed"); + } +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/com/sun/management/DiagnosticCommandMBean/DcmdMBeanPermissionsTest.java Wed Jun 05 13:10:11 2013 -0300 @@ -0,0 +1,242 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 7150256 + * @summary Permissions Tests for the DiagnosticCommandMBean + * @author Frederic Parain + * + * @run main/othervm DcmdMBeanPermissionsTest + */ + +import java.lang.management.ManagementFactory; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.ReflectPermission; +import java.security.Permission; +import java.util.HashSet; +import java.util.Iterator; +import javax.management.Descriptor; +import javax.management.InstanceNotFoundException; +import javax.management.IntrospectionException; +import javax.management.MBeanException; +import javax.management.MBeanInfo; +import javax.management.MBeanOperationInfo; +import javax.management.MBeanPermission; +import javax.management.MBeanServer; +import javax.management.MalformedObjectNameException; +import javax.management.ObjectName; +import javax.management.ReflectionException; +import javax.management.RuntimeMBeanException; + +/** + * + * @author fparain + */ +public class DcmdMBeanPermissionsTest { + + private static String HOTSPOT_DIAGNOSTIC_MXBEAN_NAME = + "com.sun.management:type=DiagnosticCommand"; + + static public class CustomSecurityManager extends SecurityManager { + + private HashSet<Permission> grantedPermissions; + + public CustomSecurityManager() { + grantedPermissions = new HashSet<Permission>(); + } + + public final void grantPermission(final Permission perm) { + grantedPermissions.add(perm); + } + + public final void denyPermission(final Permission perm) { + Iterator<Permission> it = grantedPermissions.iterator(); + while (it.hasNext()) { + Permission p = it.next(); + if (p.equals(perm)) { + it.remove(); + } + } + } + + public final void checkPermission(final Permission perm) { + for (Permission p : grantedPermissions) { + if (p.implies(perm)) { + return; + } + } + throw new SecurityException(perm.toString()); + } + }; + + static Permission createPermission(String classname, String name, + String 
action) { + Permission permission = null; + try { + Class c = Class.forName(classname); + if (action == null) { + try { + Constructor constructor = c.getConstructor(String.class); + permission = (Permission) constructor.newInstance(name); + + } catch (InstantiationException | IllegalAccessException + | IllegalArgumentException | InvocationTargetException + | NoSuchMethodException | SecurityException ex) { + ex.printStackTrace(); + throw new RuntimeException("TEST FAILED"); + } + } + if (permission == null) { + try { + Constructor constructor = c.getConstructor(String.class, + String.class); + permission = (Permission) constructor.newInstance( + name, + action); + } catch (InstantiationException | IllegalAccessException + | IllegalArgumentException | InvocationTargetException + | NoSuchMethodException | SecurityException ex) { + ex.printStackTrace(); + throw new RuntimeException("TEST FAILED"); + } + } + } catch (ClassNotFoundException ex) { + ex.printStackTrace(); + throw new RuntimeException("TEST FAILED"); + } + if (permission == null) { + throw new RuntimeException("TEST FAILED"); + } + return permission; + } + + // return true if invokation triggered a SecurityException + static boolean invokeOperation(MBeanServer mbs, ObjectName on, + MBeanOperationInfo opInfo) { + try { + if (opInfo.getSignature().length == 0) { + mbs.invoke(on, opInfo.getName(), + new Object[0], new String[0]); + } else { + mbs.invoke(on, opInfo.getName(), + new Object[1], new String[]{ String[].class.getName()}); + } + } catch (SecurityException ex) { + ex.printStackTrace(); + return true; + } catch (RuntimeMBeanException ex) { + if (ex.getCause() instanceof SecurityException) { + //ex.printStackTrace(); + return true; + } + } catch (MBeanException | InstanceNotFoundException + | ReflectionException ex) { + throw new RuntimeException("TEST FAILED"); + } + return false; + } + + static void testOperation(MBeanServer mbs, CustomSecurityManager sm, + ObjectName on, MBeanOperationInfo opInfo) { 
+ System.out.println("Testing " + opInfo.getName()); + Descriptor desc = opInfo.getDescriptor(); + if (desc.getFieldValue("dcmd.permissionClass") == null) { + // No special permission required, execution should not trigger + // any security exception + if (invokeOperation(mbs, on, opInfo)) { + throw new RuntimeException("TEST FAILED"); + } + } else { + // Building the required permission + Permission reqPerm = createPermission( + (String)desc.getFieldValue("dcmd.permissionClass"), + (String)desc.getFieldValue("dcmd.permissionName"), + (String)desc.getFieldValue("dcmd.permissionAction")); + // Paranoid mode: check that the SecurityManager has not already + // been granted the permission + sm.denyPermission(reqPerm); + // A special permission is required for this operation, + // invoking it without the permission granted must trigger + // a security exception + if(!invokeOperation(mbs, on, opInfo)) { + throw new RuntimeException("TEST FAILED"); + } + // grant the permission and re-try invoking the operation + sm.grantPermission(reqPerm); + if(invokeOperation(mbs, on, opInfo)) { + throw new RuntimeException("TEST FAILED"); + } + // Clean up + sm.denyPermission(reqPerm); + } + } + + public static void main(final String[] args) { + final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); + ObjectName on = null; + try { + on = new ObjectName(HOTSPOT_DIAGNOSTIC_MXBEAN_NAME); + } catch (MalformedObjectNameException ex) { + ex.printStackTrace(); + throw new RuntimeException("TEST FAILED"); + } + MBeanInfo info = null; + try { + info = mbs.getMBeanInfo(on); + } catch (InstanceNotFoundException | IntrospectionException + | ReflectionException ex) { + ex.printStackTrace(); + throw new RuntimeException("TEST FAILED"); + } + CustomSecurityManager sm = new CustomSecurityManager(); + System.setSecurityManager(sm); + // Set of permission required to run the test cleanly + // Some permissions are required by the MBeanServer and other + // platform services 
(RuntimePermission("createClassLoader"), + // ReflectPermission("suppressAccessChecks"), + // java.util.logging.LoggingPermission("control"), + // RuntimePermission("exitVM.97")). + // Other permissions are required by commands being invoked + // in the test (for instance, RuntimePermission("modifyThreadGroup") + // and RuntimePermission("modifyThread") are checked when + // runFinalization() is invoked by the gcRunFinalization command. + sm.grantPermission(new RuntimePermission("createClassLoader")); + sm.grantPermission(new ReflectPermission("suppressAccessChecks")); + sm.grantPermission(new java.util.logging.LoggingPermission("control", "")); + sm.grantPermission(new java.lang.RuntimePermission("exitVM.97")); + sm.grantPermission(new java.lang.RuntimePermission("modifyThreadGroup")); + sm.grantPermission(new java.lang.RuntimePermission("modifyThread")); + for(MBeanOperationInfo opInfo : info.getOperations()) { + Permission opPermission = new MBeanPermission(info.getClassName(), + opInfo.getName(), + on, + "invoke"); + sm.grantPermission(opPermission); + testOperation(mbs, sm, on, opInfo); + sm.denyPermission(opPermission); + } + System.out.println("TEST PASSED"); + } +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/com/sun/management/DiagnosticCommandMBean/DcmdMBeanTest.java Wed Jun 05 13:10:11 2013 -0300 @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 7150256 + * @summary Basic Test for the DiagnosticCommandMBean + * @author Frederic Parain + * + * @run main/othervm -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=8127 DcmdMBeanTest + */ + + +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.util.logging.Level; +import java.util.logging.Logger; +import javax.management.Descriptor; +import javax.management.InstanceNotFoundException; +import javax.management.IntrospectionException; +import javax.management.MBeanInfo; +import javax.management.MBeanOperationInfo; +import javax.management.MBeanServer; +import javax.management.MalformedObjectNameException; +import javax.management.ObjectName; +import javax.management.ReflectionException; +import javax.management.*; +import javax.management.remote.*; + +public class DcmdMBeanTest { + + private static String HOTSPOT_DIAGNOSTIC_MXBEAN_NAME = + "com.sun.management:type=DiagnosticCommand"; + + public static void main(String[] args) { + MBeanServerConnection mbs = null; + try { + JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:8127/jmxrmi"); + JMXConnector connector = JMXConnectorFactory.connect(url); + mbs = connector.getMBeanServerConnection(); + } catch(Throwable t) { + t.printStackTrace(); + } + ObjectName name; + try { + name = new ObjectName(HOTSPOT_DIAGNOSTIC_MXBEAN_NAME); + MBeanInfo info = mbs.getMBeanInfo(name); + // the test should check that the MBean doesn't have any + // Attribute, notification or constructor. 
Current version only + // check operations + System.out.println("Class Name:"+info.getClassName()); + System.out.println("Description:"+info.getDescription()); + MBeanOperationInfo[] opInfo = info.getOperations(); + System.out.println("Operations:"); + for(int i=0; i<opInfo.length; i++) { + printOperation(opInfo[i]); + System.out.println("\n@@@@@@\n"); + } + } catch (InstanceNotFoundException|IntrospectionException|ReflectionException + |MalformedObjectNameException|IOException ex) { + Logger.getLogger(DcmdMBeanTest.class.getName()).log(Level.SEVERE, null, ex); + } + } + + static void printOperation(MBeanOperationInfo info) { + System.out.println("Name: "+info.getName()); + System.out.println("Description: "+info.getDescription()); + System.out.println("Return Type: "+info.getReturnType()); + System.out.println("Impact: "+info.getImpact()); + Descriptor desc = info.getDescriptor(); + System.out.println("Descriptor"); + for(int i=0; i<desc.getFieldNames().length; i++) { + if(desc.getFieldNames()[i].compareTo("dcmd.arguments") == 0) { + System.out.println("\t"+desc.getFieldNames()[i]+":"); + Descriptor desc2 = + (Descriptor)desc.getFieldValue(desc.getFieldNames()[i]); + for(int j=0; j<desc2.getFieldNames().length; j++) { + System.out.println("\t\t"+desc2.getFieldNames()[j]+"="); + Descriptor desc3 = + (Descriptor)desc2.getFieldValue(desc2.getFieldNames()[j]); + for(int k=0; k<desc3.getFieldNames().length; k++) { + System.out.println("\t\t\t"+desc3.getFieldNames()[k]+"=" + +desc3.getFieldValue(desc3.getFieldNames()[k])); + } + } + } else { + System.out.println("\t"+desc.getFieldNames()[i]+"=" + +desc.getFieldValue(desc.getFieldNames()[i])); + } + } + } +} +
--- a/test/demo/zipfs/ZipFSTester.java Wed May 29 13:22:58 2013 -0300 +++ b/test/demo/zipfs/ZipFSTester.java Wed Jun 05 13:10:11 2013 -0300 @@ -29,6 +29,7 @@ import java.nio.file.attribute.*; import java.net.*; import java.util.*; +import java.util.concurrent.TimeUnit; import java.util.zip.*; import static java.nio.file.StandardOpenOption.*; @@ -48,6 +49,7 @@ test0(fs); test1(fs); test2(fs); // more tests + testTime(Paths.get(args[0])); } } @@ -337,6 +339,46 @@ Files.delete(fs3Path); } + // test file stamp + static void testTime(Path src) throws Exception { + // create a new filesystem, copy this file into it + Map<String, Object> env = new HashMap<String, Object>(); + env.put("create", "true"); + Path fsPath = getTempPath(); + FileSystem fs = newZipFileSystem(fsPath, env); + + System.out.println("test copy with timestamps..."); + // copyin + Path dst = getPathWithParents(fs, "me"); + Files.copy(src, dst, COPY_ATTRIBUTES); + checkEqual(src, dst); + + BasicFileAttributes attrs = Files + .getFileAttributeView(src, BasicFileAttributeView.class) + .readAttributes(); + System.out.println("mtime: " + attrs.lastModifiedTime()); + System.out.println("ctime: " + attrs.creationTime()); + System.out.println("atime: " + attrs.lastAccessTime()); + System.out.println(" ==============>"); + BasicFileAttributes dstAttrs = Files + .getFileAttributeView(dst, BasicFileAttributeView.class) + .readAttributes(); + System.out.println("mtime: " + dstAttrs.lastModifiedTime()); + System.out.println("ctime: " + dstAttrs.creationTime()); + System.out.println("atime: " + dstAttrs.lastAccessTime()); + + // 1-second granularity + if (attrs.lastModifiedTime().to(TimeUnit.SECONDS) != + dstAttrs.lastModifiedTime().to(TimeUnit.SECONDS) || + attrs.lastAccessTime().to(TimeUnit.SECONDS) != + dstAttrs.lastAccessTime().to(TimeUnit.SECONDS) || + attrs.creationTime().to(TimeUnit.SECONDS) != + dstAttrs.creationTime().to(TimeUnit.SECONDS)) { + throw new RuntimeException("Timestamp Copy Failed!"); + } + 
Files.delete(fsPath); + } + private static FileSystem newZipFileSystem(Path path, Map<String, ?> env) throws Exception {
--- a/test/demo/zipfs/basic.sh Wed May 29 13:22:58 2013 -0300 +++ b/test/demo/zipfs/basic.sh Wed Jun 05 13:10:11 2013 -0300 @@ -22,7 +22,7 @@ # # @test # @bug 6990846 7009092 7009085 7015391 7014948 7005986 7017840 7007596 -# 7157656 8002390 +# 7157656 8002390 7012868 7012856 # @summary Test ZipFileSystem demo # @build Basic PathOps ZipFSTester # @run shell basic.sh
--- a/test/java/awt/WMSpecificTests/Metacity/FullscreenDialogModality.java Wed May 29 13:22:58 2013 -0300 +++ b/test/java/awt/WMSpecificTests/Metacity/FullscreenDialogModality.java Wed Jun 05 13:10:11 2013 -0300 @@ -25,6 +25,8 @@ * @test * @bug 8012586 * @summary verify that modal dialog will appeared above fullscreen window under Metacity WM. + * @library ../../regtesthelpers + * @build Util * @run main FullscreenDialogModality * @run main/othervm FullscreenDialogModality * @author vkravets
--- a/test/java/awt/Window/TranslucentJAppletTest/TranslucentJAppletTest.java Wed May 29 13:22:58 2013 -0300 +++ b/test/java/awt/Window/TranslucentJAppletTest/TranslucentJAppletTest.java Wed Jun 05 13:10:11 2013 -0300 @@ -37,11 +37,12 @@ public class TranslucentJAppletTest { + private static volatile GraphicsConfiguration graphicsConfig = null; private static JFrame frame; private static volatile boolean paintComponentCalled = false; private static void initAndShowGUI() { - frame = new JFrame(); + frame = new JFrame(graphicsConfig); JApplet applet = new JApplet(); applet.setBackground(new Color(0, 0, 0, 0)); JPanel panel = new JPanel() { @@ -66,6 +67,27 @@ { sun.awt.SunToolkit tk = (sun.awt.SunToolkit)Toolkit.getDefaultToolkit(); + final GraphicsEnvironment ge = GraphicsEnvironment.getLocalGraphicsEnvironment(); + for (GraphicsDevice gd : ge.getScreenDevices()) { + if (gd.isWindowTranslucencySupported( + GraphicsDevice.WindowTranslucency.PERPIXEL_TRANSLUCENT)) + { + for (GraphicsConfiguration gc : gd.getConfigurations()) { + if (gc.isTranslucencyCapable()) { + graphicsConfig = gc; + break; + } + } + } + if (graphicsConfig != null) { + break; + } + } + if (graphicsConfig == null) { + System.out.println("The system does not support translucency. Consider the test passed."); + return; + } + Robot r = new Robot(); Color color1 = r.getPixelColor(100, 100); // (0, 0) in frame coordinates
--- a/test/java/awt/print/PrinterJob/Collate2DPrintingTest.java Wed May 29 13:22:58 2013 -0300 +++ b/test/java/awt/print/PrinterJob/Collate2DPrintingTest.java Wed Jun 05 13:10:11 2013 -0300 @@ -23,7 +23,7 @@ /** * @test - * @bug 6362683 + * @bug 6362683 8012381 * @summary Collation should work. * @run main/manual Collate2DPrintingTest */
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/java/awt/print/PrinterJob/PrintLatinCJKTest.java Wed Jun 05 13:10:11 2013 -0300 @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/** + * @test + * @bug 800535 + * @summary JDK7 Printing: CJK and Latin Text in string overlap + * @run main/manual=yesno PrintLatinCJKTest + */ + +import java.awt.Font; +import java.awt.Graphics; +import java.awt.event.ActionEvent; +import java.awt.event.ActionListener; +import java.awt.print.PageFormat; +import java.awt.print.Pageable; +import java.awt.print.Printable; +import java.awt.print.PrinterException; +import java.awt.print.PrinterJob; +import javax.swing.JButton; +import javax.swing.JFrame; +import javax.swing.JTextArea; + +import javax.swing.SwingUtilities; + +public class PrintLatinCJKTest implements Printable, ActionListener { + + static PrintLatinCJKTest testInstance = new PrintLatinCJKTest(); + private PageFormat pf; + + static String info = + "You need a printer for this test. If you have none, let "+ + "the test pass. If there is a printer, press Print, send "+ + "the output to the printer, and examine it. It should have "+ + "text looking like this : \u4e00\u4e01\u4e02\u4e03\u4e04English."; + + public static void showFrame() { + JFrame f = new JFrame(); + JTextArea jta = new JTextArea(info, 4, 30); + jta.setLineWrap(true); + jta.setWrapStyleWord(true); + f.add("Center", jta); + JButton b = new JButton("Print"); + b.addActionListener(testInstance); + f.add("South", b); + f.pack(); + f.setVisible(true); + } + + public int print(Graphics g, PageFormat pf, int pageIndex) + throws PrinterException { + + if (pageIndex > 0) { + return Printable.NO_SUCH_PAGE; + } + g.translate((int) pf.getImageableX(), (int) pf.getImageableY()); + g.setFont(new Font("Dialog", Font.PLAIN, 36)); + g.drawString("\u4e00\u4e01\u4e02\u4e03\u4e04English", 20, 100); + return Printable.PAGE_EXISTS; + } + + public void actionPerformed(ActionEvent e) { + try { + PrinterJob job = PrinterJob.getPrinterJob(); + job.setPrintable(testInstance); + if (job.printDialog()) { + job.print(); + } + } catch (PrinterException ex) { + ex.printStackTrace(); + } + } + + public static void 
main(String[] args) { + SwingUtilities.invokeLater(new Runnable() { + @Override + public void run() { + showFrame(); + } + }); + } +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/java/beans/XMLEncoder/Test8013416.java Wed Jun 05 13:10:11 2013 -0300 @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8013416 + * @summary Tests public synthetic methods + * @author Sergey Malenkov + */ + +import java.beans.DefaultPersistenceDelegate; +import java.beans.Encoder; +import java.beans.Expression; +import java.beans.Statement; +import java.beans.XMLEncoder; +import java.util.HashMap; +import java.util.Map.Entry; +import java.util.Set; + +public class Test8013416 extends AbstractTest { + public static void main(String[] args) { + new Test8013416().test(true); + } + + protected Object getObject() { + Public<String, String> map = new Public<String, String>(); + map.put(" pz1 ", " pz2 "); + map.put(" pz3 ", " pz4 "); + return map; + } + + @Override + protected void initialize(XMLEncoder encoder) { + super.initialize(encoder); + encoder.setPersistenceDelegate(Public.class, new PublicPersistenceDelegate()); + } + + private static final class PublicPersistenceDelegate extends DefaultPersistenceDelegate { + @Override + protected Expression instantiate(Object oldInstance, Encoder out) { + return new Expression(oldInstance, oldInstance.getClass(), "new", null); + } + + @Override + protected void initialize(Class<?> type, Object oldInstance, Object newInstance, Encoder out) { + super.initialize(type, oldInstance, newInstance, out); + + Public<String, String> map = (Public) oldInstance; + for (Entry<String, String> entry : map.getAll()) { + String[] args = {entry.getKey(), entry.getValue()}; + out.writeStatement(new Statement(oldInstance, "put", args)); + } + } + } + + public static final class Public<K, V> extends Private<K, V> { + } + + private static class Private<K, V> { + private HashMap<K, V> map = new HashMap<K, V>(); + + public void put(K key, V value) { + this.map.put(key, value); + } + + public Set<Entry<K, V>> getAll() { + return this.map.entrySet(); + } + } +}
--- a/test/java/io/pathNames/General.java Wed May 29 13:22:58 2013 -0300 +++ b/test/java/io/pathNames/General.java Wed Jun 05 13:10:11 2013 -0300 @@ -277,8 +277,8 @@ { check(ans, ask + slash); checkNames(depth, create, - ans, - ask); + ans.endsWith(File.separator) ? ans : ans + File.separator, + ask + slash); } @@ -308,9 +308,6 @@ String ans, String ask) throws Exception { - ans = ans.endsWith(File.separator) ? ans : ans + File.separator; - ask = ask.endsWith(File.separator) ? ask : ask + File.separator; - int d = depth - 1; File f = new File(ans); String n;
--- a/test/java/io/pathNames/GeneralWin32.java Wed May 29 13:22:58 2013 -0300 +++ b/test/java/io/pathNames/GeneralWin32.java Wed Jun 05 13:10:11 2013 -0300 @@ -50,13 +50,13 @@ private static final int DEPTH = 2; private static String baseDir = null; private static String userDir = null; + private static String relative = null; /* Pathnames relative to working directory */ private static void checkCaseLookup() throws IOException { /* Use long names here to avoid 8.3 format, which Samba servers often force to lowercase */ - String relative = baseDir.substring(userDir.length() + 1); File d1 = new File(relative, "XyZzY0123"); File d2 = new File(d1, "FOO_bar_BAZ"); File f = new File(d2, "GLORPified"); @@ -79,9 +79,9 @@ case of filenames, rather than just using the input case */ File y = new File(userDir, f.getPath()); String ans = y.getPath(); - check(ans, relative + "\\" + "XyZzY0123\\FOO_bar_BAZ\\GLORPified"); - check(ans, relative + "\\" + "xyzzy0123\\foo_bar_baz\\glorpified"); - check(ans, relative + "\\" + "XYZZY0123\\FOO_BAR_BAZ\\GLORPIFIED"); + check(ans, relative + "XyZzY0123\\FOO_bar_BAZ\\GLORPified"); + check(ans, relative + "xyzzy0123\\foo_bar_baz\\glorpified"); + check(ans, relative + "XYZZY0123\\FOO_BAR_BAZ\\GLORPIFIED"); } private static void checkWild(File f) throws Exception { @@ -103,8 +103,7 @@ private static void checkRelativePaths() throws Exception { checkCaseLookup(); checkWildCards(); - String relative = baseDir.substring(userDir.length() + 1); - checkNames(3, true, baseDir.toString(), relative); + checkNames(3, true, baseDir, relative); } @@ -136,7 +135,6 @@ String ans = exists ? 
df.getAbsolutePath() : d; if (!ans.endsWith("\\")) ans = ans + "\\"; - String relative = baseDir.substring(userDir.length() + 1); checkNames(depth, false, ans + relative, d + relative); } @@ -171,15 +169,16 @@ return; } if (args.length > 0) debug = true; - userDir = System.getProperty("user.dir"); - baseDir = initTestData(6); + userDir = System.getProperty("user.dir") + '\\'; + baseDir = initTestData(6) + '\\'; + relative = baseDir.substring(userDir.length()); checkRelativePaths(); checkDrivePaths(); checkUncPaths(); } private static String initTestData(int maxDepth) throws IOException { - File parent = new File(System.getProperty("user.dir")); + File parent = new File(userDir); String baseDir = null; maxDepth = maxDepth < DEPTH + 2 ? DEPTH + 2 : maxDepth; for (int i = 0; i < maxDepth; i ++) {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/java/lang/IntegralPrimitiveToString.java Wed Jun 05 13:10:11 2013 -0300 @@ -0,0 +1,194 @@ +/* + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +import java.math.BigInteger; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.Arrays; +import java.util.List; +import java.util.function.LongFunction; +import java.util.function.Function; + +import static org.testng.Assert.assertEquals; + +/** + * @test + * @run testng IntegralPrimitiveToString + * @summary test string conversions for primitive integral types. 
+ * @author Mike Duigou + */ +public class IntegralPrimitiveToString { + + @Test(dataProvider="numbers") + public <N extends Number> void testToString(String description, + Function<N, BigInteger> converter, + Function<N, BigInteger> unsignedConverter, + N[] values, + Stringifier<N>[] stringifiers) { + System.out.printf("%s : conversions: %d values: %d\n", description, stringifiers.length, values.length); + for( N value : values) { + BigInteger asBigInt = converter.apply(value); + BigInteger asUnsignedBigInt = unsignedConverter.apply(value); + for(Stringifier<N> stringifier : stringifiers) { + stringifier.assertMatchingToString(value, asBigInt, asUnsignedBigInt, description); + } + } + } + + static class Stringifier<N extends Number> { + final boolean signed; + final int radix; + final Function<N,String> toString; + Stringifier(boolean signed, int radix, Function<N,String> toString) { + this.signed = signed; + this.radix = radix; + this.toString = toString; + } + + public void assertMatchingToString(N value, BigInteger asSigned, BigInteger asUnsigned, String description) { + String expected = signed + ? 
asSigned.toString(radix) + : asUnsigned.toString(radix); + + String actual = toString.apply(value); + + assertEquals(actual, expected, description + " conversion should be the same"); + } + } + + @DataProvider(name="numbers", parallel=true) + public Iterator<Object[]> testSetProvider() { + + return Arrays.asList( + new Object[] { "Byte", + (Function<Byte,BigInteger>) b -> BigInteger.valueOf((long) b), + (Function<Byte,BigInteger>) b -> BigInteger.valueOf(Integer.toUnsignedLong((byte) b)), + numberProvider((LongFunction<Byte>) l -> Byte.valueOf((byte) l), Byte.SIZE), + new Stringifier[] { + new Stringifier<Byte>(true, 10, b -> b.toString()), + new Stringifier<Byte>(true, 10, b -> Byte.toString(b)) + } + }, + new Object[] { "Short", + (Function<Short,BigInteger>) s -> BigInteger.valueOf((long) s), + (Function<Short,BigInteger>) s -> BigInteger.valueOf(Integer.toUnsignedLong((short) s)), + numberProvider((LongFunction<Short>) l -> Short.valueOf((short) l), Short.SIZE), + new Stringifier[] { + new Stringifier<Short>(true, 10, s -> s.toString()), + new Stringifier<Short>(true, 10, s -> Short.toString( s)) + } + }, + new Object[] { "Integer", + (Function<Integer,BigInteger>) i -> BigInteger.valueOf((long) i), + (Function<Integer,BigInteger>) i -> BigInteger.valueOf(Integer.toUnsignedLong(i)), + numberProvider((LongFunction<Integer>) l -> Integer.valueOf((int) l), Integer.SIZE), + new Stringifier[] { + new Stringifier<Integer>(true, 10, i -> i.toString()), + new Stringifier<Integer>(true, 10, i -> Integer.toString(i)), + new Stringifier<Integer>(false, 2, Integer::toBinaryString), + new Stringifier<Integer>(false, 16, Integer::toHexString), + new Stringifier<Integer>(false, 8, Integer::toOctalString), + new Stringifier<Integer>(true, 2, i -> Integer.toString(i, 2)), + new Stringifier<Integer>(true, 8, i -> Integer.toString(i, 8)), + new Stringifier<Integer>(true, 10, i -> Integer.toString(i, 10)), + new Stringifier<Integer>(true, 16, i -> Integer.toString(i, 16)), + new 
Stringifier<Integer>(true, Character.MAX_RADIX, i -> Integer.toString(i, Character.MAX_RADIX)), + new Stringifier<Integer>(false, 10, i -> Integer.toUnsignedString(i)), + new Stringifier<Integer>(false, 2, i -> Integer.toUnsignedString(i, 2)), + new Stringifier<Integer>(false, 8, i -> Integer.toUnsignedString(i, 8)), + new Stringifier<Integer>(false, 10, i -> Integer.toUnsignedString(i, 10)), + new Stringifier<Integer>(false, 16, i -> Integer.toUnsignedString(i, 16)), + new Stringifier<Integer>(false, Character.MAX_RADIX, i -> Integer.toUnsignedString(i, Character.MAX_RADIX)) + } + }, + new Object[] { "Long", + (Function<Long, BigInteger>) BigInteger::valueOf, + (Function<Long, BigInteger>) l -> { + if (l >= 0) { + return BigInteger.valueOf((long) l); + } else { + int upper = (int)(l >>> 32); + int lower = (int) (long) l; + + // return (upper << 32) + lower + return (BigInteger.valueOf(Integer.toUnsignedLong(upper))).shiftLeft(32). + add(BigInteger.valueOf(Integer.toUnsignedLong(lower))); + } + }, + numberProvider((LongFunction<Long>) Long::valueOf, Long.SIZE), + new Stringifier[] { + new Stringifier<Long>(true, 10, l -> l.toString()), + new Stringifier<Long>(true, 10, l -> Long.toString(l)), + new Stringifier<Long>(false, 2, Long::toBinaryString), + new Stringifier<Long>(false, 16, Long::toHexString), + new Stringifier<Long>(false, 8, Long::toOctalString), + new Stringifier<Long>(true, 2, l -> Long.toString(l, 2)), + new Stringifier<Long>(true, 8, l -> Long.toString(l, 8)), + new Stringifier<Long>(true, 10, l -> Long.toString(l, 10)), + new Stringifier<Long>(true, 16, l -> Long.toString(l, 16)), + new Stringifier<Long>(true, Character.MAX_RADIX, l -> Long.toString(l, Character.MAX_RADIX)), + new Stringifier<Long>(false, 10, Long::toUnsignedString), + new Stringifier<Long>(false, 2, l -> Long.toUnsignedString(l, 2)), + new Stringifier<Long>(false, 8, l-> Long.toUnsignedString(l, 8)), + new Stringifier<Long>(false, 10, l -> Long.toUnsignedString(l, 10)), + new 
Stringifier<Long>(false, 16, l -> Long.toUnsignedString(l, 16)), + new Stringifier<Long>(false, Character.MAX_RADIX, l -> Long.toUnsignedString(l, Character.MAX_RADIX)) + } + } + ).iterator(); + } + private static final long[] SOME_PRIMES = { + 3L, 5L, 7L, 11L, 13L, 17L, 19L, 23L, 29L, 31L, 37L, 41L, 43L, 47L, 53L, + 59L, 61L, 71L, 73L, 79L, 83L, 89L, 97L, 101L, 103L, 107L, 109L, 113L, + 5953L, 5981L, 5987L, 6007L, 6011L, 6029L, 6037L, 6043L, 6047L, 6053L, + 16369L, 16381L, 16411L, 32749L, 32771L, 65521L, 65537L, + (long) Integer.MAX_VALUE }; + + public <N extends Number> N[] numberProvider(LongFunction<N> boxer, int bits, N... extras) { + List<N> numbers = new ArrayList<>(); + + for(int bitmag = 0; bitmag < bits; bitmag++) { + long value = 1L << bitmag; + numbers.add(boxer.apply(value)); + numbers.add(boxer.apply(value - 1)); + numbers.add(boxer.apply(value + 1)); + numbers.add(boxer.apply(-value)); + for(int divisor = 0; divisor < SOME_PRIMES.length && value < SOME_PRIMES[divisor]; divisor++) { + numbers.add(boxer.apply(value - SOME_PRIMES[divisor])); + numbers.add(boxer.apply(value + SOME_PRIMES[divisor])); + numbers.add(boxer.apply(value * SOME_PRIMES[divisor])); + numbers.add(boxer.apply(value / SOME_PRIMES[divisor])); + numbers.add(boxer.apply(value | SOME_PRIMES[divisor])); + numbers.add(boxer.apply(value & SOME_PRIMES[divisor])); + numbers.add(boxer.apply(value ^ SOME_PRIMES[divisor])); + } + } + + numbers.addAll(Arrays.asList(extras)); + + return (N[]) numbers.toArray(new Number[numbers.size()]); + } +}
--- a/test/java/lang/management/MXBean/MXBeanBehavior.java Wed May 29 13:22:58 2013 -0300 +++ b/test/java/lang/management/MXBean/MXBeanBehavior.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2013 Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,6 +36,10 @@ import javax.management.*; public class MXBeanBehavior { + // Exclude list: list of platform MBeans that are not MXBeans + public static final HashSet<String> excludeList = new HashSet<>( + Arrays.asList("com.sun.management:type=DiagnosticCommand")); + public static void main(String[] args) throws Exception { MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); @@ -92,6 +96,10 @@ by generic MXBean tests. */ private static void test(MBeanServer mbs, ObjectName name) throws Exception { + if(excludeList.contains(name.getCanonicalName())) { + // Skipping not MXBean objects. + return; + } System.out.println("Testing: " + name); MBeanInfo mbi = mbs.getMBeanInfo(name);
--- a/test/java/lang/management/ManagementFactory/MBeanServerMXBeanUnsupportedTest.java Wed May 29 13:22:58 2013 -0300 +++ b/test/java/lang/management/ManagementFactory/MBeanServerMXBeanUnsupportedTest.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2006, 2013 Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,6 +36,8 @@ import java.lang.reflect.InvocationHandler; import java.lang.reflect.Method; import java.lang.reflect.Proxy; +import java.util.Arrays; +import java.util.HashSet; import javax.management.MBeanServer; import javax.management.MBeanServerBuilder; import javax.management.MBeanServerDelegate; @@ -81,6 +83,9 @@ public static class MBeanServerForwarderInvocationHandler implements InvocationHandler { + public static final HashSet<String> excludeList = new HashSet<String>( + Arrays.asList("com.sun.management:type=DiagnosticCommand")); + public static MBeanServerForwarder newProxyInstance() { final InvocationHandler handler = @@ -126,15 +131,17 @@ if (domain.equals("java.lang") || domain.equals("java.util.logging") || domain.equals("com.sun.management")) { - String mxbean = (String) - mbs.getMBeanInfo(name).getDescriptor().getFieldValue("mxbean"); - if (mxbean == null || !mxbean.equals("true")) { - throw new RuntimeException( + if(!excludeList.contains(name.getCanonicalName())) { + String mxbean = (String) + mbs.getMBeanInfo(name).getDescriptor().getFieldValue("mxbean"); + if (mxbean == null || !mxbean.equals("true")) { + throw new RuntimeException( "Platform MBeans must be MXBeans!"); - } - if (!(mbean instanceof StandardMBean)) { - throw new RuntimeException( + } + if (!(mbean instanceof StandardMBean)) { + throw new RuntimeException( "MXBeans must be wrapped in StandardMBean!"); + } } } return result;
--- a/test/java/net/CookieHandler/TestHttpCookie.java Wed May 29 13:22:58 2013 -0300 +++ b/test/java/net/CookieHandler/TestHttpCookie.java Wed Jun 05 13:10:11 2013 -0300 @@ -243,6 +243,10 @@ test("set-cookie2: Customer = \"WILE_E_COYOTE\"; Version = \"1\"; Path = \"/acme\"") .n("Customer").v("WILE_E_COYOTE").ver(1).p("/acme"); + // $NAME is reserved; result should be null + test("set-cookie2: $Customer = \"WILE_E_COYOTE\"; Version = \"1\"; Path = \"/acme\"") + .nil(); + // a 'full' cookie test("set-cookie2: Customer=\"WILE_E_COYOTE\"" + ";Version=\"1\"" +
--- a/test/java/net/InterfaceAddress/NetworkPrefixLength.java Wed May 29 13:22:58 2013 -0300 +++ b/test/java/net/InterfaceAddress/NetworkPrefixLength.java Wed Jun 05 13:10:11 2013 -0300 @@ -22,7 +22,7 @@ */ /* @test - * @bug 6707289 + * @bug 6707289 7107883 * @summary InterfaceAddress.getNetworkPrefixLength() does not conform to Javadoc */ @@ -47,6 +47,14 @@ passed = false; debug(nic.getName(), iaddr); } + InetAddress ia = iaddr.getAddress(); + if (ia.isLoopbackAddress() && ia instanceof Inet4Address) { + // assumption: prefix length will always be 8 + if (iaddr.getNetworkPrefixLength() != 8) { + out.println("Expected prefix of 8, got " + iaddr); + passed = false; + } + } } }
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/java/nio/Buffer/Chars.java Wed Jun 05 13:10:11 2013 -0300 @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test + * @bug 8014854 + * @summary Exercises CharBuffer#chars on each of the CharBuffer types + * @run testng Chars + */ + +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.CharBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.Random; + +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +import static org.testng.Assert.assertEquals; + +public class Chars { + + static final Random RAND = new Random(); + + static final int SIZE = 128 + RAND.nextInt(1024); + + /** + * Randomize the char buffer's position and limit. 
+ */ + static CharBuffer randomizeRange(CharBuffer cb) { + int mid = cb.capacity() >>> 1; + int start = RAND.nextInt(mid); + int end = mid + RAND.nextInt(mid); + cb.position(start); + cb.limit(end); + return cb; + } + + /** + * Randomize the char buffer's contents, position and limit. + */ + static CharBuffer randomize(CharBuffer cb) { + while (cb.hasRemaining()) { + cb.put((char)RAND.nextInt()); + } + return randomizeRange(cb); + } + + /** + * Sums the remaining chars in the char buffer. + */ + static int intSum(CharBuffer cb) { + int sum = 0; + cb.mark(); + while (cb.hasRemaining()) { + sum += cb.get(); + } + cb.reset(); + return sum; + } + + /** + * Creates char buffers to test, adding them to the given list. + */ + static void addCases(CharBuffer cb, List<CharBuffer> buffers) { + randomize(cb); + buffers.add(cb); + + buffers.add(cb.slice()); + buffers.add(cb.duplicate()); + buffers.add(cb.asReadOnlyBuffer()); + + buffers.add(randomizeRange(cb.slice())); + buffers.add(randomizeRange(cb.duplicate())); + buffers.add(randomizeRange(cb.asReadOnlyBuffer())); + } + + @DataProvider(name = "charbuffers") + public Object[][] createCharBuffers() { + List<CharBuffer> buffers = new ArrayList<>(); + + // heap + addCases(CharBuffer.allocate(SIZE), buffers); + addCases(CharBuffer.wrap(new char[SIZE]), buffers); + addCases(ByteBuffer.allocate(SIZE*2).order(ByteOrder.BIG_ENDIAN).asCharBuffer(), + buffers); + addCases(ByteBuffer.allocate(SIZE*2).order(ByteOrder.LITTLE_ENDIAN).asCharBuffer(), + buffers); + + // direct + addCases(ByteBuffer.allocateDirect(SIZE*2).order(ByteOrder.BIG_ENDIAN).asCharBuffer(), + buffers); + addCases(ByteBuffer.allocateDirect(SIZE*2).order(ByteOrder.LITTLE_ENDIAN).asCharBuffer(), + buffers); + + // read-only buffer backed by a CharSequence + buffers.add(CharBuffer.wrap(randomize(CharBuffer.allocate(SIZE)))); + + Object[][] params = new Object[buffers.size()][]; + for (int i = 0; i < buffers.size(); i++) { + CharBuffer cb = buffers.get(i); + params[i] = 
new Object[] { cb.getClass().getName(), cb }; + } + + return params; + } + + @Test(dataProvider = "charbuffers") + public void testChars(String type, CharBuffer cb) { + System.out.format("%s position=%d, limit=%d%n", type, cb.position(), cb.limit()); + int expected = intSum(cb); + assertEquals(cb.chars().sum(), expected); + assertEquals(cb.chars().parallel().sum(), expected); + } +}
--- a/test/java/nio/channels/AsynchronousChannelGroup/Unbounded.java Wed May 29 13:22:58 2013 -0300 +++ b/test/java/nio/channels/AsynchronousChannelGroup/Unbounded.java Wed Jun 05 13:10:11 2013 -0300 @@ -43,47 +43,24 @@ static volatile boolean finished; public static void main(String[] args) throws Exception { - // all accepted connections are added to a queue - final ArrayBlockingQueue<AsynchronousSocketChannel> queue = - new ArrayBlockingQueue<AsynchronousSocketChannel>(CONCURRENCY_COUNT); - // create listener to accept connections - final AsynchronousServerSocketChannel listener = + AsynchronousServerSocketChannel listener = AsynchronousServerSocketChannel.open() .bind(new InetSocketAddress(0)); - listener.accept((Void)null, new CompletionHandler<AsynchronousSocketChannel,Void>() { - public void completed(AsynchronousSocketChannel ch, Void att) { - queue.add(ch); - listener.accept((Void)null, this); - } - public void failed(Throwable exc, Void att) { - if (!finished) { - failed = true; - System.err.println("accept failed: " + exc); - } - } - }); - System.out.println("Listener created."); + + // establish connections - // establish lots of connections + AsynchronousSocketChannel[] clients = new AsynchronousSocketChannel[CONCURRENCY_COUNT]; + AsynchronousSocketChannel[] peers = new AsynchronousSocketChannel[CONCURRENCY_COUNT]; + int port = ((InetSocketAddress)(listener.getLocalAddress())).getPort(); SocketAddress sa = new InetSocketAddress(InetAddress.getLocalHost(), port); - AsynchronousSocketChannel[] channels = - new AsynchronousSocketChannel[CONCURRENCY_COUNT]; + for (int i=0; i<CONCURRENCY_COUNT; i++) { - int attempts = 0; - for (;;) { - try { - channels[i] = AsynchronousSocketChannel.open(); - channels[i].connect(sa).get(); - break; - } catch (IOException x) { - // probably resource issue so back off and retry - if (++attempts >= 3) - throw x; - Thread.sleep(50); - } - } + clients[i] = AsynchronousSocketChannel.open(); + Future<Void> result = 
clients[i].connect(sa); + peers[i] = listener.accept().get(); + result.get(); } System.out.println("All connection established."); @@ -91,9 +68,9 @@ final CyclicBarrier barrier = new CyclicBarrier(CONCURRENCY_COUNT+1); // initiate a read operation on each channel. - for (int i=0; i<CONCURRENCY_COUNT; i++) { + for (AsynchronousSocketChannel client: clients) { ByteBuffer buf = ByteBuffer.allocateDirect(100); - channels[i].read( buf, channels[i], + client.read(buf, client, new CompletionHandler<Integer,AsynchronousSocketChannel>() { public void completed(Integer bytesRead, AsynchronousSocketChannel ch) { try { @@ -113,13 +90,10 @@ System.out.println("All read operations outstanding."); // write data to each of the accepted connections - int remaining = CONCURRENCY_COUNT; - while (remaining > 0) { - AsynchronousSocketChannel ch = queue.take(); - ch.write(ByteBuffer.wrap("welcome".getBytes())).get(); - ch.shutdownOutput(); - ch.close(); - remaining--; + for (AsynchronousSocketChannel peer: peers) { + peer.write(ByteBuffer.wrap("welcome".getBytes())).get(); + peer.shutdownOutput(); + peer.close(); } // wait for all threads to reach the barrier
--- a/test/java/nio/file/Files/StreamTest.java Wed May 29 13:22:58 2013 -0300 +++ b/test/java/nio/file/Files/StreamTest.java Wed Jun 05 13:10:11 2013 -0300 @@ -476,15 +476,25 @@ } public void testSecurityException() throws IOException { - Path triggerFile = testFolder.resolve(Paths.get("dir", "SecurityException")); - Files.createFile(triggerFile); - Path sampleFile = testFolder.resolve(Paths.get("dir", "sample")); - Files.createFile(sampleFile); - Path triggerDir = testFolder.resolve(Paths.get("dir2", "SecurityException")); - Files.createDirectories(triggerDir); + Path empty = testFolder.resolve("empty"); + Path triggerFile = Files.createFile(empty.resolve("SecurityException")); + Path sampleFile = Files.createDirectories(empty.resolve("sample")); + + Path dir2 = testFolder.resolve("dir2"); + Path triggerDir = Files.createDirectories(dir2.resolve("SecurityException")); Files.createFile(triggerDir.resolve("fileInSE")); - Path sample = testFolder.resolve(Paths.get("dir2", "file")); - Files.createFile(sample); + Path sample = Files.createFile(dir2.resolve("file")); + + Path triggerLink = null; + Path linkTriggerDir = null; + Path linkTriggerFile = null; + if (supportsLinks) { + Path dir = testFolder.resolve("dir"); + triggerLink = Files.createSymbolicLink(dir.resolve("SecurityException"), empty); + linkTriggerDir = Files.createSymbolicLink(dir.resolve("lnDirSE"), triggerDir); + linkTriggerFile = Files.createSymbolicLink(dir.resolve("lnFileSE"), triggerFile); + } + FaultyFileSystem.FaultyFSProvider fsp = FaultyFileSystem.FaultyFSProvider.getInstance(); FaultyFileSystem fs = (FaultyFileSystem) fsp.newFileSystem(testFolder, null); @@ -492,10 +502,10 @@ fsp.setFaultyMode(false); Path fakeRoot = fs.getRoot(); // validate setting - try (CloseableStream<Path> s = Files.list(fakeRoot.resolve("dir"))) { + try (CloseableStream<Path> s = Files.list(fakeRoot.resolve("empty"))) { String[] result = s.map(path -> path.getFileName().toString()) .toArray(String[]::new); - 
assertEqualsNoOrder(result, new String[] { "d1","f1", "lnDir2", "SecurityException", "sample" }); + assertEqualsNoOrder(result, new String[] { "SecurityException", "sample" }); } try (CloseableStream<Path> s = Files.walk(fakeRoot.resolve("dir2"))) { @@ -504,13 +514,21 @@ assertEqualsNoOrder(result, new String[] { "dir2", "SecurityException", "fileInSE", "file" }); } + if (supportsLinks) { + try (CloseableStream<Path> s = Files.list(fakeRoot.resolve("dir"))) { + String[] result = s.map(path -> path.getFileName().toString()) + .toArray(String[]::new); + assertEqualsNoOrder(result, new String[] { "d1", "f1", "lnDir2", "SecurityException", "lnDirSE", "lnFileSE" }); + } + } + // execute test fsp.setFaultyMode(true); // ignore file cause SecurityException - try (CloseableStream<Path> s = Files.walk(fakeRoot.resolve("dir"))) { + try (CloseableStream<Path> s = Files.walk(fakeRoot.resolve("empty"))) { String[] result = s.map(path -> path.getFileName().toString()) .toArray(String[]::new); - assertEqualsNoOrder(result, new String[] { "dir", "d1","f1", "lnDir2", "sample" }); + assertEqualsNoOrder(result, new String[] { "empty", "sample" }); } // skip folder cause SecurityException try (CloseableStream<Path> s = Files.walk(fakeRoot.resolve("dir2"))) { @@ -519,11 +537,29 @@ assertEqualsNoOrder(result, new String[] { "dir2", "file" }); } + if (supportsLinks) { + // not following links + try (CloseableStream<Path> s = Files.walk(fakeRoot.resolve("dir"))) { + String[] result = s.map(path -> path.getFileName().toString()) + .toArray(String[]::new); + assertEqualsNoOrder(result, new String[] { "dir", "d1", "f1", "lnDir2", "lnDirSE", "lnFileSE" }); + } + + // following links + try (CloseableStream<Path> s = Files.walk(fakeRoot.resolve("dir"), FileVisitOption.FOLLOW_LINKS)) { + String[] result = s.map(path -> path.getFileName().toString()) + .toArray(String[]::new); + // ?? Should fileInSE show up? 
+ // With FaultyFS, it does as no exception thrown for link to "SecurityException" with read on "lnXxxSE" + assertEqualsNoOrder(result, new String[] { "dir", "d1", "f1", "lnDir2", "file", "lnDirSE", "lnFileSE", "fileInSE" }); + } + } + // list instead of walk - try (CloseableStream<Path> s = Files.list(fakeRoot.resolve("dir"))) { + try (CloseableStream<Path> s = Files.list(fakeRoot.resolve("empty"))) { String[] result = s.map(path -> path.getFileName().toString()) .toArray(String[]::new); - assertEqualsNoOrder(result, new String[] { "d1","f1", "lnDir2", "sample" }); + assertEqualsNoOrder(result, new String[] { "sample" }); } try (CloseableStream<Path> s = Files.list(fakeRoot.resolve("dir2"))) { String[] result = s.map(path -> path.getFileName().toString()) @@ -578,6 +614,11 @@ if (fs != null) { fs.close(); } + if (supportsLinks) { + Files.delete(triggerLink); + Files.delete(linkTriggerDir); + Files.delete(linkTriggerFile); + } Files.delete(triggerFile); Files.delete(sampleFile); Files.delete(sample); @@ -589,7 +630,6 @@ try (CloseableStream<String> s = Files.lines(testFolder.resolve("notExist"), Charset.forName("UTF-8"))) { s.forEach(l -> fail("File is not even exist!")); } catch (IOException ioe) { - ioe.printStackTrace(System.err); assertTrue(ioe instanceof NoSuchFileException); } }
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/java/security/AccessController/LimitedDoPrivileged.java Wed Jun 05 13:10:11 2013 -0300 @@ -0,0 +1,215 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8014097 + * @summary Test the limited privilege scope version of doPrivileged + */ + +import java.security.*; +import java.util.*; + +public class LimitedDoPrivileged { + /* + * Test variations of doPrivileged() and doPrivileged() with a limited privilege scope + * in a sandbox with the usual default permission to read the system properties for the + * file and path separators. + * + * By passing in an "assigned" AccessControlContext that has + * no default permissions we can test how code privileges are being scoped. 
+ */ + + private static final ProtectionDomain domain = + new ProtectionDomain(null, null, null, null); + private static final AccessControlContext acc = + new AccessControlContext(new ProtectionDomain[] { domain }); + private static final PropertyPermission pathPerm = + new PropertyPermission("path.separator", "read"); + private static final PropertyPermission filePerm = + new PropertyPermission("file.separator", "read"); + + public static void main(String[] args) throws Exception { + /* + * Verify that we have the usual default property read permission. + */ + AccessController.getContext().checkPermission(filePerm); + AccessController.getContext().checkPermission(pathPerm); + System.out.println("test 1 passed"); + + /* + * Inject the "no permission" AccessControlContext. + */ + AccessController.doPrivileged(new PrivilegedAction() { + public Object run() { + + /* + * Verify that we no longer have the "file.separator" permission. + */ + try { + AccessController.getContext().checkPermission(pathPerm); + } catch (AccessControlException ace) { + System.out.println("test 2 passed"); + } + + /* + * Verify that we can give ourselves limited privilege to read + * any system property starting with "path.". + */ + AccessController.doPrivileged + (new PrivilegedAction() { + public Object run() { + AccessController.getContext().checkPermission(pathPerm); + return null; + } + }, null, new PropertyPermission("path.*", "read")); + System.out.println("test 3 passed"); + + /* + * Verify that if we give ourselves limited privilege to read + * any system property starting with "path." it won't give us the + * the ability to read "file.separator". 
+ */ + try { + AccessController.doPrivileged + (new PrivilegedAction() { + public Object run() { + AccessController.getContext().checkPermission(filePerm); + return null; + } + }, null, new PropertyPermission("path.*", "read")); + } catch (AccessControlException ace) { + System.out.println("test 4 passed"); + } + + /* + * Verify that capturing and passing in the context with no default + * system property permission grants will prevent access that succeeded + * earlier without the context assignment. + */ + final AccessControlContext context = AccessController.getContext(); + try { + AccessController.doPrivileged + (new PrivilegedAction() { + public Object run() { + AccessController.getContext().checkPermission(pathPerm); + return null; + } + }, context, new PropertyPermission("path.*", "read")); + } catch (AccessControlException ace) { + System.out.println("test 5 passed"); + } + + /* + * Verify that we can give ourselves full privilege to read + * any system property starting with "path.". + */ + AccessController.doPrivileged + (new PrivilegedAction() { + public Object run() { + AccessController.getContext().checkPermission(pathPerm); + return null; + } + }); + System.out.println("test 6 passed"); + + /* + * Verify that capturing and passing in the context with no default + * system property permission grants will prevent access that succeeded + * earlier without the context assignment. + */ + try { + AccessController.doPrivileged + (new PrivilegedAction() { + public Object run() { + AccessController.getContext().checkPermission(pathPerm); + return null; + } + }, context); + } catch (AccessControlException ace) { + System.out.println("test 7 passed"); + } + + /* + * Verify that we can give ourselves limited privilege to read + * any system property starting with "path." when a limited + * privilege scope context is captured and passed to a regular + * doPrivileged() as an assigned context. 
+ */ + AccessController.doPrivileged + (new PrivilegedAction() { + public Object run() { + + /* + * Capture the limited privilege scope and inject it into the + * regular doPrivileged(). + */ + final AccessControlContext limitedContext = AccessController.getContext(); + AccessController.doPrivileged + (new PrivilegedAction() { + public Object run() { + AccessController.getContext().checkPermission(pathPerm); + return null; + } + }, limitedContext); + return null; + } + }, null, new PropertyPermission("path.*", "read")); + System.out.println("test 8 passed"); + + /* + * Verify that we can give ourselves limited privilege to read + * any system property starting with "path." it won't give us the + * the ability to read "file.separator" when a limited + * privilege scope context is captured and passed to a regular + * doPrivileged() as an assigned context. + */ + AccessController.doPrivileged + (new PrivilegedAction() { + public Object run() { + + /* + * Capture the limited privilege scope and inject it into the + * regular doPrivileged(). + */ + final AccessControlContext limitedContext = AccessController.getContext(); + try { + AccessController.doPrivileged + (new PrivilegedAction() { + public Object run() { + AccessController.getContext().checkPermission(filePerm); + return null; + } + }, limitedContext); + } catch (AccessControlException ace) { + System.out.println("test 9 passed"); + } + return null; + } + }, null, new PropertyPermission("path.*", "read")); + + return null; + } + }, acc); + } +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/java/util/Iterator/PrimitiveIteratorDefaults.java Wed Jun 05 13:10:11 2013 -0300 @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +import org.testng.annotations.Test; + +import java.util.PrimitiveIterator; +import java.util.function.Consumer; +import java.util.function.DoubleConsumer; +import java.util.function.IntConsumer; +import java.util.function.LongConsumer; + +import static org.testng.Assert.assertNotNull; +import static org.testng.Assert.assertTrue; + +/** + * @test + * @run testng PrimitiveIteratorDefaults + * @summary test default methods on PrimitiveIterator + */ +@Test +public class PrimitiveIteratorDefaults { + + public void testIntForEachRemainingWithNull() { + PrimitiveIterator.OfInt i = new PrimitiveIterator.OfInt() { + @Override + public int nextInt() { + return 0; + } + + @Override + public boolean hasNext() { + return false; + } + }; + + executeAndCatch(() -> i.forEachRemaining((IntConsumer) null)); + executeAndCatch(() -> i.forEachRemaining((Consumer<Integer>) null)); + } + + public void testLongForEachRemainingWithNull() { + PrimitiveIterator.OfLong i = new PrimitiveIterator.OfLong() { + @Override + public long nextLong() { + return 0; + } + + @Override + public boolean hasNext() { + return false; + } + }; + + executeAndCatch(() -> i.forEachRemaining((LongConsumer) null)); + executeAndCatch(() -> i.forEachRemaining((Consumer<Long>) null)); + } + + public void testDoubleForEachRemainingWithNull() { + PrimitiveIterator.OfDouble i = new PrimitiveIterator.OfDouble() { + @Override + public double nextDouble() { + return 0; + } + + @Override + public boolean hasNext() { + return false; + } + }; + + executeAndCatch(() -> i.forEachRemaining((DoubleConsumer) null)); + executeAndCatch(() -> i.forEachRemaining((Consumer<Double>) null)); + } + + private void executeAndCatch(Runnable r) { + executeAndCatch(NullPointerException.class, r); + } + + private void executeAndCatch(Class<? 
extends Exception> expected, Runnable r) { + Exception caught = null; + try { + r.run(); + } + catch (Exception e) { + caught = e; + } + + assertNotNull(caught, + String.format("No Exception was thrown, expected an Exception of %s to be thrown", + expected.getName())); + assertTrue(expected.isInstance(caught), + String.format("Exception thrown %s not an instance of %s", + caught.getClass().getName(), expected.getName())); + } + +}
--- a/test/java/util/Locale/LocaleCategory.sh Wed May 29 13:22:58 2013 -0300 +++ b/test/java/util/Locale/LocaleCategory.sh Wed Jun 05 13:10:11 2013 -0300 @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -20,7 +21,6 @@ # or visit www.oracle.com if you need additional information or have any # questions. # -#!/bin/sh # # @test # @bug 4700857 6997928 7079486
--- a/test/java/util/Locale/LocaleProviders.java Wed May 29 13:22:58 2013 -0300 +++ b/test/java/util/Locale/LocaleProviders.java Wed Jun 05 13:10:11 2013 -0300 @@ -64,6 +64,10 @@ bug8013086Test(args[1], args[2]); break; + case "bug8013903Test": + bug8013903Test(); + break; + default: throw new RuntimeException("Test method '"+methodName+"' not found."); } @@ -195,4 +199,30 @@ // ParseException is fine in this test, as it's not "UTC" } } + + static void bug8013903Test() { + if (System.getProperty("os.name").startsWith("Windows")) { + Date sampleDate = new Date(0x10000000000L); + String fallbackResult = "Heisei 16.Nov.03 (Wed) AM 11:53:47"; + String jreResult = "\u5e73\u6210 16.11.03 (\u6c34) \u5348\u524d 11:53:47"; + Locale l = new Locale("ja", "JP", "JP"); + SimpleDateFormat sdf = new SimpleDateFormat("GGGG yyyy.MMM.dd '('E')' a hh:mm:ss", l); + String result = sdf.format(sampleDate); + System.out.println(result); + if (LocaleProviderAdapter.getAdapterPreference() + .contains(LocaleProviderAdapter.Type.JRE)) { + if (!jreResult.equals(result)) { + throw new RuntimeException("Format failed. result: \"" + + result + "\", expected: \"" + jreResult); + } + } else { + // should be FALLBACK, as Windows HOST does not return + // display names + if (!fallbackResult.equals(result)) { + throw new RuntimeException("Format failed. result: \"" + + result + "\", expected: \"" + fallbackResult); + } + } + } + } }
--- a/test/java/util/Locale/LocaleProviders.sh Wed May 29 13:22:58 2013 -0300 +++ b/test/java/util/Locale/LocaleProviders.sh Wed Jun 05 13:10:11 2013 -0300 @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -20,11 +21,10 @@ # or visit www.oracle.com if you need additional information or have any # questions. # -#!/bin/sh # # @test # @bug 6336885 7196799 7197573 7198834 8000245 8000615 8001440 8010666 -# 8013086 8013233 +# 8013086 8013233 8013903 # @summary tests for "java.locale.providers" system property # @compile -XDignore.symbol.file LocaleProviders.java # @run shell/timeout=600 LocaleProviders.sh @@ -300,4 +300,18 @@ PARAM3= runTest +# testing 8013903 fix. (Windows only) +METHODNAME=bug8013903Test +PREFLIST=HOST,JRE +PARAM1= +PARAM2= +PARAM3= +runTest +METHODNAME=bug8013903Test +PREFLIST=HOST +PARAM1= +PARAM2= +PARAM3= +runTest + exit $result
--- a/test/java/util/Locale/data/deflocale.sh Wed May 29 13:22:58 2013 -0300 +++ b/test/java/util/Locale/data/deflocale.sh Wed Jun 05 13:10:11 2013 -0300 @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -20,7 +21,6 @@ # or visit www.oracle.com if you need additional information or have any # questions. # -#!/bin/sh # # #
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/java/util/Map/CheckRandomHashSeed.java Wed Jun 05 13:10:11 2013 -0300 @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/** + * @test + * @bug 8005698 + * @summary Check operation of jdk.map.useRandomSeed property + * @run main CheckRandomHashSeed + * @run main/othervm -Djdk.map.useRandomSeed=false CheckRandomHashSeed + * @run main/othervm -Djdk.map.useRandomSeed=bogus CheckRandomHashSeed + * @run main/othervm -Djdk.map.useRandomSeed=true CheckRandomHashSeed true + * @author Brent Christian + */ +import java.lang.reflect.Field; +import java.util.Map; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.Hashtable; +import java.util.WeakHashMap; + +public class CheckRandomHashSeed { + private final static String PROP_NAME = "jdk.map.useRandomSeed"; + static boolean expectRandom = false; + + public static void main(String[] args) { + if (args.length > 0 && args[0].equals("true")) { + expectRandom = true; + } + String hashSeedProp = System.getProperty(PROP_NAME); + boolean propSet = (null != hashSeedProp) + ? Boolean.parseBoolean(hashSeedProp) : false; + if (expectRandom != propSet) { + throw new Error("Error in test setup: " + (expectRandom ? "" : "not " ) + "expecting random hashSeed, but " + PROP_NAME + " is " + (propSet ? 
"" : "not ") + "enabled"); + } + + testMap(new HashMap()); + testMap(new LinkedHashMap()); + testMap(new WeakHashMap()); + testMap(new Hashtable()); + } + + private static void testMap(Map map) { + int hashSeed = getHashSeed(map); + boolean hashSeedIsZero = (hashSeed == 0); + + if (expectRandom != hashSeedIsZero) { + System.out.println("Test passed for " + map.getClass().getSimpleName() + " - expectRandom: " + expectRandom + ", hashSeed: " + hashSeed); + } else { + throw new Error ("Test FAILED for " + map.getClass().getSimpleName() + " - expectRandom: " + expectRandom + ", hashSeed: " + hashSeed); + } + } + + private static int getHashSeed(Map map) { + try { + if (map instanceof HashMap || map instanceof LinkedHashMap) { + map.put("Key", "Value"); + Field hashSeedField = HashMap.class.getDeclaredField("hashSeed"); + hashSeedField.setAccessible(true); + int hashSeed = hashSeedField.getInt(map); + return hashSeed; + } else { + map.put("Key", "Value"); + Field hashSeedField = map.getClass().getDeclaredField("hashSeed"); + hashSeedField.setAccessible(true); + int hashSeed = hashSeedField.getInt(map); + return hashSeed; + } + } catch(Exception e) { + e.printStackTrace(); + throw new Error(e); + } + } +}
--- a/test/java/util/Map/Collisions.java Wed May 29 13:22:58 2013 -0300 +++ b/test/java/util/Map/Collisions.java Wed Jun 05 13:10:11 2013 -0300 @@ -26,6 +26,7 @@ * @bug 7126277 * @run main Collisions -shortrun * @run main/othervm -Djdk.map.althashing.threshold=0 Collisions -shortrun + * @run main/othervm -Djdk.map.useRandomSeed=true Collisions -shortrun * @summary Ensure Maps behave well with lots of hashCode() collisions. * @author Mike Duigou */
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/java/util/Map/InPlaceOpsCollisions.java Wed Jun 05 13:10:11 2013 -0300 @@ -0,0 +1,665 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8005698 + * @run main InPlaceOpsCollisions -shortrun + * @run main/othervm -Djdk.map.randomseed=true InPlaceOpsCollisions -shortrun + * @summary Ensure overrides of in-place operations in Maps behave well with lots of collisions. + * @author Brent Christian + */ +import java.util.*; +import java.util.function.*; + +public class InPlaceOpsCollisions { + + /** + * Number of elements per map. 
+ */ + private static final int TEST_SIZE = 5000; + + final static class HashableInteger implements Comparable<HashableInteger> { + + final int value; + final int hashmask; //yes duplication + + HashableInteger(int value, int hashmask) { + this.value = value; + this.hashmask = hashmask; + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof HashableInteger) { + HashableInteger other = (HashableInteger) obj; + + return other.value == value; + } + + return false; + } + + @Override + public int hashCode() { + return value % hashmask; + } + + @Override + public int compareTo(HashableInteger o) { + return value - o.value; + } + + @Override + public String toString() { + return Integer.toString(value); + } + } + + static HashableInteger EXTRA_INT_VAL; + static String EXTRA_STRING_VAL; + + private static Object[][] makeTestData(int size) { + HashableInteger UNIQUE_OBJECTS[] = new HashableInteger[size]; + HashableInteger COLLIDING_OBJECTS[] = new HashableInteger[size]; + String UNIQUE_STRINGS[] = new String[size]; + String COLLIDING_STRINGS[] = new String[size]; + + for (int i = 0; i < size; i++) { + UNIQUE_OBJECTS[i] = new HashableInteger(i, Integer.MAX_VALUE); + COLLIDING_OBJECTS[i] = new HashableInteger(i, 10); + UNIQUE_STRINGS[i] = unhash(i); + COLLIDING_STRINGS[i] = (0 == i % 2) + ? UNIQUE_STRINGS[i / 2] + : "\u0000\u0000\u0000\u0000\u0000" + COLLIDING_STRINGS[i - 1]; + } + EXTRA_INT_VAL = new HashableInteger(size, Integer.MAX_VALUE); + EXTRA_STRING_VAL = new String ("Extra Value"); + + return new Object[][] { + new Object[]{"Unique Objects", UNIQUE_OBJECTS}, + new Object[]{"Colliding Objects", COLLIDING_OBJECTS}, + new Object[]{"Unique Strings", UNIQUE_STRINGS}, + new Object[]{"Colliding Strings", COLLIDING_STRINGS} + }; + } + + /** + * Returns a string with a hash equal to the argument. + * + * @return string with a hash equal to the argument. 
+ */ + public static String unhash(int target) { + StringBuilder answer = new StringBuilder(); + if (target < 0) { + // String with hash of Integer.MIN_VALUE, 0x80000000 + answer.append("\\u0915\\u0009\\u001e\\u000c\\u0002"); + + if (target == Integer.MIN_VALUE) { + return answer.toString(); + } + // Find target without sign bit set + target = target & Integer.MAX_VALUE; + } + + unhash0(answer, target); + return answer.toString(); + } + + private static void unhash0(StringBuilder partial, int target) { + int div = target / 31; + int rem = target % 31; + + if (div <= Character.MAX_VALUE) { + if (div != 0) { + partial.append((char) div); + } + partial.append((char) rem); + } else { + unhash0(partial, div); + partial.append((char) rem); + } + } + + private static void realMain(String[] args) throws Throwable { + boolean shortRun = args.length > 0 && args[0].equals("-shortrun"); + + Object[][] mapKeys = makeTestData(shortRun ? (TEST_SIZE / 2) : TEST_SIZE); + + // loop through data sets + for (Object[] keys_desc : mapKeys) { + Map<Object, Object>[] maps = (Map<Object, Object>[]) new Map[]{ + new HashMap<>(), + new LinkedHashMap<>(), + }; + + // for each map type. 
+ for (Map<Object, Object> map : maps) { + String desc = (String) keys_desc[0]; + Object[] keys = (Object[]) keys_desc[1]; + try { + testInPlaceOps(map, desc, keys); + } catch(Exception all) { + unexpected("Failed for " + map.getClass().getName() + " with " + desc, all); + } + } + } + } + + private static <T> void testInsertion(Map<T, T> map, String keys_desc, T[] keys) { + check("map empty", (map.size() == 0) && map.isEmpty()); + + for (int i = 0; i < keys.length; i++) { + check(String.format("insertion: map expected size m%d != i%d", map.size(), i), + map.size() == i); + check(String.format("insertion: put(%s[%d])", keys_desc, i), null == map.put(keys[i], keys[i])); + check(String.format("insertion: containsKey(%s[%d])", keys_desc, i), map.containsKey(keys[i])); + check(String.format("insertion: containsValue(%s[%d])", keys_desc, i), map.containsValue(keys[i])); + } + + check(String.format("map expected size m%d != k%d", map.size(), keys.length), + map.size() == keys.length); + } + + + private static <T> void testInPlaceOps(Map<T, T> map, String keys_desc, T[] keys) { + System.out.println(map.getClass() + " : " + keys_desc + ", testInPlaceOps"); + System.out.flush(); + + testInsertion(map, keys_desc, keys); + testPutIfAbsent(map, keys_desc, keys); + + map.clear(); + testInsertion(map, keys_desc, keys); + testRemoveMapping(map, keys_desc, keys); + + map.clear(); + testInsertion(map, keys_desc, keys); + testReplaceOldValue(map, keys_desc, keys); + + map.clear(); + testInsertion(map, keys_desc, keys); + testReplaceIfMapped(map, keys_desc, keys); + + map.clear(); + testInsertion(map, keys_desc, keys); + testComputeIfAbsent(map, keys_desc, keys, (k) -> getExtraVal(keys[0])); + + map.clear(); + testInsertion(map, keys_desc, keys); + testComputeIfAbsent(map, keys_desc, keys, (k) -> null); + + map.clear(); + testInsertion(map, keys_desc, keys); + testComputeIfPresent(map, keys_desc, keys, (k, v) -> getExtraVal(keys[0])); + + map.clear(); + testInsertion(map, keys_desc, 
keys); + testComputeIfPresent(map, keys_desc, keys, (k, v) -> null); + + if (!keys_desc.contains("Strings")) { // avoid parseInt() number format error + map.clear(); + testInsertion(map, keys_desc, keys); + testComputeNonNull(map, keys_desc, keys); + } + + map.clear(); + testInsertion(map, keys_desc, keys); + testComputeNull(map, keys_desc, keys); + + if (!keys_desc.contains("Strings")) { // avoid parseInt() number format error + map.clear(); + testInsertion(map, keys_desc, keys); + testMergeNonNull(map, keys_desc, keys); + } + + map.clear(); + testInsertion(map, keys_desc, keys); + testMergeNull(map, keys_desc, keys); + } + + + + private static <T> void testPutIfAbsent(Map<T, T> map, String keys_desc, T[] keys) { + T extraVal = getExtraVal(keys[0]); + T retVal; + removeOddKeys(map, keys); + for (int i = 0; i < keys.length; i++) { + retVal = map.putIfAbsent(keys[i], extraVal); + if (i % 2 == 0) { // even: not absent, not put + check(String.format("putIfAbsent: (%s[%d]) retVal", keys_desc, i), retVal == keys[i]); + check(String.format("putIfAbsent: get(%s[%d])", keys_desc, i), keys[i] == map.get(keys[i])); + check(String.format("putIfAbsent: containsValue(%s[%d])", keys_desc, i), map.containsValue(keys[i])); + } else { // odd: absent, was put + check(String.format("putIfAbsent: (%s[%d]) retVal", keys_desc, i), retVal == null); + check(String.format("putIfAbsent: get(%s[%d])", keys_desc, i), extraVal == map.get(keys[i])); + check(String.format("putIfAbsent: !containsValue(%s[%d])", keys_desc, i), !map.containsValue(keys[i])); + } + check(String.format("insertion: containsKey(%s[%d])", keys_desc, i), map.containsKey(keys[i])); + } + check(String.format("map expected size m%d != k%d", map.size(), keys.length), + map.size() == keys.length); + } + + private static <T> void testRemoveMapping(Map<T, T> map, String keys_desc, T[] keys) { + T extraVal = getExtraVal(keys[0]); + boolean removed; + int removes = 0; + remapOddKeys(map, keys); + for (int i = 0; i < keys.length; 
i++) { + removed = map.remove(keys[i], keys[i]); + if (i % 2 == 0) { // even: original mapping, should be removed + check(String.format("removeMapping: retVal(%s[%d])", keys_desc, i), removed); + check(String.format("removeMapping: get(%s[%d])", keys_desc, i), null == map.get(keys[i])); + check(String.format("removeMapping: !containsKey(%s[%d])", keys_desc, i), !map.containsKey(keys[i])); + check(String.format("removeMapping: !containsValue(%s[%d])", keys_desc, i), !map.containsValue(keys[i])); + removes++; + } else { // odd: new mapping, not removed + check(String.format("removeMapping: retVal(%s[%d])", keys_desc, i), !removed); + check(String.format("removeMapping: get(%s[%d])", keys_desc, i), extraVal == map.get(keys[i])); + check(String.format("removeMapping: containsKey(%s[%d])", keys_desc, i), map.containsKey(keys[i])); + check(String.format("removeMapping: containsValue(%s[%d])", keys_desc, i), map.containsValue(extraVal)); + } + } + check(String.format("map expected size m%d != k%d", map.size(), keys.length - removes), + map.size() == keys.length - removes); + } + + private static <T> void testReplaceOldValue(Map<T, T> map, String keys_desc, T[] keys) { + // remap odds to extraVal + // call replace to replace for extraVal, for all keys + // check that all keys map to value from keys array + T extraVal = getExtraVal(keys[0]); + boolean replaced; + remapOddKeys(map, keys); + + for (int i = 0; i < keys.length; i++) { + replaced = map.replace(keys[i], extraVal, keys[i]); + if (i % 2 == 0) { // even: original mapping, should not be replaced + check(String.format("replaceOldValue: retVal(%s[%d])", keys_desc, i), !replaced); + } else { // odd: new mapping, should be replaced + check(String.format("replaceOldValue: get(%s[%d])", keys_desc, i), replaced); + } + check(String.format("replaceOldValue: get(%s[%d])", keys_desc, i), keys[i] == map.get(keys[i])); + check(String.format("replaceOldValue: containsKey(%s[%d])", keys_desc, i), map.containsKey(keys[i])); + 
check(String.format("replaceOldValue: containsValue(%s[%d])", keys_desc, i), map.containsValue(keys[i])); +// removes++; + } + check(String.format("replaceOldValue: !containsValue(%s[%s])", keys_desc, extraVal.toString()), !map.containsValue(extraVal)); + check(String.format("map expected size m%d != k%d", map.size(), keys.length), + map.size() == keys.length); + } + + // TODO: Test case for key mapped to null value + private static <T> void testReplaceIfMapped(Map<T, T> map, String keys_desc, T[] keys) { + // remove odd keys + // call replace for all keys[] + // odd keys should remain absent, even keys should be mapped to EXTRA, no value from keys[] should be in map + T extraVal = getExtraVal(keys[0]); + int expectedSize1 = 0; + removeOddKeys(map, keys); + int expectedSize2 = map.size(); + + for (int i = 0; i < keys.length; i++) { + T retVal = map.replace(keys[i], extraVal); + if (i % 2 == 0) { // even: still in map, should be replaced + check(String.format("replaceIfMapped: retVal(%s[%d])", keys_desc, i), retVal == keys[i]); + check(String.format("replaceIfMapped: get(%s[%d])", keys_desc, i), extraVal == map.get(keys[i])); + check(String.format("replaceIfMapped: containsKey(%s[%d])", keys_desc, i), map.containsKey(keys[i])); + expectedSize1++; + } else { // odd: was removed, should not be replaced + check(String.format("replaceIfMapped: retVal(%s[%d])", keys_desc, i), retVal == null); + check(String.format("replaceIfMapped: get(%s[%d])", keys_desc, i), null == map.get(keys[i])); + check(String.format("replaceIfMapped: containsKey(%s[%d])", keys_desc, i), !map.containsKey(keys[i])); + } + check(String.format("replaceIfMapped: !containsValue(%s[%d])", keys_desc, i), !map.containsValue(keys[i])); + } + check(String.format("replaceIfMapped: containsValue(%s[%s])", keys_desc, extraVal.toString()), map.containsValue(extraVal)); + check(String.format("map expected size#1 m%d != k%d", map.size(), expectedSize1), + map.size() == expectedSize1); + check(String.format("map 
expected size#2 m%d != k%d", map.size(), expectedSize2), + map.size() == expectedSize2); + + } + + private static <T> void testComputeIfAbsent(Map<T, T> map, String keys_desc, T[] keys, + Function<T,T> mappingFunction) { + // remove a third of the keys + // call computeIfAbsent for all keys, func returns EXTRA + // check that removed keys now -> EXTRA, other keys -> original val + T expectedVal = mappingFunction.apply(keys[0]); + T retVal; + int expectedSize = 0; + removeThirdKeys(map, keys); + for (int i = 0; i < keys.length; i++) { + retVal = map.computeIfAbsent(keys[i], mappingFunction); + if (i % 3 != 2) { // key present, not computed + check(String.format("computeIfAbsent: (%s[%d]) retVal", keys_desc, i), retVal == keys[i]); + check(String.format("computeIfAbsent: get(%s[%d])", keys_desc, i), keys[i] == map.get(keys[i])); + check(String.format("computeIfAbsent: containsValue(%s[%d])", keys_desc, i), map.containsValue(keys[i])); + check(String.format("insertion: containsKey(%s[%d])", keys_desc, i), map.containsKey(keys[i])); + expectedSize++; + } else { // key absent, computed unless function return null + check(String.format("computeIfAbsent: (%s[%d]) retVal", keys_desc, i), retVal == expectedVal); + check(String.format("computeIfAbsent: get(%s[%d])", keys_desc, i), expectedVal == map.get(keys[i])); + check(String.format("computeIfAbsent: !containsValue(%s[%d])", keys_desc, i), !map.containsValue(keys[i])); + // mapping should not be added if function returns null + check(String.format("insertion: containsKey(%s[%d])", keys_desc, i), map.containsKey(keys[i]) != (expectedVal == null)); + if (expectedVal != null) { expectedSize++; } + } + } + if (expectedVal != null) { + check(String.format("computeIfAbsent: containsValue(%s[%s])", keys_desc, expectedVal), map.containsValue(expectedVal)); + } + check(String.format("map expected size m%d != k%d", map.size(), expectedSize), + map.size() == expectedSize); + } + + private static <T> void testComputeIfPresent(Map<T, 
T> map, String keys_desc, T[] keys, + BiFunction<T,T,T> mappingFunction) { + // remove a third of the keys + // call testComputeIfPresent for all keys[] + // removed keys should remain absent, even keys should be mapped to $RESULT + // no value from keys[] should be in map + T funcResult = mappingFunction.apply(keys[0], keys[0]); + int expectedSize1 = 0; + removeThirdKeys(map, keys); + + for (int i = 0; i < keys.length; i++) { + T retVal = map.computeIfPresent(keys[i], mappingFunction); + if (i % 3 != 2) { // key present + if (funcResult == null) { // was removed + check(String.format("replaceIfMapped: containsKey(%s[%d])", keys_desc, i), !map.containsKey(keys[i])); + } else { // value was replaced + check(String.format("replaceIfMapped: containsKey(%s[%d])", keys_desc, i), map.containsKey(keys[i])); + expectedSize1++; + } + check(String.format("computeIfPresent: retVal(%s[%s])", keys_desc, i), retVal == funcResult); + check(String.format("replaceIfMapped: get(%s[%d])", keys_desc, i), funcResult == map.get(keys[i])); + + } else { // odd: was removed, should not be replaced + check(String.format("replaceIfMapped: retVal(%s[%d])", keys_desc, i), retVal == null); + check(String.format("replaceIfMapped: get(%s[%d])", keys_desc, i), null == map.get(keys[i])); + check(String.format("replaceIfMapped: containsKey(%s[%d])", keys_desc, i), !map.containsKey(keys[i])); + } + check(String.format("replaceIfMapped: !containsValue(%s[%d])", keys_desc, i), !map.containsValue(keys[i])); + } + check(String.format("map expected size#1 m%d != k%d", map.size(), expectedSize1), + map.size() == expectedSize1); + } + + private static <T> void testComputeNonNull(Map<T, T> map, String keys_desc, T[] keys) { + // remove a third of the keys + // call compute() for all keys[] + // all keys should be present: removed keys -> EXTRA, others to k-1 + BiFunction<T,T,T> mappingFunction = (k, v) -> { + if (v == null) { + return getExtraVal(keys[0]); + } else { + return 
keys[Integer.parseInt(k.toString()) - 1]; + } + }; + T extraVal = getExtraVal(keys[0]); + removeThirdKeys(map, keys); + for (int i = 1; i < keys.length; i++) { + T retVal = map.compute(keys[i], mappingFunction); + if (i % 3 != 2) { // key present, should be mapped to k-1 + check(String.format("compute: retVal(%s[%d])", keys_desc, i), retVal == keys[i-1]); + check(String.format("compute: get(%s[%d])", keys_desc, i), keys[i-1] == map.get(keys[i])); + } else { // odd: was removed, should be replaced with EXTRA + check(String.format("compute: retVal(%s[%d])", keys_desc, i), retVal == extraVal); + check(String.format("compute: get(%s[%d])", keys_desc, i), extraVal == map.get(keys[i])); + } + check(String.format("compute: containsKey(%s[%d])", keys_desc, i), map.containsKey(keys[i])); + } + check(String.format("map expected size#1 m%d != k%d", map.size(), keys.length), + map.size() == keys.length); + check(String.format("compute: containsValue(%s[%s])", keys_desc, extraVal.toString()), map.containsValue(extraVal)); + check(String.format("compute: !containsValue(%s,[null])", keys_desc), !map.containsValue(null)); + } + + private static <T> void testComputeNull(Map<T, T> map, String keys_desc, T[] keys) { + // remove a third of the keys + // call compute() for all keys[] + // removed keys should -> EXTRA + // for other keys: func returns null, should have no mapping + BiFunction<T,T,T> mappingFunction = (k, v) -> { + // if absent/null -> EXTRA + // if present -> null + if (v == null) { + return getExtraVal(keys[0]); + } else { + return null; + } + }; + T extraVal = getExtraVal(keys[0]); + int expectedSize = 0; + removeThirdKeys(map, keys); + for (int i = 0; i < keys.length; i++) { + T retVal = map.compute(keys[i], mappingFunction); + if (i % 3 != 2) { // key present, func returned null, should be absent from map + check(String.format("compute: retVal(%s[%d])", keys_desc, i), retVal == null); + check(String.format("compute: get(%s[%d])", keys_desc, i), null == 
map.get(keys[i])); + check(String.format("compute: containsKey(%s[%d])", keys_desc, i), !map.containsKey(keys[i])); + check(String.format("compute: containsValue(%s[%s])", keys_desc, i), !map.containsValue(keys[i])); + } else { // odd: was removed, should now be mapped to EXTRA + check(String.format("compute: retVal(%s[%d])", keys_desc, i), retVal == extraVal); + check(String.format("compute: get(%s[%d])", keys_desc, i), extraVal == map.get(keys[i])); + check(String.format("compute: containsKey(%s[%d])", keys_desc, i), map.containsKey(keys[i])); + expectedSize++; + } + } + check(String.format("compute: containsValue(%s[%s])", keys_desc, extraVal.toString()), map.containsValue(extraVal)); + check(String.format("map expected size#1 m%d != k%d", map.size(), expectedSize), + map.size() == expectedSize); + } + + private static <T> void testMergeNonNull(Map<T, T> map, String keys_desc, T[] keys) { + // remove a third of the keys + // call merge() for all keys[] + // all keys should be present: removed keys now -> EXTRA, other keys -> k-1 + + // Map to preceding key + BiFunction<T,T,T> mappingFunction = (k, v) -> keys[Integer.parseInt(k.toString()) - 1]; + T extraVal = getExtraVal(keys[0]); + removeThirdKeys(map, keys); + for (int i = 1; i < keys.length; i++) { + T retVal = map.merge(keys[i], extraVal, mappingFunction); + if (i % 3 != 2) { // key present, should be mapped to k-1 + check(String.format("compute: retVal(%s[%d])", keys_desc, i), retVal == keys[i-1]); + check(String.format("compute: get(%s[%d])", keys_desc, i), keys[i-1] == map.get(keys[i])); + } else { // odd: was removed, should be replaced with EXTRA + check(String.format("compute: retVal(%s[%d])", keys_desc, i), retVal == extraVal); + check(String.format("compute: get(%s[%d])", keys_desc, i), extraVal == map.get(keys[i])); + } + check(String.format("compute: containsKey(%s[%d])", keys_desc, i), map.containsKey(keys[i])); + } + + check(String.format("map expected size#1 m%d != k%d", map.size(), 
keys.length), + map.size() == keys.length); + check(String.format("compute: containsValue(%s[%s])", keys_desc, extraVal.toString()), map.containsValue(extraVal)); + check(String.format("compute: !containsValue(%s,[null])", keys_desc), !map.containsValue(null)); + + } + + private static <T> void testMergeNull(Map<T, T> map, String keys_desc, T[] keys) { + // remove a third of the keys + // call merge() for all keys[] + // result: removed keys -> EXTRA, other keys absent + + BiFunction<T,T,T> mappingFunction = (k, v) -> null; + T extraVal = getExtraVal(keys[0]); + int expectedSize = 0; + removeThirdKeys(map, keys); + for (int i = 0; i < keys.length; i++) { + T retVal = map.merge(keys[i], extraVal, mappingFunction); + if (i % 3 != 2) { // key present, func returned null, should be absent from map + check(String.format("compute: retVal(%s[%d])", keys_desc, i), retVal == null); + check(String.format("compute: get(%s[%d])", keys_desc, i), null == map.get(keys[i])); + check(String.format("compute: containsKey(%s[%d])", keys_desc, i), !map.containsKey(keys[i])); + } else { // odd: was removed, should now be mapped to EXTRA + check(String.format("compute: retVal(%s[%d])", keys_desc, i), retVal == extraVal); + check(String.format("compute: get(%s[%d])", keys_desc, i), extraVal == map.get(keys[i])); + check(String.format("compute: containsKey(%s[%d])", keys_desc, i), map.containsKey(keys[i])); + expectedSize++; + } + check(String.format("compute: containsValue(%s[%s])", keys_desc, i), !map.containsValue(keys[i])); + } + check(String.format("compute: containsValue(%s[%s])", keys_desc, extraVal.toString()), map.containsValue(extraVal)); + check(String.format("map expected size#1 m%d != k%d", map.size(), expectedSize), + map.size() == expectedSize); + } + + /* + * Return the EXTRA val for the key type being used + */ + private static <T> T getExtraVal(T key) { + if (key instanceof HashableInteger) { + return (T)EXTRA_INT_VAL; + } else { + return (T)EXTRA_STRING_VAL; + } + } + + 
/* + * Remove half of the keys + */ + private static <T> void removeOddKeys(Map<T, T> map, /*String keys_desc, */ T[] keys) { + int removes = 0; + for (int i = 0; i < keys.length; i++) { + if (i % 2 != 0) { + map.remove(keys[i]); + removes++; + } + } + check(String.format("map expected size m%d != k%d", map.size(), keys.length - removes), + map.size() == keys.length - removes); + } + + /* + * Remove every third key + * This will hopefully leave some removed keys in TreeBins for, e.g., computeIfAbsent + * w/ a func that returns null. + * + * TODO: consider using this in other tests (and maybe adding a remapThirdKeys) + */ + private static <T> void removeThirdKeys(Map<T, T> map, /*String keys_desc, */ T[] keys) { + int removes = 0; + for (int i = 0; i < keys.length; i++) { + if (i % 3 == 2) { + map.remove(keys[i]); + removes++; + } + } + check(String.format("map expected size m%d != k%d", map.size(), keys.length - removes), + map.size() == keys.length - removes); + } + + /* + * Re-map the odd-numbered keys to map to the EXTRA value + */ + private static <T> void remapOddKeys(Map<T, T> map, /*String keys_desc, */ T[] keys) { + T extraVal = getExtraVal(keys[0]); + for (int i = 0; i < keys.length; i++) { + if (i % 2 != 0) { + map.put(keys[i], extraVal); + } + } + } + + //--------------------- Infrastructure --------------------------- + static volatile int passed = 0, failed = 0; + + static void pass() { + passed++; + } + + static void fail() { + failed++; + (new Error("Failure")).printStackTrace(System.err); + } + + static void fail(String msg) { + failed++; + (new Error("Failure: " + msg)).printStackTrace(System.err); + } + + static void abort() { + fail(); + System.exit(1); + } + + static void abort(String msg) { + fail(msg); + System.exit(1); + } + + static void unexpected(String msg, Throwable t) { + System.err.println("Unexpected: " + msg); + unexpected(t); + } + + static void unexpected(Throwable t) { + failed++; + t.printStackTrace(System.err); + } + + static 
void check(boolean cond) { + if (cond) { + pass(); + } else { + fail(); + } + } + + static void check(String desc, boolean cond) { + if (cond) { + pass(); + } else { + fail(desc); + } + } + + static void equal(Object x, Object y) { + if (Objects.equals(x, y)) { + pass(); + } else { + fail(x + " not equal to " + y); + } + } + + public static void main(String[] args) throws Throwable { + Thread.currentThread().setName(Collisions.class.getName()); +// Thread.currentThread().setPriority(Thread.MAX_PRIORITY); + try { + realMain(args); + } catch (Throwable t) { + unexpected(t); + } + + System.out.printf("%nPassed = %d, failed = %d%n%n", passed, failed); + if (failed > 0) { + throw new Error("Some tests failed"); + } + } +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/java/util/Map/TreeBinSplitBackToEntries.java Wed Jun 05 13:10:11 2013 -0300 @@ -0,0 +1,255 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +import java.util.*; +import java.lang.reflect.Field; + +/* + * @test + * @bug 8005698 + * @summary Test the case where TreeBin.splitTreeBin() converts a bin back to an Entry list + * @run main TreeBinSplitBackToEntries unused + * @author Brent Christian + */ + +public class TreeBinSplitBackToEntries { + private static int EXPECTED_TREE_THRESHOLD = 16; + + // Easiest if this covers one bit higher then 'bit' in splitTreeBin() on the + // call where the TreeBin is converted back to an Entry list + private static int HASHMASK = 0x7F; + private static boolean verbose = false; + private static boolean fastFail = false; + private static boolean failed = false; + + static void printlnIfVerbose(String msg) { + if (verbose) {System.out.println(msg); } + } + + public static void main(String[] args) { + for (String arg : args) { + switch(arg) { + case "-verbose": + verbose = true; + break; + case "-fastfail": + fastFail = true; + break; + } + } + checkTreeThreshold(); + testMapHiTree(); + testMapLoTree(); + if (failed) { + System.out.println("Test Failed"); + System.exit(1); + } else { + System.out.println("Test Passed"); + } + } + + public static void checkTreeThreshold() { + int threshold = -1; + try { + Class treeBinClass = Class.forName("java.util.HashMap$TreeBin"); + Field treeThreshold = treeBinClass.getDeclaredField("TREE_THRESHOLD"); + treeThreshold.setAccessible(true); + threshold = treeThreshold.getInt(treeBinClass); + } catch (ClassNotFoundException|NoSuchFieldException|IllegalAccessException e) { + e.printStackTrace(); + throw new Error("Problem accessing TreeBin.TREE_THRESHOLD", e); + } + check("Expected TREE_THRESHOLD: " + EXPECTED_TREE_THRESHOLD +", found: " + threshold, + threshold == EXPECTED_TREE_THRESHOLD); + printlnIfVerbose("TREE_THRESHOLD: " + threshold); + } + + public static void testMapHiTree() { + Object[][] mapKeys = makeHiTreeTestData(); + testMapsForKeys(mapKeys, "hiTree"); + } + + public static void testMapLoTree() { + Object[][] mapKeys = 
makeLoTreeTestData(); + + testMapsForKeys(mapKeys, "loTree"); + } + + public static void testMapsForKeys(Object[][] mapKeys, String desc) { + // loop through data sets + for (Object[] keys_desc : mapKeys) { + Map<Object, Object>[] maps = (Map<Object, Object>[]) new Map[]{ + new HashMap<>(4, 0.8f), + new LinkedHashMap<>(4, 0.8f), + }; + // for each map type. + for (Map<Object, Object> map : maps) { + Object[] keys = (Object[]) keys_desc[1]; + System.out.println(desc + ": testPutThenGet() for " + map.getClass()); + testPutThenGet(map, keys); + } + } + } + + private static <T> void testPutThenGet(Map<T, T> map, T[] keys) { + for (T key : keys) { + printlnIfVerbose("put()ing 0x" + Integer.toHexString(Integer.parseInt(key.toString())) + ", hashCode=" + Integer.toHexString(key.hashCode())); + map.put(key, key); + } + for (T key : keys) { + check("key: 0x" + Integer.toHexString(Integer.parseInt(key.toString())) + " not found in resulting " + map.getClass().getSimpleName(), map.get(key) != null); + } + } + + /* Data to force a non-empty loTree in TreeBin.splitTreeBin() to be converted back + * into an Entry list + */ + private static Object[][] makeLoTreeTestData() { + HashableInteger COLLIDING_OBJECTS[] = new HashableInteger[] { + new HashableInteger( 0x23, HASHMASK), + new HashableInteger( 0x123, HASHMASK), + new HashableInteger( 0x323, HASHMASK), + new HashableInteger( 0x523, HASHMASK), + + new HashableInteger( 0x723, HASHMASK), + new HashableInteger( 0x923, HASHMASK), + new HashableInteger( 0xB23, HASHMASK), + new HashableInteger( 0xD23, HASHMASK), + + new HashableInteger( 0xF23, HASHMASK), + new HashableInteger( 0xF123, HASHMASK), + new HashableInteger( 0x1023, HASHMASK), + new HashableInteger( 0x1123, HASHMASK), + + new HashableInteger( 0x1323, HASHMASK), + new HashableInteger( 0x1523, HASHMASK), + new HashableInteger( 0x1723, HASHMASK), + new HashableInteger( 0x1923, HASHMASK), + + new HashableInteger( 0x1B23, HASHMASK), + new HashableInteger( 0x1D23, HASHMASK), + 
new HashableInteger( 0x3123, HASHMASK), + new HashableInteger( 0x3323, HASHMASK), + new HashableInteger( 0x3523, HASHMASK), + + new HashableInteger( 0x3723, HASHMASK), + new HashableInteger( 0x1001, HASHMASK), + new HashableInteger( 0x4001, HASHMASK), + new HashableInteger( 0x1, HASHMASK), + }; + return new Object[][] { + new Object[]{"Colliding Objects", COLLIDING_OBJECTS}, + }; + } + + /* Data to force the hiTree in TreeBin.splitTreeBin() to be converted back + * into an Entry list + */ + private static Object[][] makeHiTreeTestData() { + HashableInteger COLLIDING_OBJECTS[] = new HashableInteger[] { + new HashableInteger( 0x1, HASHMASK), + new HashableInteger( 0x101, HASHMASK), + new HashableInteger( 0x301, HASHMASK), + new HashableInteger( 0x501, HASHMASK), + new HashableInteger( 0x701, HASHMASK), + + new HashableInteger( 0x1001, HASHMASK), + new HashableInteger( 0x1101, HASHMASK), + new HashableInteger( 0x1301, HASHMASK), + + new HashableInteger( 0x1501, HASHMASK), + new HashableInteger( 0x1701, HASHMASK), + new HashableInteger( 0x4001, HASHMASK), + new HashableInteger( 0x4101, HASHMASK), + new HashableInteger( 0x4301, HASHMASK), + + new HashableInteger( 0x4501, HASHMASK), + new HashableInteger( 0x4701, HASHMASK), + new HashableInteger( 0x8001, HASHMASK), + new HashableInteger( 0x8101, HASHMASK), + + + new HashableInteger( 0x8301, HASHMASK), + new HashableInteger( 0x8501, HASHMASK), + new HashableInteger( 0x8701, HASHMASK), + new HashableInteger( 0x9001, HASHMASK), + + new HashableInteger( 0x23, HASHMASK), + new HashableInteger( 0x123, HASHMASK), + new HashableInteger( 0x323, HASHMASK), + new HashableInteger( 0x523, HASHMASK), + }; + return new Object[][] { + new Object[]{"Colliding Objects", COLLIDING_OBJECTS}, + }; + } + + static void check(String desc, boolean cond) { + if (!cond) { + fail(desc); + } + } + + static void fail(String msg) { + failed = true; + (new Error("Failure: " + msg)).printStackTrace(System.err); + if (fastFail) { + System.exit(1); + } + 
} + + final static class HashableInteger implements Comparable<HashableInteger> { + final int value; + final int hashmask; //yes duplication + + HashableInteger(int value, int hashmask) { + this.value = value; + this.hashmask = hashmask; + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof HashableInteger) { + HashableInteger other = (HashableInteger) obj; + return other.value == value; + } + return false; + } + + @Override + public int hashCode() { + // This version ANDs the mask + return value & hashmask; + } + + @Override + public int compareTo(HashableInteger o) { + return value - o.value; + } + + @Override + public String toString() { + return Integer.toString(value); + } + } +}
--- a/test/java/util/PluggableLocale/BreakIteratorProviderTest.sh Wed May 29 13:22:58 2013 -0300 +++ b/test/java/util/PluggableLocale/BreakIteratorProviderTest.sh Wed Jun 05 13:10:11 2013 -0300 @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2007, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -20,7 +21,6 @@ # or visit www.oracle.com if you need additional information or have any # questions. # -#!/bin/sh # # @test # @bug 4052440
--- a/test/java/util/PluggableLocale/CalendarDataProviderTest.sh Wed May 29 13:22:58 2013 -0300 +++ b/test/java/util/PluggableLocale/CalendarDataProviderTest.sh Wed Jun 05 13:10:11 2013 -0300 @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -20,7 +21,6 @@ # or visit www.oracle.com if you need additional information or have any # questions. # -#!/bin/sh # # @test # @bug 7058207 8000986
--- a/test/java/util/PluggableLocale/ClasspathTest.sh Wed May 29 13:22:58 2013 -0300 +++ b/test/java/util/PluggableLocale/ClasspathTest.sh Wed Jun 05 13:10:11 2013 -0300 @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2007, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -20,7 +21,6 @@ # or visit www.oracle.com if you need additional information or have any # questions. # -#!/bin/sh # # @test # @bug 6388652
--- a/test/java/util/PluggableLocale/CollatorProviderTest.sh Wed May 29 13:22:58 2013 -0300 +++ b/test/java/util/PluggableLocale/CollatorProviderTest.sh Wed Jun 05 13:10:11 2013 -0300 @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2007, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -20,7 +21,6 @@ # or visit www.oracle.com if you need additional information or have any # questions. # -#!/bin/sh # # @test # @bug 4052440
--- a/test/java/util/PluggableLocale/CurrencyNameProviderTest.sh Wed May 29 13:22:58 2013 -0300 +++ b/test/java/util/PluggableLocale/CurrencyNameProviderTest.sh Wed Jun 05 13:10:11 2013 -0300 @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -20,7 +21,6 @@ # or visit www.oracle.com if you need additional information or have any # questions. # -#!/bin/sh # # @test # @bug 4052440 7199750 8000997
--- a/test/java/util/PluggableLocale/DateFormatProviderTest.sh Wed May 29 13:22:58 2013 -0300 +++ b/test/java/util/PluggableLocale/DateFormatProviderTest.sh Wed Jun 05 13:10:11 2013 -0300 @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -20,7 +21,6 @@ # or visit www.oracle.com if you need additional information or have any # questions. # -#!/bin/sh # # @test # @bug 4052440 7003643
--- a/test/java/util/PluggableLocale/DateFormatSymbolsProviderTest.sh Wed May 29 13:22:58 2013 -0300 +++ b/test/java/util/PluggableLocale/DateFormatSymbolsProviderTest.sh Wed Jun 05 13:10:11 2013 -0300 @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -20,7 +21,6 @@ # or visit www.oracle.com if you need additional information or have any # questions. # -#!/bin/sh # # @test # @bug 4052440 7200341
--- a/test/java/util/PluggableLocale/DecimalFormatSymbolsProviderTest.sh Wed May 29 13:22:58 2013 -0300 +++ b/test/java/util/PluggableLocale/DecimalFormatSymbolsProviderTest.sh Wed Jun 05 13:10:11 2013 -0300 @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2007, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -20,7 +21,6 @@ # or visit www.oracle.com if you need additional information or have any # questions. # -#!/bin/sh # # @test # @bug 4052440
--- a/test/java/util/PluggableLocale/ExecTest.sh Wed May 29 13:22:58 2013 -0300 +++ b/test/java/util/PluggableLocale/ExecTest.sh Wed Jun 05 13:10:11 2013 -0300 @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -20,7 +21,6 @@ # or visit www.oracle.com if you need additional information or have any # questions. # -#!/bin/sh # # #
--- a/test/java/util/PluggableLocale/GenericTest.sh Wed May 29 13:22:58 2013 -0300 +++ b/test/java/util/PluggableLocale/GenericTest.sh Wed Jun 05 13:10:11 2013 -0300 @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2007, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -20,7 +21,6 @@ # or visit www.oracle.com if you need additional information or have any # questions. # -#!/bin/sh # # @test # @bug 4052440
--- a/test/java/util/PluggableLocale/LocaleNameProviderTest.sh Wed May 29 13:22:58 2013 -0300 +++ b/test/java/util/PluggableLocale/LocaleNameProviderTest.sh Wed Jun 05 13:10:11 2013 -0300 @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -20,7 +21,6 @@ # or visit www.oracle.com if you need additional information or have any # questions. # -#!/bin/sh # # @test # @bug 4052440 8000273
--- a/test/java/util/PluggableLocale/NumberFormatProviderTest.sh Wed May 29 13:22:58 2013 -0300 +++ b/test/java/util/PluggableLocale/NumberFormatProviderTest.sh Wed Jun 05 13:10:11 2013 -0300 @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -20,7 +21,6 @@ # or visit www.oracle.com if you need additional information or have any # questions. # -#!/bin/sh # # @test # @bug 4052440 7003643
--- a/test/java/util/PluggableLocale/TimeZoneNameProviderTest.sh Wed May 29 13:22:58 2013 -0300 +++ b/test/java/util/PluggableLocale/TimeZoneNameProviderTest.sh Wed Jun 05 13:10:11 2013 -0300 @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -20,7 +21,6 @@ # or visit www.oracle.com if you need additional information or have any # questions. # -#!/bin/sh # # @test # @bug 4052440 8003267
--- a/test/java/util/ResourceBundle/Bug6299235Test.sh Wed May 29 13:22:58 2013 -0300 +++ b/test/java/util/ResourceBundle/Bug6299235Test.sh Wed Jun 05 13:10:11 2013 -0300 @@ -1,4 +1,4 @@ -# +#!/bin/sh # Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. #
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/java/util/Spliterator/SpliteratorCollisions.java Wed Jun 05 13:10:11 2013 -0300 @@ -0,0 +1,707 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/** + * @test + * @bug 8005698 + * @run testng SpliteratorCollisions + * @summary Spliterator traversing and splitting hash maps containing colliding hashes + * @author Brent Christian + */ + +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Deque; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Spliterator; +import java.util.TreeSet; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.function.LongConsumer; +import java.util.function.Supplier; +import java.util.function.UnaryOperator; + +import static org.testng.Assert.*; +import static org.testng.Assert.assertEquals; + +@Test +public class SpliteratorCollisions { + + private static List<Integer> SIZES = Arrays.asList(0, 1, 10, 100, 1000); + + private static class SpliteratorDataBuilder<T> { + List<Object[]> data; + List<T> exp; + Map<T, T> mExp; + + SpliteratorDataBuilder(List<Object[]> data, List<T> exp) { + this.data = data; + this.exp = exp; + this.mExp = createMap(exp); + } + + Map<T, T> createMap(List<T> l) { + Map<T, T> m = new LinkedHashMap<>(); + for (T t : l) { + m.put(t, t); + } + return m; + } + + void add(String description, Collection<?> expected, Supplier<Spliterator<?>> s) { + description = joiner(description).toString(); + data.add(new Object[]{description, expected, s}); + } + + void add(String description, Supplier<Spliterator<?>> s) { + add(description, exp, s); + } + + void addCollection(Function<Collection<T>, ? extends Collection<T>> c) { + add("new " + c.apply(Collections.<T>emptyList()).getClass().getName() + ".spliterator()", + () -> c.apply(exp).spliterator()); + } + + void addList(Function<Collection<T>, ? 
extends List<T>> l) { + // @@@ If collection is instance of List then add sub-list tests + addCollection(l); + } + + void addMap(Function<Map<T, T>, ? extends Map<T, T>> m) { + String description = "new " + m.apply(Collections.<T, T>emptyMap()).getClass().getName(); + add(description + ".keySet().spliterator()", () -> m.apply(mExp).keySet().spliterator()); + add(description + ".values().spliterator()", () -> m.apply(mExp).values().spliterator()); + add(description + ".entrySet().spliterator()", mExp.entrySet(), () -> m.apply(mExp).entrySet().spliterator()); + } + + StringBuilder joiner(String description) { + return new StringBuilder(description). + append(" {"). + append("size=").append(exp.size()). + append("}"); + } + } + + static Object[][] spliteratorDataProvider; + + @DataProvider(name = "HashableIntSpliterator") + public static Object[][] spliteratorDataProvider() { + if (spliteratorDataProvider != null) { + return spliteratorDataProvider; + } + + List<Object[]> data = new ArrayList<>(); + for (int size : SIZES) { + List<HashableInteger> exp = listIntRange(size, false); + SpliteratorDataBuilder<HashableInteger> db = new SpliteratorDataBuilder<>(data, exp); + + // Maps + db.addMap(HashMap::new); + db.addMap(LinkedHashMap::new); + + // Collections that use HashMap + db.addCollection(HashSet::new); + db.addCollection(LinkedHashSet::new); + db.addCollection(TreeSet::new); + } + return spliteratorDataProvider = data.toArray(new Object[0][]); + } + + static Object[][] spliteratorDataProviderWithNull; + + @DataProvider(name = "HashableIntSpliteratorWithNull") + public static Object[][] spliteratorNullDataProvider() { + if (spliteratorDataProviderWithNull != null) { + return spliteratorDataProviderWithNull; + } + + List<Object[]> data = new ArrayList<>(); + for (int size : SIZES) { + List<HashableInteger> exp = listIntRange(size, true); + exp.add(0, null); + SpliteratorDataBuilder<HashableInteger> db = new SpliteratorDataBuilder<>(data, exp); + + // Maps + 
db.addMap(HashMap::new); + db.addMap(LinkedHashMap::new); + // TODO: add this back in if we decide to keep TreeBin in WeakHashMap + //db.addMap(WeakHashMap::new); + + // Collections that use HashMap + db.addCollection(HashSet::new); + db.addCollection(LinkedHashSet::new); +// db.addCollection(TreeSet::new); + + } + return spliteratorDataProviderWithNull = data.toArray(new Object[0][]); + } + + final static class HashableInteger implements Comparable<HashableInteger> { + + final int value; + final int hashmask; //yes duplication + + HashableInteger(int value, int hashmask) { + this.value = value; + this.hashmask = hashmask; + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof HashableInteger) { + HashableInteger other = (HashableInteger) obj; + + return other.value == value; + } + + return false; + } + + @Override + public int hashCode() { + return value % hashmask; + } + + @Override + public int compareTo(HashableInteger o) { + return value - o.value; + } + + @Override + public String toString() { + return Integer.toString(value); + } + } + + private static List<HashableInteger> listIntRange(int upTo, boolean withNull) { + List<HashableInteger> exp = new ArrayList<>(); + if (withNull) { + exp.add(null); + } + for (int i = 0; i < upTo; i++) { + exp.add(new HashableInteger(i, 10)); + } + return Collections.unmodifiableList(exp); + } + + @Test(dataProvider = "HashableIntSpliterator") + @SuppressWarnings({"unchecked", "rawtypes"}) + public void testNullPointerException(String description, Collection exp, Supplier<Spliterator> s) { + executeAndCatch(NullPointerException.class, () -> s.get().forEachRemaining(null)); + executeAndCatch(NullPointerException.class, () -> s.get().tryAdvance(null)); + } + + @Test(dataProvider = "HashableIntSpliteratorWithNull") + @SuppressWarnings({"unchecked", "rawtypes"}) + public void testNullPointerExceptionWithNull(String description, Collection exp, Supplier<Spliterator> s) { + 
executeAndCatch(NullPointerException.class, () -> s.get().forEachRemaining(null)); + executeAndCatch(NullPointerException.class, () -> s.get().tryAdvance(null)); + } + + + @Test(dataProvider = "HashableIntSpliterator") + @SuppressWarnings({"unchecked", "rawtypes"}) + public void testForEach(String description, Collection exp, Supplier<Spliterator> s) { + testForEach(exp, s, (Consumer<Object> b) -> b); + } + + @Test(dataProvider = "HashableIntSpliteratorWithNull") + @SuppressWarnings({"unchecked", "rawtypes"}) + public void testForEachWithNull(String description, Collection exp, Supplier<Spliterator> s) { + testForEach(exp, s, (Consumer<Object> b) -> b); + } + + + @Test(dataProvider = "HashableIntSpliterator") + @SuppressWarnings({"unchecked", "rawtypes"}) + public void testTryAdvance(String description, Collection exp, Supplier<Spliterator> s) { + testTryAdvance(exp, s, (Consumer<Object> b) -> b); + } + + @Test(dataProvider = "HashableIntSpliteratorWithNull") + @SuppressWarnings({"unchecked", "rawtypes"}) + public void testTryAdvanceWithNull(String description, Collection exp, Supplier<Spliterator> s) { + testTryAdvance(exp, s, (Consumer<Object> b) -> b); + } + +/* skip this test until 8013649 is fixed + @Test(dataProvider = "HashableIntSpliterator") + @SuppressWarnings({"unchecked", "rawtypes"}) + public void testMixedTryAdvanceForEach(String description, Collection exp, Supplier<Spliterator> s) { + testMixedTryAdvanceForEach(exp, s, (Consumer<Object> b) -> b); + } + + @Test(dataProvider = "HashableIntSpliteratorWithNull") + @SuppressWarnings({"unchecked", "rawtypes"}) + public void testMixedTryAdvanceForEachWithNull(String description, Collection exp, Supplier<Spliterator> s) { + testMixedTryAdvanceForEach(exp, s, (Consumer<Object> b) -> b); + } +*/ + + @Test(dataProvider = "HashableIntSpliterator") + @SuppressWarnings({"unchecked", "rawtypes"}) + public void testSplitAfterFullTraversal(String description, Collection exp, Supplier<Spliterator> s) { + 
testSplitAfterFullTraversal(s, (Consumer<Object> b) -> b); + } + + @Test(dataProvider = "HashableIntSpliteratorWithNull") + @SuppressWarnings({"unchecked", "rawtypes"}) + public void testSplitAfterFullTraversalWithNull(String description, Collection exp, Supplier<Spliterator> s) { + testSplitAfterFullTraversal(s, (Consumer<Object> b) -> b); + } + + + @Test(dataProvider = "HashableIntSpliterator") + @SuppressWarnings({"unchecked", "rawtypes"}) + public void testSplitOnce(String description, Collection exp, Supplier<Spliterator> s) { + testSplitOnce(exp, s, (Consumer<Object> b) -> b); + } + + @Test(dataProvider = "HashableIntSpliteratorWithNull") + @SuppressWarnings({"unchecked", "rawtypes"}) + public void testSplitOnceWithNull(String description, Collection exp, Supplier<Spliterator> s) { + testSplitOnce(exp, s, (Consumer<Object> b) -> b); + } + + @Test(dataProvider = "HashableIntSpliterator") + @SuppressWarnings({"unchecked", "rawtypes"}) + public void testSplitSixDeep(String description, Collection exp, Supplier<Spliterator> s) { + testSplitSixDeep(exp, s, (Consumer<Object> b) -> b); + } + + @Test(dataProvider = "HashableIntSpliteratorWithNull") + @SuppressWarnings({"unchecked", "rawtypes"}) + public void testSplitSixDeepWithNull(String description, Collection exp, Supplier<Spliterator> s) { + testSplitSixDeep(exp, s, (Consumer<Object> b) -> b); + } + + @Test(dataProvider = "HashableIntSpliterator") + @SuppressWarnings({"unchecked", "rawtypes"}) + public void testSplitUntilNull(String description, Collection exp, Supplier<Spliterator> s) { + testSplitUntilNull(exp, s, (Consumer<Object> b) -> b); + } + + @Test(dataProvider = "HashableIntSpliteratorWithNull") + @SuppressWarnings({"unchecked", "rawtypes"}) + public void testSplitUntilNullWithNull(String description, Collection exp, Supplier<Spliterator> s) { + testSplitUntilNull(exp, s, (Consumer<Object> b) -> b); + } + + private static <T, S extends Spliterator<T>> void testForEach( + Collection<T> exp, + 
Supplier<S> supplier, + UnaryOperator<Consumer<T>> boxingAdapter) { + S spliterator = supplier.get(); + long sizeIfKnown = spliterator.getExactSizeIfKnown(); + boolean isOrdered = spliterator.hasCharacteristics(Spliterator.ORDERED); + + ArrayList<T> fromForEach = new ArrayList<>(); + spliterator = supplier.get(); + Consumer<T> addToFromForEach = boxingAdapter.apply(fromForEach::add); + spliterator.forEachRemaining(addToFromForEach); + + // Assert that forEach now produces no elements + spliterator.forEachRemaining(boxingAdapter.apply(e -> fail("Spliterator.forEach produced an element after spliterator exhausted: " + e))); + // Assert that tryAdvance now produce no elements + spliterator.tryAdvance(boxingAdapter.apply(e -> fail("Spliterator.tryAdvance produced an element after spliterator exhausted: " + e))); + + // assert that size, tryAdvance, and forEach are consistent + if (sizeIfKnown >= 0) { + assertEquals(sizeIfKnown, exp.size()); + } + if (exp.contains(null)) { + assertTrue(fromForEach.contains(null)); + } + assertEquals(fromForEach.size(), exp.size()); + + assertContents(fromForEach, exp, isOrdered); + } + + private static <T, S extends Spliterator<T>> void testTryAdvance( + Collection<T> exp, + Supplier<S> supplier, + UnaryOperator<Consumer<T>> boxingAdapter) { + S spliterator = supplier.get(); + long sizeIfKnown = spliterator.getExactSizeIfKnown(); + boolean isOrdered = spliterator.hasCharacteristics(Spliterator.ORDERED); + + spliterator = supplier.get(); + ArrayList<T> fromTryAdvance = new ArrayList<>(); + Consumer<T> addToFromTryAdvance = boxingAdapter.apply(fromTryAdvance::add); + while (spliterator.tryAdvance(addToFromTryAdvance)) { } + + // Assert that forEach now produces no elements + spliterator.forEachRemaining(boxingAdapter.apply(e -> fail("Spliterator.forEach produced an element after spliterator exhausted: " + e))); + // Assert that tryAdvance now produce no elements + spliterator.tryAdvance(boxingAdapter.apply(e -> 
fail("Spliterator.tryAdvance produced an element after spliterator exhausted: " + e))); + + // assert that size, tryAdvance, and forEach are consistent + if (sizeIfKnown >= 0) { + assertEquals(sizeIfKnown, exp.size()); + } + assertEquals(fromTryAdvance.size(), exp.size()); + + assertContents(fromTryAdvance, exp, isOrdered); + } + + private static <T, S extends Spliterator<T>> void testMixedTryAdvanceForEach( + Collection<T> exp, + Supplier<S> supplier, + UnaryOperator<Consumer<T>> boxingAdapter) { + S spliterator = supplier.get(); + long sizeIfKnown = spliterator.getExactSizeIfKnown(); + boolean isOrdered = spliterator.hasCharacteristics(Spliterator.ORDERED); + + // tryAdvance first few elements, then forEach rest + ArrayList<T> dest = new ArrayList<>(); + spliterator = supplier.get(); + Consumer<T> addToDest = boxingAdapter.apply(dest::add); + for (int i = 0; i < 10 && spliterator.tryAdvance(addToDest); i++) { } + spliterator.forEachRemaining(addToDest); + + // Assert that forEach now produces no elements + spliterator.forEachRemaining(boxingAdapter.apply(e -> fail("Spliterator.forEach produced an element after spliterator exhausted: " + e))); + // Assert that tryAdvance now produce no elements + spliterator.tryAdvance(boxingAdapter.apply(e -> fail("Spliterator.tryAdvance produced an element after spliterator exhausted: " + e))); + + if (sizeIfKnown >= 0) { + assertEquals(sizeIfKnown, dest.size()); + } + assertEquals(dest.size(), exp.size()); + + if (isOrdered) { + assertEquals(dest, exp); + } + else { + assertContentsUnordered(dest, exp); + } + } + + private static <T, S extends Spliterator<T>> void testSplitAfterFullTraversal( + Supplier<S> supplier, + UnaryOperator<Consumer<T>> boxingAdapter) { + // Full traversal using tryAdvance + Spliterator<T> spliterator = supplier.get(); + while (spliterator.tryAdvance(boxingAdapter.apply(e -> { }))) { } + Spliterator<T> split = spliterator.trySplit(); + assertNull(split); + + // Full traversal using forEach + spliterator 
= supplier.get(); + spliterator.forEachRemaining(boxingAdapter.apply(e -> { + })); + split = spliterator.trySplit(); + assertNull(split); + + // Full traversal using tryAdvance then forEach + spliterator = supplier.get(); + spliterator.tryAdvance(boxingAdapter.apply(e -> { })); + spliterator.forEachRemaining(boxingAdapter.apply(e -> { + })); + split = spliterator.trySplit(); + assertNull(split); + } + + private static <T, S extends Spliterator<T>> void testSplitOnce( + Collection<T> exp, + Supplier<S> supplier, + UnaryOperator<Consumer<T>> boxingAdapter) { + S spliterator = supplier.get(); + long sizeIfKnown = spliterator.getExactSizeIfKnown(); + boolean isOrdered = spliterator.hasCharacteristics(Spliterator.ORDERED); + + ArrayList<T> fromSplit = new ArrayList<>(); + Spliterator<T> s1 = supplier.get(); + Spliterator<T> s2 = s1.trySplit(); + long s1Size = s1.getExactSizeIfKnown(); + long s2Size = (s2 != null) ? s2.getExactSizeIfKnown() : 0; + + Consumer<T> addToFromSplit = boxingAdapter.apply(fromSplit::add); + if (s2 != null) + s2.forEachRemaining(addToFromSplit); + s1.forEachRemaining(addToFromSplit); + + if (sizeIfKnown >= 0) { + assertEquals(sizeIfKnown, fromSplit.size()); + if (s1Size >= 0 && s2Size >= 0) + assertEquals(sizeIfKnown, s1Size + s2Size); + } + assertContents(fromSplit, exp, isOrdered); + } + + private static <T, S extends Spliterator<T>> void testSplitSixDeep( + Collection<T> exp, + Supplier<S> supplier, + UnaryOperator<Consumer<T>> boxingAdapter) { + S spliterator = supplier.get(); + boolean isOrdered = spliterator.hasCharacteristics(Spliterator.ORDERED); + + for (int depth=0; depth < 6; depth++) { + List<T> dest = new ArrayList<>(); + spliterator = supplier.get(); + + assertSpliterator(spliterator); + + // verify splitting with forEach + visit(depth, 0, dest, spliterator, boxingAdapter, spliterator.characteristics(), false); + assertContents(dest, exp, isOrdered); + + // verify splitting with tryAdvance + dest.clear(); + spliterator = 
supplier.get(); + visit(depth, 0, dest, spliterator, boxingAdapter, spliterator.characteristics(), true); + assertContents(dest, exp, isOrdered); + } + } + + private static <T, S extends Spliterator<T>> void visit(int depth, int curLevel, + List<T> dest, S spliterator, UnaryOperator<Consumer<T>> boxingAdapter, + int rootCharacteristics, boolean useTryAdvance) { + if (curLevel < depth) { + long beforeSize = spliterator.getExactSizeIfKnown(); + Spliterator<T> split = spliterator.trySplit(); + if (split != null) { + assertSpliterator(split, rootCharacteristics); + assertSpliterator(spliterator, rootCharacteristics); + + if ((rootCharacteristics & Spliterator.SUBSIZED) != 0 && + (rootCharacteristics & Spliterator.SIZED) != 0) { + assertEquals(beforeSize, split.estimateSize() + spliterator.estimateSize()); + } + visit(depth, curLevel + 1, dest, split, boxingAdapter, rootCharacteristics, useTryAdvance); + } + visit(depth, curLevel + 1, dest, spliterator, boxingAdapter, rootCharacteristics, useTryAdvance); + } + else { + long sizeIfKnown = spliterator.getExactSizeIfKnown(); + if (useTryAdvance) { + Consumer<T> addToDest = boxingAdapter.apply(dest::add); + int count = 0; + while (spliterator.tryAdvance(addToDest)) { + ++count; + } + + if (sizeIfKnown >= 0) + assertEquals(sizeIfKnown, count); + + // Assert that forEach now produces no elements + spliterator.forEachRemaining(boxingAdapter.apply(e -> fail("Spliterator.forEach produced an element after spliterator exhausted: " + e))); + + Spliterator<T> split = spliterator.trySplit(); + assertNull(split); + } + else { + List<T> leafDest = new ArrayList<>(); + Consumer<T> addToLeafDest = boxingAdapter.apply(leafDest::add); + spliterator.forEachRemaining(addToLeafDest); + + if (sizeIfKnown >= 0) + assertEquals(sizeIfKnown, leafDest.size()); + + // Assert that forEach now produces no elements + spliterator.tryAdvance(boxingAdapter.apply(e -> fail("Spliterator.tryAdvance produced an element after spliterator exhausted: " + e))); + 
+ Spliterator<T> split = spliterator.trySplit(); + assertNull(split); + + dest.addAll(leafDest); + } + } + } + + private static <T, S extends Spliterator<T>> void testSplitUntilNull( + Collection<T> exp, + Supplier<S> supplier, + UnaryOperator<Consumer<T>> boxingAdapter) { + Spliterator<T> s = supplier.get(); + boolean isOrdered = s.hasCharacteristics(Spliterator.ORDERED); + assertSpliterator(s); + + List<T> splits = new ArrayList<>(); + Consumer<T> c = boxingAdapter.apply(splits::add); + + testSplitUntilNull(new SplitNode<T>(c, s)); + assertContents(splits, exp, isOrdered); + } + + private static class SplitNode<T> { + // Constant for every node + final Consumer<T> c; + final int rootCharacteristics; + + final Spliterator<T> s; + + SplitNode(Consumer<T> c, Spliterator<T> s) { + this(c, s.characteristics(), s); + } + + private SplitNode(Consumer<T> c, int rootCharacteristics, Spliterator<T> s) { + this.c = c; + this.rootCharacteristics = rootCharacteristics; + this.s = s; + } + + SplitNode<T> fromSplit(Spliterator<T> split) { + return new SplitNode<>(c, rootCharacteristics, split); + } + } + + /** + * Set the maximum stack capacity to 0.25MB. This should be more than enough to detect a bad spliterator + * while not unduly disrupting test infrastructure given the test data sizes that are used are small. + * Note that j.u.c.ForkJoinPool sets the max queue size to 64M (1 << 26). + */ + private static final int MAXIMUM_STACK_CAPACITY = 1 << 18; // 0.25MB + + private static <T> void testSplitUntilNull(SplitNode<T> e) { + // Use an explicit stack to avoid a StackOverflowException when testing a Spliterator + // that when repeatedly split produces a right-balanced (and maybe degenerate) tree, or + // for a spliterator that is badly behaved. 
+ Deque<SplitNode<T>> stack = new ArrayDeque<>(); + stack.push(e); + + int iteration = 0; + while (!stack.isEmpty()) { + assertTrue(iteration++ < MAXIMUM_STACK_CAPACITY, "Exceeded maximum stack modification count of 1 << 18"); + + e = stack.pop(); + Spliterator<T> parentAndRightSplit = e.s; + + long parentEstimateSize = parentAndRightSplit.estimateSize(); + assertTrue(parentEstimateSize >= 0, + String.format("Split size estimate %d < 0", parentEstimateSize)); + + long parentSize = parentAndRightSplit.getExactSizeIfKnown(); + Spliterator<T> leftSplit = parentAndRightSplit.trySplit(); + if (leftSplit == null) { + parentAndRightSplit.forEachRemaining(e.c); + continue; + } + + assertSpliterator(leftSplit, e.rootCharacteristics); + assertSpliterator(parentAndRightSplit, e.rootCharacteristics); + + if (parentEstimateSize != Long.MAX_VALUE && leftSplit.estimateSize() > 0 && parentAndRightSplit.estimateSize() > 0) { + assertTrue(leftSplit.estimateSize() < parentEstimateSize, + String.format("Left split size estimate %d >= parent split size estimate %d", leftSplit.estimateSize(), parentEstimateSize)); + assertTrue(parentAndRightSplit.estimateSize() < parentEstimateSize, + String.format("Right split size estimate %d >= parent split size estimate %d", leftSplit.estimateSize(), parentEstimateSize)); + } + else { + assertTrue(leftSplit.estimateSize() <= parentEstimateSize, + String.format("Left split size estimate %d > parent split size estimate %d", leftSplit.estimateSize(), parentEstimateSize)); + assertTrue(parentAndRightSplit.estimateSize() <= parentEstimateSize, + String.format("Right split size estimate %d > parent split size estimate %d", leftSplit.estimateSize(), parentEstimateSize)); + } + + long leftSize = leftSplit.getExactSizeIfKnown(); + long rightSize = parentAndRightSplit.getExactSizeIfKnown(); + if (parentSize >= 0 && leftSize >= 0 && rightSize >= 0) + assertEquals(parentSize, leftSize + rightSize, + String.format("exact left split size %d + exact right split 
size %d != parent exact split size %d", + leftSize, rightSize, parentSize)); + + // Add right side to stack first so left side is popped off first + stack.push(e.fromSplit(parentAndRightSplit)); + stack.push(e.fromSplit(leftSplit)); + } + } + + private static void assertSpliterator(Spliterator<?> s, int rootCharacteristics) { + if ((rootCharacteristics & Spliterator.SUBSIZED) != 0) { + assertTrue(s.hasCharacteristics(Spliterator.SUBSIZED), + "Child split is not SUBSIZED when root split is SUBSIZED"); + } + assertSpliterator(s); + } + + private static void assertSpliterator(Spliterator<?> s) { + if (s.hasCharacteristics(Spliterator.SUBSIZED)) { + assertTrue(s.hasCharacteristics(Spliterator.SIZED)); + } + if (s.hasCharacteristics(Spliterator.SIZED)) { + assertTrue(s.estimateSize() != Long.MAX_VALUE); + assertTrue(s.getExactSizeIfKnown() >= 0); + } + try { + s.getComparator(); + assertTrue(s.hasCharacteristics(Spliterator.SORTED)); + } catch (IllegalStateException e) { + assertFalse(s.hasCharacteristics(Spliterator.SORTED)); + } + } + + private static<T> void assertContents(Collection<T> actual, Collection<T> expected, boolean isOrdered) { + if (isOrdered) { + assertEquals(actual, expected); + } + else { + assertContentsUnordered(actual, expected); + } + } + + private static<T> void assertContentsUnordered(Iterable<T> actual, Iterable<T> expected) { + assertEquals(toBoxedMultiset(actual), toBoxedMultiset(expected)); + } + + private static <T> Map<T, HashableInteger> toBoxedMultiset(Iterable<T> c) { + Map<T, HashableInteger> result = new HashMap<>(); + c.forEach((Consumer) e -> { + if (result.containsKey((T)e)) { + result.put((T)e, new HashableInteger(((HashableInteger)result.get(e)).value + 1, 10)); + } else { + result.put((T)e, new HashableInteger(1, 10)); + } + }); + return result; + } + + private void executeAndCatch(Class<? 
extends Exception> expected, Runnable r) { + Exception caught = null; + try { + r.run(); + } + catch (Exception e) { + caught = e; + } + + assertNotNull(caught, + String.format("No Exception was thrown, expected an Exception of %s to be thrown", + expected.getName())); + assertTrue(expected.isInstance(caught), + String.format("Exception thrown %s not an instance of %s", + caught.getClass().getName(), expected.getName())); + } + +}
--- a/test/java/util/Spliterator/SpliteratorTraversingAndSplittingTest.java Wed May 29 13:22:58 2013 -0300 +++ b/test/java/util/Spliterator/SpliteratorTraversingAndSplittingTest.java Wed Jun 05 13:10:11 2013 -0300 @@ -128,6 +128,10 @@ void addMap(Function<Map<T, T>, ? extends Map<T, T>> m) { String description = "new " + m.apply(Collections.<T, T>emptyMap()).getClass().getName(); + addMap(m, description); + } + + void addMap(Function<Map<T, T>, ? extends Map<T, T>> m, String description) { add(description + ".keySet().spliterator()", () -> m.apply(mExp).keySet().spliterator()); add(description + ".values().spliterator()", () -> m.apply(mExp).values().spliterator()); add(description + ".entrySet().spliterator()", mExp.entrySet(), () -> m.apply(mExp).entrySet().spliterator()); @@ -399,12 +403,36 @@ db.addMap(HashMap::new); + db.addMap(m -> { + // Create a Map ensuring that for large sizes + // buckets will contain 2 or more entries + HashMap<Integer, Integer> cm = new HashMap<>(1, m.size() + 1); + // Don't use putAll which inflates the table by + // m.size() * loadFactor, thus creating a very sparse + // map for 1000 entries defeating the purpose of this test, + // in addition it will cause the split until null test to fail + // because the number of valid splits is larger than the + // threshold + for (Map.Entry<Integer, Integer> e : m.entrySet()) + cm.put(e.getKey(), e.getValue()); + return cm; + }, "new java.util.HashMap(1, size + 1)"); + db.addMap(LinkedHashMap::new); db.addMap(IdentityHashMap::new); db.addMap(WeakHashMap::new); + db.addMap(m -> { + // Create a Map ensuring that for large sizes + // buckets will be consist of 2 or more entries + WeakHashMap<Integer, Integer> cm = new WeakHashMap<>(1, m.size() + 1); + for (Map.Entry<Integer, Integer> e : m.entrySet()) + cm.put(e.getKey(), e.getValue()); + return cm; + }, "new java.util.WeakHashMap(1, size + 1)"); + // @@@ Descending maps etc db.addMap(TreeMap::new);
--- a/test/java/util/jar/TestExtra.java Wed May 29 13:22:58 2013 -0300 +++ b/test/java/util/jar/TestExtra.java Wed Jun 05 13:10:11 2013 -0300 @@ -23,7 +23,7 @@ /** * @test - * @bug 6480504 + * @bug 6480504 6303183 * @summary Test that client-provided data in the extra field is written and * read correctly, taking into account the JAR_MAGIC written into the extra * field of the first entry of JAR files. @@ -117,8 +117,7 @@ ZipInputStream zis = getInputStream(); ze = zis.getNextEntry(); - byte[] e = ze.getExtra(); - check(e.length == 8, "expected extra length is 8, got " + e.length); + checkExtra(data, ze.getExtra()); checkEntry(ze, 0, 0); } @@ -140,10 +139,43 @@ ZipInputStream zis = getInputStream(); ze = zis.getNextEntry(); byte[] e = ze.getExtra(); - check(e.length == 8, "expected extra length is 8, got " + e.length); + checkExtra(data, ze.getExtra()); checkEntry(ze, 0, 0); } + // check if all "expected" extra fields equal to their + // corresponding fields in "extra". The "extra" might have + // timestamp fields added by ZOS. + static void checkExtra(byte[] expected, byte[] extra) { + if (expected == null) + return; + int off = 0; + int len = expected.length; + while (off + 4 < len) { + int tag = get16(expected, off); + int sz = get16(expected, off + 2); + int off0 = 0; + int len0 = extra.length; + boolean matched = false; + while (off0 + 4 < len0) { + int tag0 = get16(extra, off0); + int sz0 = get16(extra, off0 + 2); + if (tag == tag0 && sz == sz0) { + matched = true; + for (int i = 0; i < sz; i++) { + if (expected[off + i] != extra[off0 +i]) + matched = false; + } + break; + } + off0 += (4 + sz0); + } + if (!matched) { + fail("Expected extra data [tag=" + tag + "sz=" + sz + "] check failed"); + } + off += (4 + sz); + } + } /** Check that the entry's extra data is correct. */ void checkEntry(ZipEntry ze, int count, int dataLength) {
--- a/test/java/util/stream/test/org/openjdk/tests/java/util/FillableStringTest.java Wed May 29 13:22:58 2013 -0300 +++ b/test/java/util/stream/test/org/openjdk/tests/java/util/FillableStringTest.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/test/java/util/stream/test/org/openjdk/tests/java/util/MapTest.java Wed May 29 13:22:58 2013 -0300 +++ b/test/java/util/stream/test/org/openjdk/tests/java/util/MapTest.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/test/java/util/stream/test/org/openjdk/tests/java/util/stream/SpliteratorLateBindingFailFastTest.java Wed May 29 13:22:58 2013 -0300 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,358 +0,0 @@ -/* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ -package org.openjdk.tests.java.util.stream; - -import org.testng.annotations.DataProvider; -import org.testng.annotations.Test; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.ConcurrentModificationException; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.LinkedHashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.PriorityQueue; -import java.util.Set; -import java.util.Spliterator; -import java.util.Stack; -import java.util.TreeMap; -import java.util.TreeSet; -import java.util.Vector; -import java.util.WeakHashMap; -import java.util.function.Consumer; -import java.util.function.Function; -import java.util.function.Supplier; - -import static org.testng.Assert.*; - -/** - * @test - * @summary Spliterator last-binding and fail-fast tests - * @run testng SpliteratorLateBindingFailFastTest - */ - -@Test(groups = { "serialization-hostile" }) -public class SpliteratorLateBindingFailFastTest { - - private interface Source<T> { - Collection<T> asCollection(); - void update(); - } - - private static class SpliteratorDataBuilder<T> { - final List<Object[]> data; - - final T newValue; - - final List<T> exp; - - final Map<T, T> mExp; - - SpliteratorDataBuilder(List<Object[]> data, T newValue, List<T> exp) { - this.data = data; - this.newValue = newValue; - this.exp = exp; - this.mExp = createMap(exp); - } - - Map<T, T> createMap(List<T> l) { - Map<T, T> m = new LinkedHashMap<>(); - for (T t : l) { - m.put(t, t); - } - return m; - } - - void add(String description, Supplier<Source<?>> s) { - description = joiner(description).toString(); - data.add(new Object[]{description, s}); - } - - void addCollection(Function<Collection<T>, ? 
extends Collection<T>> f) { - class CollectionSource implements Source<T> { - final Collection<T> c = f.apply(exp); - - final Consumer<Collection<T>> updater; - - CollectionSource(Consumer<Collection<T>> updater) { - this.updater = updater; - } - - @Override - public Collection<T> asCollection() { - return c; - } - - @Override - public void update() { - updater.accept(c); - } - } - - String description = "new " + f.apply(Collections.<T>emptyList()).getClass().getName() + ".spliterator() "; - add(description + "ADD", () -> new CollectionSource(c -> c.add(newValue))); - add(description + "REMOVE", () -> new CollectionSource(c -> c.remove(c.iterator().next()))); - } - - void addList(Function<Collection<T>, ? extends List<T>> l) { - // @@@ If collection is instance of List then add sub-list tests - addCollection(l); - } - - void addMap(Function<Map<T, T>, ? extends Map<T, T>> mapConstructor) { - class MapSource<U> implements Source<U> { - final Map<T, T> m = mapConstructor.apply(mExp); - - final Collection<U> c; - - final Consumer<Map<T, T>> updater; - - MapSource(Function<Map<T, T>, Collection<U>> f, Consumer<Map<T, T>> updater) { - this.c = f.apply(m); - this.updater = updater; - } - - @Override - public Collection<U> asCollection() { - return c; - } - - @Override - public void update() { - updater.accept(m); - } - } - - Map<String, Consumer<Map<T, T>>> actions = new HashMap<>(); - actions.put("ADD", m -> m.put(newValue, newValue)); - actions.put("REMOVE", m -> m.remove(m.keySet().iterator().next())); - - String description = "new " + mapConstructor.apply(Collections.<T, T>emptyMap()).getClass().getName(); - for (Map.Entry<String, Consumer<Map<T, T>>> e : actions.entrySet()) { - add(description + ".keySet().spliterator() " + e.getKey(), - () -> new MapSource<T>(m -> m.keySet(), e.getValue())); - add(description + ".values().spliterator() " + e.getKey(), - () -> new MapSource<T>(m -> m.values(), e.getValue())); - add(description + ".entrySet().spliterator() " + 
e.getKey(), - () -> new MapSource<Map.Entry<T, T>>(m -> m.entrySet(), e.getValue())); - } - } - - StringBuilder joiner(String description) { - return new StringBuilder(description). - append(" {"). - append("size=").append(exp.size()). - append("}"); - } - } - - static Object[][] spliteratorDataProvider; - - @DataProvider(name = "Source") - public static Object[][] spliteratorDataProvider() { - if (spliteratorDataProvider != null) { - return spliteratorDataProvider; - } - - List<Object[]> data = new ArrayList<>(); - SpliteratorDataBuilder<Integer> db = new SpliteratorDataBuilder<>(data, 5, Arrays.asList(1, 2, 3, 4)); - - // Collections - - db.addList(ArrayList::new); - - db.addList(LinkedList::new); - - db.addList(Vector::new); - - - db.addCollection(HashSet::new); - - db.addCollection(LinkedHashSet::new); - - db.addCollection(TreeSet::new); - - - db.addCollection(c -> { Stack<Integer> s = new Stack<>(); s.addAll(c); return s;}); - - db.addCollection(PriorityQueue::new); - - // ArrayDeque fails some tests since it's fail-fast support is weaker - // than other collections and limited to detecting most, but not all, - // removals. It probably requires it's own test since it is difficult - // to abstract out the conditions under which it fails-fast. 
-// db.addCollection(ArrayDeque::new); - - // Maps - - db.addMap(HashMap::new); - - db.addMap(LinkedHashMap::new); - - // This fails when run through jrteg but passes when run though - // ant -// db.addMap(IdentityHashMap::new); - - db.addMap(WeakHashMap::new); - - // @@@ Descending maps etc - db.addMap(TreeMap::new); - - return spliteratorDataProvider = data.toArray(new Object[0][]); - } - - @Test(dataProvider = "Source") - public <T> void lateBindingTestWithForEach(String description, Supplier<Source<T>> ss) { - Source<T> source = ss.get(); - Collection<T> c = source.asCollection(); - Spliterator<T> s = c.spliterator(); - - source.update(); - - Set<T> r = new HashSet<>(); - s.forEachRemaining(r::add); - - assertEquals(r, new HashSet<>(c)); - } - - @Test(dataProvider = "Source") - public <T> void lateBindingTestWithTryAdvance(String description, Supplier<Source<T>> ss) { - Source<T> source = ss.get(); - Collection<T> c = source.asCollection(); - Spliterator<T> s = c.spliterator(); - - source.update(); - - Set<T> r = new HashSet<>(); - while (s.tryAdvance(r::add)) { } - - assertEquals(r, new HashSet<>(c)); - } - - @Test(dataProvider = "Source") - public <T> void lateBindingTestWithCharacteritics(String description, Supplier<Source<T>> ss) { - Source<T> source = ss.get(); - Collection<T> c = source.asCollection(); - Spliterator<T> s = c.spliterator(); - s.characteristics(); - - Set<T> r = new HashSet<>(); - s.forEachRemaining(r::add); - - assertEquals(r, new HashSet<>(c)); - } - - - @Test(dataProvider = "Source") - public <T> void testFailFastTestWithTryAdvance(String description, Supplier<Source<T>> ss) { - { - Source<T> source = ss.get(); - Collection<T> c = source.asCollection(); - Spliterator<T> s = c.spliterator(); - - s.tryAdvance(e -> { - }); - source.update(); - - executeAndCatch(() -> s.tryAdvance(e -> { })); - } - - { - Source<T> source = ss.get(); - Collection<T> c = source.asCollection(); - Spliterator<T> s = c.spliterator(); - - s.tryAdvance(e -> { - 
}); - source.update(); - - executeAndCatch(() -> s.forEachRemaining(e -> { - })); - } - } - - @Test(dataProvider = "Source") - public <T> void testFailFastTestWithForEach(String description, Supplier<Source<T>> ss) { - Source<T> source = ss.get(); - Collection<T> c = source.asCollection(); - Spliterator<T> s = c.spliterator(); - - executeAndCatch(() -> s.forEachRemaining(e -> { - source.update(); - })); - } - - @Test(dataProvider = "Source") - public <T> void testFailFastTestWithEstimateSize(String description, Supplier<Source<T>> ss) { - { - Source<T> source = ss.get(); - Collection<T> c = source.asCollection(); - Spliterator<T> s = c.spliterator(); - - s.estimateSize(); - source.update(); - - executeAndCatch(() -> s.tryAdvance(e -> { })); - } - - { - Source<T> source = ss.get(); - Collection<T> c = source.asCollection(); - Spliterator<T> s = c.spliterator(); - - s.estimateSize(); - source.update(); - - executeAndCatch(() -> s.forEachRemaining(e -> { - })); - } - } - - private void executeAndCatch(Runnable r) { - executeAndCatch(ConcurrentModificationException.class, r); - } - - private void executeAndCatch(Class<? extends Exception> expected, Runnable r) { - Exception caught = null; - try { - r.run(); - } - catch (Exception e) { - caught = e; - } - - assertNotNull(caught, - String.format("No Exception was thrown, expected an Exception of %s to be thrown", - expected.getName())); - assertTrue(expected.isInstance(caught), - String.format("Exception thrown %s not an instance of %s", - caught.getClass().getName(), expected.getName())); - } - -}
--- a/test/java/util/stream/test/org/openjdk/tests/java/util/stream/SpliteratorTraversingAndSplittingTest.java Wed May 29 13:22:58 2013 -0300 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,1411 +0,0 @@ -/* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ -package org.openjdk.tests.java.util.stream; - -/** - * @test - * @summary Spliterator traversing and splitting tests - * @run testng SpliteratorTraversingAndSplittingTest - */ - -import org.testng.annotations.DataProvider; -import org.testng.annotations.Test; - -import java.util.AbstractCollection; -import java.util.AbstractList; -import java.util.AbstractSet; -import java.util.ArrayDeque; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.Comparator; -import java.util.Deque; -import java.util.HashMap; -import java.util.HashSet; -import java.util.IdentityHashMap; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.LinkedHashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.PriorityQueue; -import java.util.Set; -import java.util.SortedSet; -import java.util.Spliterator; -import java.util.Spliterators; -import java.util.Stack; -import java.util.TreeMap; -import java.util.TreeSet; -import java.util.Vector; -import java.util.WeakHashMap; -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.ConcurrentSkipListMap; -import java.util.concurrent.ConcurrentSkipListSet; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.CopyOnWriteArraySet; -import java.util.concurrent.LinkedBlockingDeque; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.LinkedTransferQueue; -import java.util.concurrent.PriorityBlockingQueue; -import java.util.function.Consumer; -import java.util.function.DoubleConsumer; -import java.util.function.Function; -import java.util.function.IntConsumer; -import java.util.function.LongConsumer; -import java.util.function.Supplier; -import java.util.function.UnaryOperator; - -import static org.testng.Assert.*; -import static 
org.testng.Assert.assertEquals; - -@Test(groups = { "serialization-hostile" }) -public class SpliteratorTraversingAndSplittingTest { - - private static List<Integer> SIZES = Arrays.asList(0, 1, 10, 100, 1000); - - private static class SpliteratorDataBuilder<T> { - List<Object[]> data; - - List<T> exp; - - Map<T, T> mExp; - - SpliteratorDataBuilder(List<Object[]> data, List<T> exp) { - this.data = data; - this.exp = exp; - this.mExp = createMap(exp); - } - - Map<T, T> createMap(List<T> l) { - Map<T, T> m = new LinkedHashMap<>(); - for (T t : l) { - m.put(t, t); - } - return m; - } - - void add(String description, Collection<?> expected, Supplier<Spliterator<?>> s) { - description = joiner(description).toString(); - data.add(new Object[]{description, expected, s}); - } - - void add(String description, Supplier<Spliterator<?>> s) { - add(description, exp, s); - } - - void addCollection(Function<Collection<T>, ? extends Collection<T>> c) { - add("new " + c.apply(Collections.<T>emptyList()).getClass().getName() + ".spliterator()", - () -> c.apply(exp).spliterator()); - } - - void addList(Function<Collection<T>, ? extends List<T>> l) { - // @@@ If collection is instance of List then add sub-list tests - addCollection(l); - } - - void addMap(Function<Map<T, T>, ? extends Map<T, T>> m) { - String description = "new " + m.apply(Collections.<T, T>emptyMap()).getClass().getName(); - add(description + ".keySet().spliterator()", () -> m.apply(mExp).keySet().spliterator()); - add(description + ".values().spliterator()", () -> m.apply(mExp).values().spliterator()); - add(description + ".entrySet().spliterator()", mExp.entrySet(), () -> m.apply(mExp).entrySet().spliterator()); - } - - StringBuilder joiner(String description) { - return new StringBuilder(description). - append(" {"). - append("size=").append(exp.size()). 
- append("}"); - } - } - - static Object[][] spliteratorDataProvider; - - @DataProvider(name = "Spliterator<Integer>") - public static Object[][] spliteratorDataProvider() { - if (spliteratorDataProvider != null) { - return spliteratorDataProvider; - } - - List<Object[]> data = new ArrayList<>(); - for (int size : SIZES) { - List<Integer> exp = listIntRange(size); - SpliteratorDataBuilder<Integer> db = new SpliteratorDataBuilder<>(data, exp); - - // Direct spliterator methods - - db.add("Spliterators.spliterator(Collection, ...)", - () -> Spliterators.spliterator(exp, 0)); - - db.add("Spliterators.spliterator(Iterator, ...)", - () -> Spliterators.spliterator(exp.iterator(), exp.size(), 0)); - - db.add("Spliterators.spliteratorUnknownSize(Iterator, ...)", - () -> Spliterators.spliteratorUnknownSize(exp.iterator(), 0)); - - db.add("Spliterators.spliterator(Spliterators.iteratorFromSpliterator(Spliterator ), ...)", - () -> Spliterators.spliterator(Spliterators.iteratorFromSpliterator(exp.spliterator()), exp.size(), 0)); - - db.add("Spliterators.spliterator(T[], ...)", - () -> Spliterators.spliterator(exp.toArray(new Integer[0]), 0)); - - db.add("Arrays.spliterator(T[], ...)", - () -> Arrays.spliterator(exp.toArray(new Integer[0]))); - - class SpliteratorFromIterator extends Spliterators.AbstractSpliterator<Integer> { - Iterator<Integer> it; - - SpliteratorFromIterator(Iterator<Integer> it, long est) { - super(est, Spliterator.SIZED); - this.it = it; - } - - @Override - public boolean tryAdvance(Consumer<? 
super Integer> action) { - if (action == null) - throw new NullPointerException(); - if (it.hasNext()) { - action.accept(it.next()); - return true; - } - else { - return false; - } - } - } - db.add("new Spliterators.AbstractSpliterator()", - () -> new SpliteratorFromIterator(exp.iterator(), exp.size())); - - // Collections - - // default method implementations - - class AbstractCollectionImpl extends AbstractCollection<Integer> { - Collection<Integer> c; - - AbstractCollectionImpl(Collection<Integer> c) { - this.c = c; - } - - @Override - public Iterator<Integer> iterator() { - return c.iterator(); - } - - @Override - public int size() { - return c.size(); - } - } - db.addCollection( - c -> new AbstractCollectionImpl(c)); - - class AbstractListImpl extends AbstractList<Integer> { - List<Integer> l; - - AbstractListImpl(Collection<Integer> c) { - this.l = new ArrayList<>(c); - } - - @Override - public Integer get(int index) { - return l.get(index); - } - - @Override - public int size() { - return l.size(); - } - } - db.addCollection( - c -> new AbstractListImpl(c)); - - class AbstractSetImpl extends AbstractSet<Integer> { - Set<Integer> s; - - AbstractSetImpl(Collection<Integer> c) { - this.s = new HashSet<>(c); - } - - @Override - public Iterator<Integer> iterator() { - return s.iterator(); - } - - @Override - public int size() { - return s.size(); - } - } - db.addCollection( - c -> new AbstractSetImpl(c)); - - class AbstractSortedSetImpl extends AbstractSet<Integer> implements SortedSet<Integer> { - SortedSet<Integer> s; - - AbstractSortedSetImpl(Collection<Integer> c) { - this.s = new TreeSet<>(c); - } - - @Override - public Iterator<Integer> iterator() { - return s.iterator(); - } - - @Override - public int size() { - return s.size(); - } - - @Override - public Comparator<? 
super Integer> comparator() { - return s.comparator(); - } - - @Override - public SortedSet<Integer> subSet(Integer fromElement, Integer toElement) { - return s.subSet(fromElement, toElement); - } - - @Override - public SortedSet<Integer> headSet(Integer toElement) { - return s.headSet(toElement); - } - - @Override - public SortedSet<Integer> tailSet(Integer fromElement) { - return s.tailSet(fromElement); - } - - @Override - public Integer first() { - return s.first(); - } - - @Override - public Integer last() { - return s.last(); - } - - @Override - public Spliterator<Integer> spliterator() { - return SortedSet.super.spliterator(); - } - } - db.addCollection( - c -> new AbstractSortedSetImpl(c)); - - // - - db.add("Arrays.asList().spliterator()", - () -> Spliterators.spliterator(Arrays.asList(exp.toArray(new Integer[0])), 0)); - - db.addList(ArrayList::new); - - db.addList(LinkedList::new); - - db.addList(Vector::new); - - - db.addCollection(HashSet::new); - - db.addCollection(LinkedHashSet::new); - - db.addCollection(TreeSet::new); - - - db.addCollection(c -> { Stack<Integer> s = new Stack<>(); s.addAll(c); return s;}); - - db.addCollection(PriorityQueue::new); - - db.addCollection(ArrayDeque::new); - - - db.addCollection(ConcurrentSkipListSet::new); - - if (size > 0) { - db.addCollection(c -> { - ArrayBlockingQueue<Integer> abq = new ArrayBlockingQueue<>(size); - abq.addAll(c); - return abq; - }); - } - - db.addCollection(PriorityBlockingQueue::new); - - db.addCollection(LinkedBlockingQueue::new); - - db.addCollection(LinkedTransferQueue::new); - - db.addCollection(ConcurrentLinkedQueue::new); - - db.addCollection(LinkedBlockingDeque::new); - - db.addCollection(CopyOnWriteArrayList::new); - - db.addCollection(CopyOnWriteArraySet::new); - - if (size == 1) { - db.addCollection(c -> Collections.singleton(exp.get(0))); - db.addCollection(c -> Collections.singletonList(exp.get(0))); - } - - // Collections.synchronized/unmodifiable/checked wrappers - 
db.addCollection(Collections::unmodifiableCollection); - db.addCollection(c -> Collections.unmodifiableSet(new HashSet<>(c))); - db.addCollection(c -> Collections.unmodifiableSortedSet(new TreeSet<>(c))); - db.addList(c -> Collections.unmodifiableList(new ArrayList<>(c))); - db.addMap(Collections::unmodifiableMap); - db.addMap(m -> Collections.unmodifiableSortedMap(new TreeMap<>(m))); - - db.addCollection(Collections::synchronizedCollection); - db.addCollection(c -> Collections.synchronizedSet(new HashSet<>(c))); - db.addCollection(c -> Collections.synchronizedSortedSet(new TreeSet<>(c))); - db.addList(c -> Collections.synchronizedList(new ArrayList<>(c))); - db.addMap(Collections::synchronizedMap); - db.addMap(m -> Collections.synchronizedSortedMap(new TreeMap<>(m))); - - db.addCollection(c -> Collections.checkedCollection(c, Integer.class)); - db.addCollection(c -> Collections.checkedQueue(new ArrayDeque<>(c), Integer.class)); - db.addCollection(c -> Collections.checkedSet(new HashSet<>(c), Integer.class)); - db.addCollection(c -> Collections.checkedSortedSet(new TreeSet<>(c), Integer.class)); - db.addList(c -> Collections.checkedList(new ArrayList<>(c), Integer.class)); - db.addMap(c -> Collections.checkedMap(c, Integer.class, Integer.class)); - db.addMap(m -> Collections.checkedSortedMap(new TreeMap<>(m), Integer.class, Integer.class)); - - // Maps - - db.addMap(HashMap::new); - - db.addMap(LinkedHashMap::new); - - db.addMap(IdentityHashMap::new); - - db.addMap(WeakHashMap::new); - - // @@@ Descending maps etc - db.addMap(TreeMap::new); - - db.addMap(ConcurrentHashMap::new); - - db.addMap(ConcurrentSkipListMap::new); - } - - return spliteratorDataProvider = data.toArray(new Object[0][]); - } - - private static List<Integer> listIntRange(int upTo) { - List<Integer> exp = new ArrayList<>(); - for (int i = 0; i < upTo; i++) - exp.add(i); - return Collections.unmodifiableList(exp); - } - - @Test(dataProvider = "Spliterator<Integer>") - 
@SuppressWarnings({"unchecked", "rawtypes"}) - public void testNullPointerException(String description, Collection exp, Supplier<Spliterator> s) { - executeAndCatch(NullPointerException.class, () -> s.get().forEachRemaining(null)); - executeAndCatch(NullPointerException.class, () -> s.get().tryAdvance(null)); - } - - @Test(dataProvider = "Spliterator<Integer>") - @SuppressWarnings({"unchecked", "rawtypes"}) - public void testForEach(String description, Collection exp, Supplier<Spliterator> s) { - testForEach(exp, s, (Consumer<Object> b) -> b); - } - - @Test(dataProvider = "Spliterator<Integer>") - @SuppressWarnings({"unchecked", "rawtypes"}) - public void testTryAdvance(String description, Collection exp, Supplier<Spliterator> s) { - testTryAdvance(exp, s, (Consumer<Object> b) -> b); - } - - @Test(dataProvider = "Spliterator<Integer>") - @SuppressWarnings({"unchecked", "rawtypes"}) - public void testMixedTryAdvanceForEach(String description, Collection exp, Supplier<Spliterator> s) { - testMixedTryAdvanceForEach(exp, s, (Consumer<Object> b) -> b); - } - - @Test(dataProvider = "Spliterator<Integer>") - @SuppressWarnings({"unchecked", "rawtypes"}) - public void testMixedTraverseAndSplit(String description, Collection exp, Supplier<Spliterator> s) { - testMixedTraverseAndSplit(exp, s, (Consumer<Object> b) -> b); - } - - @Test(dataProvider = "Spliterator<Integer>") - @SuppressWarnings({"unchecked", "rawtypes"}) - public void testSplitAfterFullTraversal(String description, Collection exp, Supplier<Spliterator> s) { - testSplitAfterFullTraversal(s, (Consumer<Object> b) -> b); - } - - @Test(dataProvider = "Spliterator<Integer>") - @SuppressWarnings({"unchecked", "rawtypes"}) - public void testSplitOnce(String description, Collection exp, Supplier<Spliterator> s) { - testSplitOnce(exp, s, (Consumer<Object> b) -> b); - } - - @Test(dataProvider = "Spliterator<Integer>") - @SuppressWarnings({"unchecked", "rawtypes"}) - public void testSplitSixDeep(String description, 
Collection exp, Supplier<Spliterator> s) { - testSplitSixDeep(exp, s, (Consumer<Object> b) -> b); - } - - @Test(dataProvider = "Spliterator<Integer>") - @SuppressWarnings({"unchecked", "rawtypes"}) - public void testSplitUntilNull(String description, Collection exp, Supplier<Spliterator> s) { - testSplitUntilNull(exp, s, (Consumer<Object> b) -> b); - } - - // - - private static class SpliteratorOfIntDataBuilder { - List<Object[]> data; - - List<Integer> exp; - - SpliteratorOfIntDataBuilder(List<Object[]> data, List<Integer> exp) { - this.data = data; - this.exp = exp; - } - - void add(String description, List<Integer> expected, Supplier<Spliterator.OfInt> s) { - description = joiner(description).toString(); - data.add(new Object[]{description, expected, s}); - } - - void add(String description, Supplier<Spliterator.OfInt> s) { - add(description, exp, s); - } - - StringBuilder joiner(String description) { - return new StringBuilder(description). - append(" {"). - append("size=").append(exp.size()). 
- append("}"); - } - } - - static Object[][] spliteratorOfIntDataProvider; - - @DataProvider(name = "Spliterator.OfInt") - public static Object[][] spliteratorOfIntDataProvider() { - if (spliteratorOfIntDataProvider != null) { - return spliteratorOfIntDataProvider; - } - - List<Object[]> data = new ArrayList<>(); - for (int size : SIZES) { - int exp[] = arrayIntRange(size); - SpliteratorOfIntDataBuilder db = new SpliteratorOfIntDataBuilder(data, listIntRange(size)); - - db.add("Spliterators.spliterator(int[], ...)", - () -> Spliterators.spliterator(exp, 0)); - - db.add("Arrays.spliterator(int[], ...)", - () -> Arrays.spliterator(exp)); - - db.add("Spliterators.spliterator(PrimitiveIterator.OfInt, ...)", - () -> Spliterators.spliterator(Spliterators.iteratorFromSpliterator(Arrays.spliterator(exp)), exp.length, 0)); - - db.add("Spliterators.spliteratorUnknownSize(PrimitiveIterator.OfInt, ...)", - () -> Spliterators.spliteratorUnknownSize(Spliterators.iteratorFromSpliterator(Arrays.spliterator(exp)), 0)); - - class IntSpliteratorFromArray extends Spliterators.AbstractIntSpliterator { - int[] a; - int index = 0; - - IntSpliteratorFromArray(int[] a) { - super(a.length, Spliterator.SIZED); - this.a = a; - } - - @Override - public boolean tryAdvance(IntConsumer action) { - if (action == null) - throw new NullPointerException(); - if (index < a.length) { - action.accept(a[index++]); - return true; - } - else { - return false; - } - } - } - db.add("new Spliterators.AbstractIntAdvancingSpliterator()", - () -> new IntSpliteratorFromArray(exp)); - } - - return spliteratorOfIntDataProvider = data.toArray(new Object[0][]); - } - - private static int[] arrayIntRange(int upTo) { - int[] exp = new int[upTo]; - for (int i = 0; i < upTo; i++) - exp[i] = i; - return exp; - } - - private static UnaryOperator<Consumer<Integer>> intBoxingConsumer() { - class BoxingAdapter implements Consumer<Integer>, IntConsumer { - private final Consumer<Integer> b; - - BoxingAdapter(Consumer<Integer> 
b) { - this.b = b; - } - - @Override - public void accept(Integer value) { - throw new IllegalStateException(); - } - - @Override - public void accept(int value) { - b.accept(value); - } - } - - return b -> new BoxingAdapter(b); - } - - @Test(dataProvider = "Spliterator.OfInt") - public void testIntNullPointerException(String description, Collection<Integer> exp, Supplier<Spliterator.OfInt> s) { - executeAndCatch(NullPointerException.class, () -> s.get().forEachRemaining((IntConsumer) null)); - executeAndCatch(NullPointerException.class, () -> s.get().tryAdvance((IntConsumer) null)); - } - - @Test(dataProvider = "Spliterator.OfInt") - public void testIntForEach(String description, Collection<Integer> exp, Supplier<Spliterator.OfInt> s) { - testForEach(exp, s, intBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfInt") - public void testIntTryAdvance(String description, Collection<Integer> exp, Supplier<Spliterator.OfInt> s) { - testTryAdvance(exp, s, intBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfInt") - public void testIntMixedTryAdvanceForEach(String description, Collection<Integer> exp, Supplier<Spliterator.OfInt> s) { - testMixedTryAdvanceForEach(exp, s, intBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfInt") - public void testIntMixedTraverseAndSplit(String description, Collection<Integer> exp, Supplier<Spliterator.OfInt> s) { - testMixedTraverseAndSplit(exp, s, intBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfInt") - public void testIntSplitAfterFullTraversal(String description, Collection<Integer> exp, Supplier<Spliterator.OfInt> s) { - testSplitAfterFullTraversal(s, intBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfInt") - public void testIntSplitOnce(String description, Collection<Integer> exp, Supplier<Spliterator.OfInt> s) { - testSplitOnce(exp, s, intBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfInt") - public void testIntSplitSixDeep(String description, 
Collection<Integer> exp, Supplier<Spliterator.OfInt> s) { - testSplitSixDeep(exp, s, intBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfInt") - public void testIntSplitUntilNull(String description, Collection<Integer> exp, Supplier<Spliterator.OfInt> s) { - testSplitUntilNull(exp, s, intBoxingConsumer()); - } - - // - - private static class SpliteratorOfLongDataBuilder { - List<Object[]> data; - - List<Long> exp; - - SpliteratorOfLongDataBuilder(List<Object[]> data, List<Long> exp) { - this.data = data; - this.exp = exp; - } - - void add(String description, List<Long> expected, Supplier<Spliterator.OfLong> s) { - description = joiner(description).toString(); - data.add(new Object[]{description, expected, s}); - } - - void add(String description, Supplier<Spliterator.OfLong> s) { - add(description, exp, s); - } - - StringBuilder joiner(String description) { - return new StringBuilder(description). - append(" {"). - append("size=").append(exp.size()). - append("}"); - } - } - - static Object[][] spliteratorOfLongDataProvider; - - @DataProvider(name = "Spliterator.OfLong") - public static Object[][] spliteratorOfLongDataProvider() { - if (spliteratorOfLongDataProvider != null) { - return spliteratorOfLongDataProvider; - } - - List<Object[]> data = new ArrayList<>(); - for (int size : SIZES) { - long exp[] = arrayLongRange(size); - SpliteratorOfLongDataBuilder db = new SpliteratorOfLongDataBuilder(data, listLongRange(size)); - - db.add("Spliterators.spliterator(long[], ...)", - () -> Spliterators.spliterator(exp, 0)); - - db.add("Arrays.spliterator(long[], ...)", - () -> Arrays.spliterator(exp)); - - db.add("Spliterators.spliterator(PrimitiveIterator.OfLong, ...)", - () -> Spliterators.spliterator(Spliterators.iteratorFromSpliterator(Arrays.spliterator(exp)), exp.length, 0)); - - db.add("Spliterators.spliteratorUnknownSize(PrimitiveIterator.OfLong, ...)", - () -> 
Spliterators.spliteratorUnknownSize(Spliterators.iteratorFromSpliterator(Arrays.spliterator(exp)), 0)); - - class LongSpliteratorFromArray extends Spliterators.AbstractLongSpliterator { - long[] a; - int index = 0; - - LongSpliteratorFromArray(long[] a) { - super(a.length, Spliterator.SIZED); - this.a = a; - } - - @Override - public boolean tryAdvance(LongConsumer action) { - if (action == null) - throw new NullPointerException(); - if (index < a.length) { - action.accept(a[index++]); - return true; - } - else { - return false; - } - } - } - db.add("new Spliterators.AbstractLongAdvancingSpliterator()", - () -> new LongSpliteratorFromArray(exp)); - } - - return spliteratorOfLongDataProvider = data.toArray(new Object[0][]); - } - - private static List<Long> listLongRange(int upTo) { - List<Long> exp = new ArrayList<>(); - for (long i = 0; i < upTo; i++) - exp.add(i); - return Collections.unmodifiableList(exp); - } - - private static long[] arrayLongRange(int upTo) { - long[] exp = new long[upTo]; - for (int i = 0; i < upTo; i++) - exp[i] = i; - return exp; - } - - private static UnaryOperator<Consumer<Long>> longBoxingConsumer() { - class BoxingAdapter implements Consumer<Long>, LongConsumer { - private final Consumer<Long> b; - - BoxingAdapter(Consumer<Long> b) { - this.b = b; - } - - @Override - public void accept(Long value) { - throw new IllegalStateException(); - } - - @Override - public void accept(long value) { - b.accept(value); - } - } - - return b -> new BoxingAdapter(b); - } - - @Test(dataProvider = "Spliterator.OfLong") - public void testLongNullPointerException(String description, Collection<Long> exp, Supplier<Spliterator.OfLong> s) { - executeAndCatch(NullPointerException.class, () -> s.get().forEachRemaining((LongConsumer) null)); - executeAndCatch(NullPointerException.class, () -> s.get().tryAdvance((LongConsumer) null)); - } - - @Test(dataProvider = "Spliterator.OfLong") - public void testLongForEach(String description, Collection<Long> exp, 
Supplier<Spliterator.OfLong> s) { - testForEach(exp, s, longBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfLong") - public void testLongTryAdvance(String description, Collection<Long> exp, Supplier<Spliterator.OfLong> s) { - testTryAdvance(exp, s, longBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfLong") - public void testLongMixedTryAdvanceForEach(String description, Collection<Long> exp, Supplier<Spliterator.OfLong> s) { - testMixedTryAdvanceForEach(exp, s, longBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfLong") - public void testLongMixedTraverseAndSplit(String description, Collection<Long> exp, Supplier<Spliterator.OfLong> s) { - testMixedTraverseAndSplit(exp, s, longBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfLong") - public void testLongSplitAfterFullTraversal(String description, Collection<Long> exp, Supplier<Spliterator.OfLong> s) { - testSplitAfterFullTraversal(s, longBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfLong") - public void testLongSplitOnce(String description, Collection<Long> exp, Supplier<Spliterator.OfLong> s) { - testSplitOnce(exp, s, longBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfLong") - public void testLongSplitSixDeep(String description, Collection<Long> exp, Supplier<Spliterator.OfLong> s) { - testSplitSixDeep(exp, s, longBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfLong") - public void testLongSplitUntilNull(String description, Collection<Long> exp, Supplier<Spliterator.OfLong> s) { - testSplitUntilNull(exp, s, longBoxingConsumer()); - } - - // - - private static class SpliteratorOfDoubleDataBuilder { - List<Object[]> data; - - List<Double> exp; - - SpliteratorOfDoubleDataBuilder(List<Object[]> data, List<Double> exp) { - this.data = data; - this.exp = exp; - } - - void add(String description, List<Double> expected, Supplier<Spliterator.OfDouble> s) { - description = joiner(description).toString(); - data.add(new 
Object[]{description, expected, s}); - } - - void add(String description, Supplier<Spliterator.OfDouble> s) { - add(description, exp, s); - } - - StringBuilder joiner(String description) { - return new StringBuilder(description). - append(" {"). - append("size=").append(exp.size()). - append("}"); - } - } - - static Object[][] spliteratorOfDoubleDataProvider; - - @DataProvider(name = "Spliterator.OfDouble") - public static Object[][] spliteratorOfDoubleDataProvider() { - if (spliteratorOfDoubleDataProvider != null) { - return spliteratorOfDoubleDataProvider; - } - - List<Object[]> data = new ArrayList<>(); - for (int size : SIZES) { - double exp[] = arrayDoubleRange(size); - SpliteratorOfDoubleDataBuilder db = new SpliteratorOfDoubleDataBuilder(data, listDoubleRange(size)); - - db.add("Spliterators.spliterator(double[], ...)", - () -> Spliterators.spliterator(exp, 0)); - - db.add("Arrays.spliterator(double[], ...)", - () -> Arrays.spliterator(exp)); - - db.add("Spliterators.spliterator(PrimitiveIterator.OfDouble, ...)", - () -> Spliterators.spliterator(Spliterators.iteratorFromSpliterator(Arrays.spliterator(exp)), exp.length, 0)); - - db.add("Spliterators.spliteratorUnknownSize(PrimitiveIterator.OfDouble, ...)", - () -> Spliterators.spliteratorUnknownSize(Spliterators.iteratorFromSpliterator(Arrays.spliterator(exp)), 0)); - - class DoubleSpliteratorFromArray extends Spliterators.AbstractDoubleSpliterator { - double[] a; - int index = 0; - - DoubleSpliteratorFromArray(double[] a) { - super(a.length, Spliterator.SIZED); - this.a = a; - } - - @Override - public boolean tryAdvance(DoubleConsumer action) { - if (action == null) - throw new NullPointerException(); - if (index < a.length) { - action.accept(a[index++]); - return true; - } - else { - return false; - } - } - } - db.add("new Spliterators.AbstractDoubleAdvancingSpliterator()", - () -> new DoubleSpliteratorFromArray(exp)); - } - - return spliteratorOfDoubleDataProvider = data.toArray(new Object[0][]); - } - - 
private static List<Double> listDoubleRange(int upTo) { - List<Double> exp = new ArrayList<>(); - for (double i = 0; i < upTo; i++) - exp.add(i); - return Collections.unmodifiableList(exp); - } - - private static double[] arrayDoubleRange(int upTo) { - double[] exp = new double[upTo]; - for (int i = 0; i < upTo; i++) - exp[i] = i; - return exp; - } - - private static UnaryOperator<Consumer<Double>> doubleBoxingConsumer() { - class BoxingAdapter implements Consumer<Double>, DoubleConsumer { - private final Consumer<Double> b; - - BoxingAdapter(Consumer<Double> b) { - this.b = b; - } - - @Override - public void accept(Double value) { - throw new IllegalStateException(); - } - - @Override - public void accept(double value) { - b.accept(value); - } - } - - return b -> new BoxingAdapter(b); - } - - @Test(dataProvider = "Spliterator.OfDouble") - public void testDoubleNullPointerException(String description, Collection<Double> exp, Supplier<Spliterator.OfDouble> s) { - executeAndCatch(NullPointerException.class, () -> s.get().forEachRemaining((DoubleConsumer) null)); - executeAndCatch(NullPointerException.class, () -> s.get().tryAdvance((DoubleConsumer) null)); - } - - @Test(dataProvider = "Spliterator.OfDouble") - public void testDoubleForEach(String description, Collection<Double> exp, Supplier<Spliterator.OfDouble> s) { - testForEach(exp, s, doubleBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfDouble") - public void testDoubleTryAdvance(String description, Collection<Double> exp, Supplier<Spliterator.OfDouble> s) { - testTryAdvance(exp, s, doubleBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfDouble") - public void testDoubleMixedTryAdvanceForEach(String description, Collection<Double> exp, Supplier<Spliterator.OfDouble> s) { - testMixedTryAdvanceForEach(exp, s, doubleBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfDouble") - public void testDoubleMixedTraverseAndSplit(String description, Collection<Double> exp, 
Supplier<Spliterator.OfDouble> s) { - testMixedTraverseAndSplit(exp, s, doubleBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfDouble") - public void testDoubleSplitAfterFullTraversal(String description, Collection<Double> exp, Supplier<Spliterator.OfDouble> s) { - testSplitAfterFullTraversal(s, doubleBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfDouble") - public void testDoubleSplitOnce(String description, Collection<Double> exp, Supplier<Spliterator.OfDouble> s) { - testSplitOnce(exp, s, doubleBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfDouble") - public void testDoubleSplitSixDeep(String description, Collection<Double> exp, Supplier<Spliterator.OfDouble> s) { - testSplitSixDeep(exp, s, doubleBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfDouble") - public void testDoubleSplitUntilNull(String description, Collection<Double> exp, Supplier<Spliterator.OfDouble> s) { - testSplitUntilNull(exp, s, doubleBoxingConsumer()); - } - - // - - private static <T, S extends Spliterator<T>> void testForEach( - Collection<T> exp, - Supplier<S> supplier, - UnaryOperator<Consumer<T>> boxingAdapter) { - S spliterator = supplier.get(); - long sizeIfKnown = spliterator.getExactSizeIfKnown(); - boolean isOrdered = spliterator.hasCharacteristics(Spliterator.ORDERED); - - ArrayList<T> fromForEach = new ArrayList<>(); - spliterator = supplier.get(); - Consumer<T> addToFromForEach = boxingAdapter.apply(fromForEach::add); - spliterator.forEachRemaining(addToFromForEach); - - // Assert that forEach now produces no elements - spliterator.forEachRemaining(boxingAdapter.apply( - e -> fail("Spliterator.forEach produced an element after spliterator exhausted: " + e))); - // Assert that tryAdvance now produce no elements - spliterator.tryAdvance(boxingAdapter.apply( - e -> fail("Spliterator.tryAdvance produced an element after spliterator exhausted: " + e))); - - // assert that size, tryAdvance, and forEach are consistent - if 
(sizeIfKnown >= 0) { - assertEquals(sizeIfKnown, exp.size()); - } - assertEquals(fromForEach.size(), exp.size()); - - assertContents(fromForEach, exp, isOrdered); - } - - private static <T, S extends Spliterator<T>> void testTryAdvance( - Collection<T> exp, - Supplier<S> supplier, - UnaryOperator<Consumer<T>> boxingAdapter) { - S spliterator = supplier.get(); - long sizeIfKnown = spliterator.getExactSizeIfKnown(); - boolean isOrdered = spliterator.hasCharacteristics(Spliterator.ORDERED); - - spliterator = supplier.get(); - ArrayList<T> fromTryAdvance = new ArrayList<>(); - Consumer<T> addToFromTryAdvance = boxingAdapter.apply(fromTryAdvance::add); - while (spliterator.tryAdvance(addToFromTryAdvance)) { } - - // Assert that forEach now produces no elements - spliterator.forEachRemaining(boxingAdapter.apply( - e -> fail("Spliterator.forEach produced an element after spliterator exhausted: " + e))); - // Assert that tryAdvance now produce no elements - spliterator.tryAdvance(boxingAdapter.apply( - e -> fail("Spliterator.tryAdvance produced an element after spliterator exhausted: " + e))); - - // assert that size, tryAdvance, and forEach are consistent - if (sizeIfKnown >= 0) { - assertEquals(sizeIfKnown, exp.size()); - } - assertEquals(fromTryAdvance.size(), exp.size()); - - assertContents(fromTryAdvance, exp, isOrdered); - } - - private static <T, S extends Spliterator<T>> void testMixedTryAdvanceForEach( - Collection<T> exp, - Supplier<S> supplier, - UnaryOperator<Consumer<T>> boxingAdapter) { - S spliterator = supplier.get(); - long sizeIfKnown = spliterator.getExactSizeIfKnown(); - boolean isOrdered = spliterator.hasCharacteristics(Spliterator.ORDERED); - - // tryAdvance first few elements, then forEach rest - ArrayList<T> dest = new ArrayList<>(); - spliterator = supplier.get(); - Consumer<T> addToDest = boxingAdapter.apply(dest::add); - for (int i = 0; i < 10 && spliterator.tryAdvance(addToDest); i++) { } - spliterator.forEachRemaining(addToDest); - - // Assert 
that forEach now produces no elements - spliterator.forEachRemaining(boxingAdapter.apply( - e -> fail("Spliterator.forEach produced an element after spliterator exhausted: " + e))); - // Assert that tryAdvance now produce no elements - spliterator.tryAdvance(boxingAdapter.apply( - e -> fail("Spliterator.tryAdvance produced an element after spliterator exhausted: " + e))); - - if (sizeIfKnown >= 0) { - assertEquals(sizeIfKnown, dest.size()); - } - assertEquals(dest.size(), exp.size()); - - if (isOrdered) { - assertEquals(dest, exp); - } - else { - assertContentsUnordered(dest, exp); - } - } - - private static <T, S extends Spliterator<T>> void testMixedTraverseAndSplit( - Collection<T> exp, - Supplier<S> supplier, - UnaryOperator<Consumer<T>> boxingAdapter) { - S spliterator = supplier.get(); - long sizeIfKnown = spliterator.getExactSizeIfKnown(); - boolean isOrdered = spliterator.hasCharacteristics(Spliterator.ORDERED); - - ArrayList<T> dest = new ArrayList<>(); - spliterator = supplier.get(); - Consumer<T> b = boxingAdapter.apply(dest::add); - - Spliterator<T> spl1, spl2, spl3; - spliterator.tryAdvance(b); - spl2 = spliterator.trySplit(); - if (spl2 != null) { - spl2.tryAdvance(b); - spl1 = spl2.trySplit(); - if (spl1 != null) { - spl1.tryAdvance(b); - spl1.forEachRemaining(b); - } - spl2.tryAdvance(b); - spl2.forEachRemaining(b); - } - spliterator.tryAdvance(b); - spl3 = spliterator.trySplit(); - if (spl3 != null) { - spl3.tryAdvance(b); - spl3.forEachRemaining(b); - } - spliterator.tryAdvance(b); - spliterator.forEachRemaining(b); - - if (sizeIfKnown >= 0) { - assertEquals(sizeIfKnown, dest.size()); - } - assertEquals(dest.size(), exp.size()); - - if (isOrdered) { - assertEquals(dest, exp); - } - else { - assertContentsUnordered(dest, exp); - } - } - - private static <T, S extends Spliterator<T>> void testSplitAfterFullTraversal( - Supplier<S> supplier, - UnaryOperator<Consumer<T>> boxingAdapter) { - // Full traversal using tryAdvance - Spliterator<T> 
spliterator = supplier.get(); - while (spliterator.tryAdvance(boxingAdapter.apply(e -> { }))) { } - Spliterator<T> split = spliterator.trySplit(); - assertNull(split); - - // Full traversal using forEach - spliterator = supplier.get(); - spliterator.forEachRemaining(boxingAdapter.apply(e -> { - })); - split = spliterator.trySplit(); - assertNull(split); - - // Full traversal using tryAdvance then forEach - spliterator = supplier.get(); - spliterator.tryAdvance(boxingAdapter.apply(e -> { })); - spliterator.forEachRemaining(boxingAdapter.apply(e -> { - })); - split = spliterator.trySplit(); - assertNull(split); - } - - private static <T, S extends Spliterator<T>> void testSplitOnce( - Collection<T> exp, - Supplier<S> supplier, - UnaryOperator<Consumer<T>> boxingAdapter) { - S spliterator = supplier.get(); - long sizeIfKnown = spliterator.getExactSizeIfKnown(); - boolean isOrdered = spliterator.hasCharacteristics(Spliterator.ORDERED); - - ArrayList<T> fromSplit = new ArrayList<>(); - Spliterator<T> s1 = supplier.get(); - Spliterator<T> s2 = s1.trySplit(); - long s1Size = s1.getExactSizeIfKnown(); - long s2Size = (s2 != null) ? 
s2.getExactSizeIfKnown() : 0; - Consumer<T> addToFromSplit = boxingAdapter.apply(fromSplit::add); - if (s2 != null) - s2.forEachRemaining(addToFromSplit); - s1.forEachRemaining(addToFromSplit); - - if (sizeIfKnown >= 0) { - assertEquals(sizeIfKnown, fromSplit.size()); - if (s1Size >= 0 && s2Size >= 0) - assertEquals(sizeIfKnown, s1Size + s2Size); - } - assertContents(fromSplit, exp, isOrdered); - } - - private static <T, S extends Spliterator<T>> void testSplitSixDeep( - Collection<T> exp, - Supplier<S> supplier, - UnaryOperator<Consumer<T>> boxingAdapter) { - S spliterator = supplier.get(); - boolean isOrdered = spliterator.hasCharacteristics(Spliterator.ORDERED); - - for (int depth=0; depth < 6; depth++) { - List<T> dest = new ArrayList<>(); - spliterator = supplier.get(); - - assertSpliterator(spliterator); - - // verify splitting with forEach - visit(depth, 0, dest, spliterator, boxingAdapter, spliterator.characteristics(), false); - assertContents(dest, exp, isOrdered); - - // verify splitting with tryAdvance - dest.clear(); - spliterator = supplier.get(); - visit(depth, 0, dest, spliterator, boxingAdapter, spliterator.characteristics(), true); - assertContents(dest, exp, isOrdered); - } - } - - private static <T, S extends Spliterator<T>> - void visit(int depth, int curLevel, - List<T> dest, S spliterator, UnaryOperator<Consumer<T>> boxingAdapter, - int rootCharacteristics, boolean useTryAdvance) { - if (curLevel < depth) { - long beforeSize = spliterator.getExactSizeIfKnown(); - Spliterator<T> split = spliterator.trySplit(); - if (split != null) { - assertSpliterator(split, rootCharacteristics); - assertSpliterator(spliterator, rootCharacteristics); - - if ((rootCharacteristics & Spliterator.SUBSIZED) != 0 && - (rootCharacteristics & Spliterator.SIZED) != 0) { - assertEquals(beforeSize, split.estimateSize() + spliterator.estimateSize()); - } - visit(depth, curLevel + 1, dest, split, boxingAdapter, rootCharacteristics, useTryAdvance); - } - visit(depth, 
curLevel + 1, dest, spliterator, boxingAdapter, rootCharacteristics, useTryAdvance); - } - else { - long sizeIfKnown = spliterator.getExactSizeIfKnown(); - if (useTryAdvance) { - Consumer<T> addToDest = boxingAdapter.apply(dest::add); - int count = 0; - while (spliterator.tryAdvance(addToDest)) { - ++count; - } - - if (sizeIfKnown >= 0) - assertEquals(sizeIfKnown, count); - - // Assert that forEach now produces no elements - spliterator.forEachRemaining(boxingAdapter.apply( - e -> fail("Spliterator.forEach produced an element after spliterator exhausted: " + e))); - - Spliterator<T> split = spliterator.trySplit(); - assertNull(split); - } - else { - List<T> leafDest = new ArrayList<>(); - Consumer<T> addToLeafDest = boxingAdapter.apply(leafDest::add); - spliterator.forEachRemaining(addToLeafDest); - - if (sizeIfKnown >= 0) - assertEquals(sizeIfKnown, leafDest.size()); - - // Assert that forEach now produces no elements - spliterator.tryAdvance(boxingAdapter.apply( - e -> fail("Spliterator.tryAdvance produced an element after spliterator exhausted: " + e))); - - Spliterator<T> split = spliterator.trySplit(); - assertNull(split); - - dest.addAll(leafDest); - } - } - } - - private static <T, S extends Spliterator<T>> void testSplitUntilNull( - Collection<T> exp, - Supplier<S> supplier, - UnaryOperator<Consumer<T>> boxingAdapter) { - Spliterator<T> s = supplier.get(); - boolean isOrdered = s.hasCharacteristics(Spliterator.ORDERED); - assertSpliterator(s); - - List<T> splits = new ArrayList<>(); - Consumer<T> c = boxingAdapter.apply(splits::add); - - testSplitUntilNull(new SplitNode<T>(c, s)); - assertContents(splits, exp, isOrdered); - } - - private static class SplitNode<T> { - // Constant for every node - final Consumer<T> c; - final int rootCharacteristics; - - final Spliterator<T> s; - - SplitNode(Consumer<T> c, Spliterator<T> s) { - this(c, s.characteristics(), s); - } - - private SplitNode(Consumer<T> c, int rootCharacteristics, Spliterator<T> s) { - this.c = c; 
- this.rootCharacteristics = rootCharacteristics; - this.s = s; - } - - SplitNode<T> fromSplit(Spliterator<T> split) { - return new SplitNode<>(c, rootCharacteristics, split); - } - } - - /** - * Set the maximum stack capacity to 0.25MB. This should be more than enough to detect a bad spliterator - * while not unduly disrupting test infrastructure given the test data sizes that are used are small. - * Note that j.u.c.ForkJoinPool sets the max queue size to 64M (1 << 26). - */ - private static final int MAXIMUM_STACK_CAPACITY = 1 << 18; // 0.25MB - - private static <T> void testSplitUntilNull(SplitNode<T> e) { - // Use an explicit stack to avoid a StackOverflowException when testing a Spliterator - // that when repeatedly split produces a right-balanced (and maybe degenerate) tree, or - // for a spliterator that is badly behaved. - Deque<SplitNode<T>> stack = new ArrayDeque<>(); - stack.push(e); - - int iteration = 0; - while (!stack.isEmpty()) { - assertTrue(iteration++ < MAXIMUM_STACK_CAPACITY, "Exceeded maximum stack modification count of 1 << 18"); - - e = stack.pop(); - Spliterator<T> parentAndRightSplit = e.s; - - long parentEstimateSize = parentAndRightSplit.estimateSize(); - assertTrue(parentEstimateSize >= 0, - String.format("Split size estimate %d < 0", parentEstimateSize)); - - long parentSize = parentAndRightSplit.getExactSizeIfKnown(); - Spliterator<T> leftSplit = parentAndRightSplit.trySplit(); - if (leftSplit == null) { - parentAndRightSplit.forEachRemaining(e.c); - continue; - } - - assertSpliterator(leftSplit, e.rootCharacteristics); - assertSpliterator(parentAndRightSplit, e.rootCharacteristics); - - if (parentEstimateSize != Long.MAX_VALUE && leftSplit.estimateSize() > 0 && parentAndRightSplit.estimateSize() > 0) { - assertTrue(leftSplit.estimateSize() < parentEstimateSize, - String.format("Left split size estimate %d >= parent split size estimate %d", - leftSplit.estimateSize(), parentEstimateSize)); - 
assertTrue(parentAndRightSplit.estimateSize() < parentEstimateSize, - String.format("Right split size estimate %d >= parent split size estimate %d", - leftSplit.estimateSize(), parentEstimateSize)); - } - else { - assertTrue(leftSplit.estimateSize() <= parentEstimateSize, - String.format("Left split size estimate %d > parent split size estimate %d", - leftSplit.estimateSize(), parentEstimateSize)); - assertTrue(parentAndRightSplit.estimateSize() <= parentEstimateSize, - String.format("Right split size estimate %d > parent split size estimate %d", - leftSplit.estimateSize(), parentEstimateSize)); - } - - long leftSize = leftSplit.getExactSizeIfKnown(); - long rightSize = parentAndRightSplit.getExactSizeIfKnown(); - if (parentSize >= 0 && leftSize >= 0 && rightSize >= 0) - assertEquals(parentSize, leftSize + rightSize, - String.format("exact left split size %d + exact right split size %d != parent exact split size %d", - leftSize, rightSize, parentSize)); - - // Add right side to stack first so left side is popped off first - stack.push(e.fromSplit(parentAndRightSplit)); - stack.push(e.fromSplit(leftSplit)); - } - } - - private static void assertSpliterator(Spliterator<?> s, int rootCharacteristics) { - if ((rootCharacteristics & Spliterator.SUBSIZED) != 0) { - assertTrue(s.hasCharacteristics(Spliterator.SUBSIZED), - "Child split is not SUBSIZED when root split is SUBSIZED"); - } - assertSpliterator(s); - } - - private static void assertSpliterator(Spliterator<?> s) { - if (s.hasCharacteristics(Spliterator.SUBSIZED)) { - assertTrue(s.hasCharacteristics(Spliterator.SIZED)); - } - if (s.hasCharacteristics(Spliterator.SIZED)) { - assertTrue(s.estimateSize() != Long.MAX_VALUE); - assertTrue(s.getExactSizeIfKnown() >= 0); - } - try { - s.getComparator(); - assertTrue(s.hasCharacteristics(Spliterator.SORTED)); - } catch (IllegalStateException e) { - assertFalse(s.hasCharacteristics(Spliterator.SORTED)); - } - } - - private static<T> void assertContents(Collection<T> 
actual, Collection<T> expected, boolean isOrdered) { - if (isOrdered) { - assertEquals(actual, expected); - } - else { - assertContentsUnordered(actual, expected); - } - } - - private static<T> void assertContentsUnordered(Iterable<T> actual, Iterable<T> expected) { - assertEquals(toBoxedMultiset(actual), toBoxedMultiset(expected)); - } - - private static <T> Map<T, Integer> toBoxedMultiset(Iterable<T> c) { - Map<T, Integer> result = new HashMap<>(); - c.forEach(e -> { - if (result.containsKey(e)) result.put(e, result.get(e) + 1); - else result.put(e, 1); - }); - return result; - } - - private void executeAndCatch(Class<? extends Exception> expected, Runnable r) { - Exception caught = null; - try { - r.run(); - } - catch (Exception e) { - caught = e; - } - - assertNotNull(caught, - String.format("No Exception was thrown, expected an Exception of %s to be thrown", - expected.getName())); - assertTrue(expected.isInstance(caught), - String.format("Exception thrown %s not an instance of %s", - caught.getClass().getName(), expected.getName())); - } - -}
--- a/test/java/util/zip/StoredCRC.java Wed May 29 13:22:58 2013 -0300 +++ b/test/java/util/zip/StoredCRC.java Wed Jun 05 13:10:11 2013 -0300 @@ -77,9 +77,9 @@ unexpected(t); } - // Test that data corruption is detected. Offset 39 was + // Test that data corruption is detected. "offset" was // determined to be in the entry's uncompressed data. - data[39] ^= 1; + data[getDataOffset(data) + 4] ^= 1; zis = new ZipInputStream( new ByteArrayInputStream(data)); @@ -97,6 +97,15 @@ } } + public static final int getDataOffset(byte b[]) { + final int LOCHDR = 30; // LOC header size + final int LOCEXT = 28; // extra field length + final int LOCNAM = 26; // filename length + int lenExt = Byte.toUnsignedInt(b[LOCEXT]) | (Byte.toUnsignedInt(b[LOCEXT + 1]) << 8); + int lenNam = Byte.toUnsignedInt(b[LOCNAM]) | (Byte.toUnsignedInt(b[LOCNAM + 1]) << 8); + return LOCHDR + lenExt + lenNam; + } + //--------------------- Infrastructure --------------------------- static volatile int passed = 0, failed = 0; static boolean pass() {passed++; return true;}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/java/util/zip/TestExtraTime.java Wed Jun 05 13:10:11 2013 -0300 @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/** + * @test + * @bug 4759491 6303183 7012868 + * @summary Test ZOS and ZIS timestamp in extra field correctly + */ + +import java.io.*; +import java.util.TimeZone; +import java.util.concurrent.TimeUnit; +import java.util.zip.ZipEntry; +import java.util.zip.ZipInputStream; +import java.util.zip.ZipOutputStream; + + +public class TestExtraTime { + + public static void main(String[] args) throws Throwable{ + + File src = new File(System.getProperty("test.src", "."), "TestExtraTime.java"); + if (src.exists()) { + long mtime = src.lastModified(); + test(mtime, null); + test(10, null); // ms-dos 1980 epoch problem + test(mtime, TimeZone.getTimeZone("Asia/Shanghai")); + } + } + + private static void test(long mtime, TimeZone tz) throws Throwable { + TimeZone tz0 = TimeZone.getDefault(); + if (tz != null) { + TimeZone.setDefault(tz); + } + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + ZipOutputStream zos = new ZipOutputStream(baos); + ZipEntry ze = new ZipEntry("TestExtreTime.java"); + + ze.setTime(mtime); + zos.putNextEntry(ze); + zos.write(new byte[] { 1,2 ,3, 4}); + zos.close(); + if (tz != null) { + TimeZone.setDefault(tz0); + } + ZipInputStream zis = new ZipInputStream( + new ByteArrayInputStream(baos.toByteArray())); + ze = zis.getNextEntry(); + zis.close(); + + System.out.printf("%tc => %tc%n", mtime, ze.getTime()); + + if (TimeUnit.MILLISECONDS.toSeconds(mtime) != + TimeUnit.MILLISECONDS.toSeconds(ze.getTime())) + throw new RuntimeException("Timestamp storing failed!"); + + } +}
--- a/test/java/util/zip/ZipFile/Assortment.java Wed May 29 13:22:58 2013 -0300 +++ b/test/java/util/zip/ZipFile/Assortment.java Wed Jun 05 13:10:11 2013 -0300 @@ -22,7 +22,7 @@ */ /* @test - * @bug 4770745 6234507 + * @bug 4770745 6234507 6303183 * @summary test a variety of zip file entries * @author Martin Buchholz */ @@ -54,6 +54,44 @@ check(condition, "Something's wrong"); } + static final int get16(byte b[], int off) { + return Byte.toUnsignedInt(b[off]) | (Byte.toUnsignedInt(b[off+1]) << 8); + } + + // check if all "expected" extra fields equal to their + // corresponding fields in "extra". The "extra" might have + // timestamp fields added by ZOS. + static boolean equalsExtraData(byte[] expected, byte[] extra) { + if (expected == null) + return true; + int off = 0; + int len = expected.length; + while (off + 4 < len) { + int tag = get16(expected, off); + int sz = get16(expected, off + 2); + int off0 = 0; + int len0 = extra.length; + boolean matched = false; + while (off0 + 4 < len0) { + int tag0 = get16(extra, off0); + int sz0 = get16(extra, off0 + 2); + if (tag == tag0 && sz == sz0) { + matched = true; + for (int i = 0; i < sz; i++) { + if (expected[off + i] != extra[off0 +i]) + matched = false; + } + break; + } + off0 += (4 + sz0); + } + if (!matched) + return false; + off += (4 + sz); + } + return true; + } + private static class Entry { private String name; private int method; @@ -109,7 +147,7 @@ check((((comment == null) || comment.equals("")) && (e.getComment() == null)) || comment.equals(e.getComment())); - check(Arrays.equals(extra, e.getExtra())); + check(equalsExtraData(extra, e.getExtra())); check(Arrays.equals(data, getData(f, e))); check(e.getSize() == data.length); check((method == ZipEntry.DEFLATED) || @@ -129,8 +167,7 @@ byte[] extra = (this.extra != null && this.extra.length == 0) ? 
null : this.extra; - check(Arrays.equals(extra, e.getExtra())); - + check(equalsExtraData(extra, e.getExtra())); check(name.equals(e.getName())); check(method == e.getMethod()); check(e.getSize() == -1 || e.getSize() == data.length);
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/javax/crypto/Cipher/CipherStreamClose.java Wed Jun 05 13:10:11 2013 -0300 @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 7160837 + * @summary Make sure Cipher IO streams doesn't call extra doFinal if close() + * is called multiple times. Additionally, verify the input and output streams + * match with encryption and decryption with non-stream crypto. 
+ */ + +import java.io.*; +import java.security.DigestOutputStream; +import java.security.DigestInputStream; +import java.security.MessageDigest; +import java.util.Arrays; + +import javax.crypto.Cipher; +import javax.crypto.CipherOutputStream; +import javax.crypto.CipherInputStream; +import javax.crypto.SecretKey; +import javax.crypto.spec.SecretKeySpec; +import javax.xml.bind.DatatypeConverter; + +public class CipherStreamClose { + private static final String message = "This is the sample message"; + static boolean debug = false; + + /* + * This method does encryption by cipher.doFinal(), and not with + * CipherOutputStream + */ + public static byte[] blockEncrypt(String message, SecretKey key) + throws Exception { + + byte[] data; + Cipher encCipher = Cipher.getInstance("AES/ECB/PKCS5Padding"); + encCipher.init(Cipher.ENCRYPT_MODE, key); + try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { + try (ObjectOutputStream oos = new ObjectOutputStream(bos)) { + oos.writeObject(message); + } + data = bos.toByteArray(); + } + + if (debug) { + System.out.println(DatatypeConverter.printHexBinary(data)); + } + return encCipher.doFinal(data); + + } + + /* + * This method does decryption by cipher.doFinal(), and not with + * CipherIntputStream + */ + public static Object blockDecrypt(byte[] data, SecretKey key) + throws Exception { + + Cipher c = Cipher.getInstance("AES/ECB/PKCS5Padding"); + c.init(Cipher.DECRYPT_MODE, key); + data = c.doFinal(data); + try (ByteArrayInputStream bis = new ByteArrayInputStream(data)) { + try (ObjectInputStream ois = new ObjectInputStream(bis)) { + return ois.readObject(); + } + } + } + + public static byte[] streamEncrypt(String message, SecretKey key, + MessageDigest digest) + throws Exception { + + byte[] data; + Cipher encCipher = Cipher.getInstance("AES/ECB/PKCS5Padding"); + encCipher.init(Cipher.ENCRYPT_MODE, key); + try (ByteArrayOutputStream bos = new ByteArrayOutputStream(); + DigestOutputStream dos = new 
DigestOutputStream(bos, digest); + CipherOutputStream cos = new CipherOutputStream(dos, encCipher)) { + try (ObjectOutputStream oos = new ObjectOutputStream(cos)) { + oos.writeObject(message); + } + data = bos.toByteArray(); + } + + if (debug) { + System.out.println(DatatypeConverter.printHexBinary(data)); + } + return data; + } + + public static Object streamDecrypt(byte[] data, SecretKey key, + MessageDigest digest) throws Exception { + + Cipher decCipher = Cipher.getInstance("AES/ECB/PKCS5Padding"); + decCipher.init(Cipher.DECRYPT_MODE, key); + digest.reset(); + try (ByteArrayInputStream bis = new ByteArrayInputStream(data); + DigestInputStream dis = new DigestInputStream(bis, digest); + CipherInputStream cis = new CipherInputStream(dis, decCipher)) { + + try (ObjectInputStream ois = new ObjectInputStream(cis)) { + return ois.readObject(); + } + } + } + + public static void main(String[] args) throws Exception { + MessageDigest digest = MessageDigest.getInstance("SHA1"); + SecretKeySpec key = new SecretKeySpec( + DatatypeConverter.parseHexBinary( + "12345678123456781234567812345678"), "AES"); + + // Run 'message' through streamEncrypt + byte[] se = streamEncrypt(message, key, digest); + // 'digest' already has the value from the stream, just finish the op + byte[] sd = digest.digest(); + digest.reset(); + // Run 'message' through blockEncrypt + byte[] be = blockEncrypt(message, key); + // Take digest of encrypted blockEncrypt result + byte[] bd = digest.digest(be); + // Verify both returned the same value + if (!Arrays.equals(sd, bd)) { + System.err.println("Stream: "+DatatypeConverter.printHexBinary(se)+ + "\t Digest: "+DatatypeConverter.printHexBinary(sd)); + System.err.println("Block : "+DatatypeConverter.printHexBinary(be)+ + "\t Digest: "+DatatypeConverter.printHexBinary(bd)); + throw new Exception("stream & block encryption does not match"); + } + + digest.reset(); + // Sanity check: Decrypt separately from stream to verify operations + String bm = 
(String) blockDecrypt(be, key); + if (message.compareTo(bm) != 0) { + System.err.println("Expected: "+message+"\nBlock: "+bm); + throw new Exception("Block decryption does not match expected"); + } + + // Have decryption and digest included in the object stream + String sm = (String) streamDecrypt(se, key, digest); + if (message.compareTo(sm) != 0) { + System.err.println("Expected: "+message+"\nStream: "+sm); + throw new Exception("Stream decryption does not match expected."); + } + } +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/javax/swing/text/View/8014863/bug8014863.java Wed Jun 05 13:10:11 2013 -0300 @@ -0,0 +1,146 @@ +/* + * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8014863 + * @summary Tests the calculation of the line breaks when a text is inserted + * @author Dmitry Markov + * @library ../../../regtesthelpers + * @build Util + * @run main bug8014863 + */ + +import sun.awt.SunToolkit; + +import javax.swing.*; +import javax.swing.text.html.HTMLEditorKit; +import java.awt.*; +import java.awt.event.KeyEvent; + +public class bug8014863 { + + private static JEditorPane editorPane; + private static Robot robot; + private static SunToolkit toolkit; + + public static void main(String[] args) throws Exception { + toolkit = (SunToolkit) Toolkit.getDefaultToolkit(); + robot = new Robot(); + + createAndShowGUI(); + + toolkit.realSync(); + + Util.hitKeys(robot, KeyEvent.VK_HOME); + Util.hitKeys(robot, KeyEvent.VK_O); + + toolkit.realSync(); + + if (3 != getNumberOfTextLines()) { + throw new RuntimeException("The number of texts lines does not meet the expectation"); + } + + Util.hitKeys(robot, KeyEvent.VK_N); + + toolkit.realSync(); + + if (3 != getNumberOfTextLines()) { + throw new RuntimeException("The number of texts lines does not meet the expectation"); + } + + Util.hitKeys(robot, KeyEvent.VK_E); + Util.hitKeys(robot, KeyEvent.VK_SPACE); + Util.hitKeys(robot, KeyEvent.VK_T); + Util.hitKeys(robot, KeyEvent.VK_W); + + toolkit.realSync(); + + if (3 != getNumberOfTextLines()) { + throw new RuntimeException("The number of texts lines does not meet the expectation"); + } + } + + private static int getNumberOfTextLines() throws Exception { + int numberOfLines = 0; + int caretPosition = getCaretPosition(); + int current = 1; + int previous; + + setCaretPosition(current); + do { + previous = current; + Util.hitKeys(robot, KeyEvent.VK_DOWN); + toolkit.realSync(); + current = getCaretPosition(); + numberOfLines++; + } while (current != previous); + + setCaretPosition(caretPosition); + return numberOfLines; + } + + private static int getCaretPosition() throws Exception { + final int[] result = new int[1]; + 
SwingUtilities.invokeAndWait(new Runnable() { + public void run() { + result[0] = editorPane.getCaretPosition(); + } + }); + return result[0]; + } + + private static void setCaretPosition(final int position) throws Exception { + SwingUtilities.invokeAndWait(new Runnable() { + public void run() { + editorPane.setCaretPosition(position); + } + }); + } + + private static void createAndShowGUI() throws Exception { + SwingUtilities.invokeAndWait(new Runnable() { + public void run() { + try { + UIManager.setLookAndFeel("javax.swing.plaf.metal.MetalLookAndFeel"); + } catch (Exception ex) { + throw new RuntimeException(ex); + } + JFrame frame = new JFrame(); + frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); + + editorPane = new JEditorPane(); + HTMLEditorKit editorKit = new HTMLEditorKit(); + editorPane.setEditorKit(editorKit); + editorPane.setText("<p>qqqq <em>pp</em> qqqq <em>pp</em> " + + "qqqq <em>pp</em> qqqq <em>pp</em> qqqq <em>pp</em> qqqq <em>pp" + + "</em> qqqq <em>pp</em> qqqq <em>pp</em> qqqq <em>pp</em> qqqq</p>"); + editorPane.setCaretPosition(1); + + frame.add(editorPane); + frame.setSize(200, 200); + frame.setVisible(true); + } + }); + } +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/javax/swing/text/html/parser/Parser/7011777/bug7011777.java Wed Jun 05 13:10:11 2013 -0300 @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 7011777 + * @summary Tests correct parsing of a HTML comment inside 'script' tags + * @author Dmitry Markov + */ + +import javax.swing.text.html.HTMLEditorKit; +import javax.swing.text.html.parser.ParserDelegator; +import java.io.StringReader; + +public class bug7011777 { + static String comment = "<!--\n" + + "function foo() {\n" + + " var tag1 = \"</script>\";\n" + + " var tag2 = \"<div>\";\n" + + " var tag3 = \"</div>\";\n" + + " var tag4 = \"<script>\";\n" + + "}\n" + + "// -->"; + static String html = "<script>" + comment + "</script>"; + public static void main(String[] args) throws Exception { + new ParserDelegator().parse(new StringReader(html), new MyParserCallback(), true); + } + + static class MyParserCallback extends HTMLEditorKit.ParserCallback { + + @Override + public void handleComment(char[] data, int pos) { + String commentWithoutTags = comment.substring("<!--".length(), comment.length() - "-->".length()); + String str = new String(data); + if (!commentWithoutTags.equals(str)) { + System.out.println("Sample string:\n" + commentWithoutTags); + System.out.println("Returned string:\n" + str); + throw new RuntimeException("Test Failed, sample and returned strings are mismatched!"); + } + } + } + +}
--- a/test/jdk/lambda/MethodReferenceTestInstanceMethod.java Wed May 29 13:22:58 2013 -0300 +++ b/test/jdk/lambda/MethodReferenceTestInstanceMethod.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/test/jdk/lambda/MethodReferenceTestKinds.java Wed May 29 13:22:58 2013 -0300 +++ b/test/jdk/lambda/MethodReferenceTestKinds.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/test/jdk/lambda/MethodReferenceTestSueCase1.java Wed May 29 13:22:58 2013 -0300 +++ b/test/jdk/lambda/MethodReferenceTestSueCase1.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/test/jdk/lambda/MethodReferenceTestSueCase2.java Wed May 29 13:22:58 2013 -0300 +++ b/test/jdk/lambda/MethodReferenceTestSueCase2.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/test/jdk/lambda/MethodReferenceTestSueCase4.java Wed May 29 13:22:58 2013 -0300 +++ b/test/jdk/lambda/MethodReferenceTestSueCase4.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/test/jdk/lambda/separate/AttributeInjector.java Wed May 29 13:22:58 2013 -0300 +++ b/test/jdk/lambda/separate/AttributeInjector.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/test/jdk/lambda/separate/ClassFile.java Wed May 29 13:22:58 2013 -0300 +++ b/test/jdk/lambda/separate/ClassFile.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/test/jdk/lambda/separate/ClassFilePreprocessor.java Wed May 29 13:22:58 2013 -0300 +++ b/test/jdk/lambda/separate/ClassFilePreprocessor.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/test/jdk/lambda/separate/ClassToInterfaceConverter.java Wed May 29 13:22:58 2013 -0300 +++ b/test/jdk/lambda/separate/ClassToInterfaceConverter.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/test/jdk/lambda/separate/Compiler.java Wed May 29 13:22:58 2013 -0300 +++ b/test/jdk/lambda/separate/Compiler.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/test/jdk/lambda/separate/DirectedClassLoader.java Wed May 29 13:22:58 2013 -0300 +++ b/test/jdk/lambda/separate/DirectedClassLoader.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/test/jdk/lambda/separate/SourceModel.java Wed May 29 13:22:58 2013 -0300 +++ b/test/jdk/lambda/separate/SourceModel.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/test/jdk/lambda/separate/TestHarness.java Wed May 29 13:22:58 2013 -0300 +++ b/test/jdk/lambda/separate/TestHarness.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/test/jdk/lambda/vm/DefaultMethodRegressionTests.java Wed May 29 13:22:58 2013 -0300 +++ b/test/jdk/lambda/vm/DefaultMethodRegressionTests.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/test/jdk/lambda/vm/DefaultMethodsTest.java Wed May 29 13:22:58 2013 -0300 +++ b/test/jdk/lambda/vm/DefaultMethodsTest.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/test/jdk/lambda/vm/InterfaceAccessFlagsTest.java Wed May 29 13:22:58 2013 -0300 +++ b/test/jdk/lambda/vm/InterfaceAccessFlagsTest.java Wed Jun 05 13:10:11 2013 -0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/test/sun/awt/datatransfer/SuplementaryCharactersTransferTest.java Wed May 29 13:22:58 2013 -0300 +++ b/test/sun/awt/datatransfer/SuplementaryCharactersTransferTest.java Wed Jun 05 13:10:11 2013 -0300 @@ -146,12 +146,6 @@ } @Override - protected Image platformImageBytesOrStreamToImage(InputStream str, - byte[] bytes, long format) throws IOException { - throw new UnsupportedOperationException("Not supported yet."); - } - - @Override protected byte[] imageToPlatformBytes(Image image, long format) throws IOException { throw new UnsupportedOperationException("Not supported yet."); @@ -161,5 +155,10 @@ public ToolkitThreadBlockedHandler getToolkitThreadBlockedHandler() { throw new UnsupportedOperationException("Not supported yet."); } + + @Override + protected Image platformImageBytesToImage(byte[] bytes, long format) throws IOException { + throw new UnsupportedOperationException("Not supported yet."); + } } } \ No newline at end of file
--- a/test/sun/java2d/X11SurfaceData/SharedMemoryPixmapsTest/SharedMemoryPixmapsTest.sh Wed May 29 13:22:58 2013 -0300 +++ b/test/sun/java2d/X11SurfaceData/SharedMemoryPixmapsTest/SharedMemoryPixmapsTest.sh Wed Jun 05 13:10:11 2013 -0300 @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2005, 2008, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -21,7 +22,6 @@ # questions. # -#!/bin/sh # @test # @bug 6363434 6588884 # @summary Verify that shared memory pixmaps are not broken
--- a/test/sun/management/jdp/JdpUnitTest.java Wed May 29 13:22:58 2013 -0300 +++ b/test/sun/management/jdp/JdpUnitTest.java Wed Jun 05 13:10:11 2013 -0300 @@ -32,6 +32,12 @@ public class JdpUnitTest { + + static byte[] russian_name = {(byte)0xd0,(byte)0xbf,(byte)0xd1,(byte)0x80,(byte)0xd0,(byte)0xbe,(byte)0xd0,(byte)0xb2, + (byte)0xd0,(byte)0xb5,(byte)0xd1,(byte)0x80,(byte)0xd0,(byte)0xba,(byte)0xd0,(byte)0xb0, + (byte)0x20,(byte)0xd1,(byte)0x81,(byte)0xd0,(byte)0xb2,(byte)0xd1,(byte)0x8f,(byte)0xd0, + (byte)0xb7,(byte)0xd0,(byte)0xb8,(byte)0x0a}; + /** * This test tests that complete packet is build correctly */ @@ -42,7 +48,7 @@ { JdpJmxPacket p1 = new JdpJmxPacket(UUID.randomUUID(), "fake://unit-test"); p1.setMainClass("FakeUnitTest"); - p1.setInstanceName("Fake"); + p1.setInstanceName( new String(russian_name,"UTF-8")); byte[] b = p1.getPacketData(); JdpJmxPacket p2 = new JdpJmxPacket(b);
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/sun/net/www/protocol/http/HttpStreams.java Wed Jun 05 13:10:11 2013 -0300 @@ -0,0 +1,185 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/** + * @test + * @bug 8011719 + * @summary Basic checks to verify behavior of returned input streams + */ + +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; +import com.sun.net.httpserver.HttpServer; +import java.io.*; +import java.net.*; +import java.nio.charset.StandardCharsets; +import java.util.*; + +public class HttpStreams { + + void client(String u) throws Exception { + byte[] ba = new byte[5]; + HttpURLConnection urlc = (HttpURLConnection)(new URL(u)).openConnection(); + int resp = urlc.getResponseCode(); + InputStream is; + if (resp == 200) + is = urlc.getInputStream(); + else + is = urlc.getErrorStream(); + + expectNoThrow(() -> { is.read(); }, "read on open stream should not throw :" + u); + expectNoThrow(() -> { is.close(); }, "close should never throw: " + u); + expectNoThrow(() -> { is.close(); }, "close should never throw: " + u); + expectThrow(() -> { is.read(); }, "read on closed stream should throw: " + u); + expectThrow(() -> { is.read(ba); }, "read on closed stream should throw: " + u); + expectThrow(() -> { is.read(ba, 0, 2); }, "read on closed stream should throw: " + u); + } + + void test() throws Exception { + HttpServer server = null; + try { + server = startHttpServer(); + String baseUrl = "http://localhost:" + server.getAddress().getPort() + "/"; + client(baseUrl + "chunked/"); + client(baseUrl + "fixed/"); + client(baseUrl + "error/"); + client(baseUrl + "chunkedError/"); + + // Test with a response cache + ResponseCache ch = ResponseCache.getDefault(); + ResponseCache.setDefault(new TrivialCacheHandler()); + try { + client(baseUrl + "chunked/"); + client(baseUrl + "fixed/"); + client(baseUrl + "error/"); + client(baseUrl + "chunkedError/"); + } finally { + ResponseCache.setDefault(ch); + } + } finally { + if (server != null) + server.stop(0); + } + + System.out.println("passed: " + pass + ", failed: " + fail); + if (fail > 0) + throw new RuntimeException("some tests failed check output"); 
+ } + + public static void main(String[] args) throws Exception { + (new HttpStreams()).test(); + } + + // HTTP Server + HttpServer startHttpServer() throws IOException { + HttpServer httpServer = HttpServer.create(new InetSocketAddress(0), 0); + httpServer.createContext("/chunked/", new ChunkedHandler()); + httpServer.createContext("/fixed/", new FixedHandler()); + httpServer.createContext("/error/", new ErrorHandler()); + httpServer.createContext("/chunkedError/", new ChunkedErrorHandler()); + httpServer.start(); + return httpServer; + } + + static abstract class AbstractHandler implements HttpHandler { + @Override + public void handle(HttpExchange t) throws IOException { + try (InputStream is = t.getRequestBody()) { + while (is.read() != -1); + } + t.sendResponseHeaders(respCode(), length()); + try (OutputStream os = t.getResponseBody()) { + os.write(message()); + } + t.close(); + } + + abstract int respCode(); + abstract int length(); + abstract byte[] message(); + } + + static class ChunkedHandler extends AbstractHandler { + static final byte[] ba = + "Hello there from chunked handler!".getBytes(StandardCharsets.US_ASCII); + int respCode() { return 200; } + int length() { return 0; } + byte[] message() { return ba; } + } + + static class FixedHandler extends AbstractHandler { + static final byte[] ba = + "Hello there from fixed handler!".getBytes(StandardCharsets.US_ASCII); + int respCode() { return 200; } + int length() { return ba.length; } + byte[] message() { return ba; } + } + + static class ErrorHandler extends AbstractHandler { + static final byte[] ba = + "This is an error mesg from the server!".getBytes(StandardCharsets.US_ASCII); + int respCode() { return 400; } + int length() { return ba.length; } + byte[] message() { return ba; } + } + + static class ChunkedErrorHandler extends ErrorHandler { + int length() { return 0; } + } + + static class TrivialCacheHandler extends ResponseCache + { + public CacheResponse get(URI uri, String rqstMethod, Map 
rqstHeaders) { + return null; + } + + public CacheRequest put(URI uri, URLConnection conn) { + return new TrivialCacheRequest(); + } + } + + static class TrivialCacheRequest extends CacheRequest + { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + public void abort() {} + public OutputStream getBody() throws IOException { return baos; } + } + + static interface ThrowableRunnable { + void run() throws IOException; + } + + void expectThrow(ThrowableRunnable r, String msg) { + try { r.run(); fail(msg); } catch (IOException x) { pass(); } + } + + void expectNoThrow(ThrowableRunnable r, String msg) { + try { r.run(); pass(); } catch (IOException x) { fail(msg, x); } + } + + private int pass; + private int fail; + void pass() { pass++; } + void fail(String msg, Exception x) { System.out.println(msg); x.printStackTrace(); fail++; } + void fail(String msg) { System.out.println(msg); Thread.dumpStack(); fail++; } +}
--- a/test/sun/rmi/rmic/manifestClassPath/run.sh Wed May 29 13:22:58 2013 -0300 +++ b/test/sun/rmi/rmic/manifestClassPath/run.sh Wed Jun 05 13:10:11 2013 -0300 @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -21,7 +22,6 @@ # questions. # -#!/bin/sh # @test # @bug 6473331 6485027 6934615 # @summary Test handling of the Class-Path attribute in jar file manifests
--- a/test/sun/rmi/rmic/newrmic/equivalence/batch.sh Wed May 29 13:22:58 2013 -0300 +++ b/test/sun/rmi/rmic/newrmic/equivalence/batch.sh Wed Jun 05 13:10:11 2013 -0300 @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -21,7 +22,6 @@ # questions. # -#!/bin/sh # # Usage: batch.sh classpath classes... #
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/sun/security/pkcs11/tls/TestLeadingZeroesP11.java Wed Jun 05 13:10:11 2013 -0300 @@ -0,0 +1,410 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8014618 + * @summary Need to strip leading zeros in TlsPremasterSecret of DHKeyAgreement + * @library .. + * @author Pasi Eronen + */ + +import java.io.*; +import java.security.*; +import java.security.spec.*; +import java.security.interfaces.*; +import javax.crypto.*; +import javax.crypto.spec.*; +import javax.crypto.interfaces.*; + +/** + * Test that leading zeroes are stripped in TlsPremasterSecret case, + * but are left as-is in other cases. + * + * We use pre-generated keypairs, since with randomly generated keypairs, + * a leading zero happens only (roughly) 1 out of 256 cases. 
+ */ + +public class TestLeadingZeroesP11 extends PKCS11Test { + + public static void main(String[] args) throws Exception { + main(new TestLeadingZeroesP11()); + } + + public void main(Provider p) throws Exception { + + // decode pre-generated keypairs + KeyFactory kfac = KeyFactory.getInstance("DH", p); + PublicKey alicePubKey = + kfac.generatePublic(new X509EncodedKeySpec(alicePubKeyEnc)); + PublicKey bobPubKey = + kfac.generatePublic(new X509EncodedKeySpec(bobPubKeyEnc)); + PrivateKey alicePrivKey = + kfac.generatePrivate(new PKCS8EncodedKeySpec(alicePrivKeyEnc)); + PrivateKey bobPrivKey = + kfac.generatePrivate(new PKCS8EncodedKeySpec(bobPrivKeyEnc)); + + // generate normal shared secret + KeyAgreement aliceKeyAgree = KeyAgreement.getInstance("DH", p); + aliceKeyAgree.init(alicePrivKey); + aliceKeyAgree.doPhase(bobPubKey, true); + byte[] sharedSecret = aliceKeyAgree.generateSecret(); + System.out.println("shared secret:\n" + toHexString(sharedSecret)); + + // verify that leading zero is present + if (sharedSecret.length != 128) { + throw new Exception("Unexpected shared secret length"); + } + if (sharedSecret[0] != 0) { + throw new Exception("First byte is not zero as expected"); + } + + // now, test TLS premaster secret + aliceKeyAgree.init(alicePrivKey); + aliceKeyAgree.doPhase(bobPubKey, true); + byte[] tlsPremasterSecret = + aliceKeyAgree.generateSecret("TlsPremasterSecret").getEncoded(); + System.out.println( + "tls premaster secret:\n" + toHexString(tlsPremasterSecret)); + + // check that leading zero has been stripped + if (tlsPremasterSecret.length != 127) { + throw new Exception("Unexpected TLS premaster secret length"); + } + if (tlsPremasterSecret[0] == 0) { + throw new Exception("First byte is zero"); + } + for (int i = 0; i < tlsPremasterSecret.length; i++) { + if (tlsPremasterSecret[i] != sharedSecret[i+1]) { + throw new Exception("Shared secrets differ"); + } + } + + } + + /* + * Converts a byte to hex digit and writes to the supplied buffer + 
*/ + private void byte2hex(byte b, StringBuffer buf) { + char[] hexChars = { '0', '1', '2', '3', '4', '5', '6', '7', '8', + '9', 'A', 'B', 'C', 'D', 'E', 'F' }; + int high = ((b & 0xf0) >> 4); + int low = (b & 0x0f); + buf.append(hexChars[high]); + buf.append(hexChars[low]); + } + + /* + * Converts a byte array to hex string + */ + private String toHexString(byte[] block) { + StringBuffer buf = new StringBuffer(); + + int len = block.length; + + for (int i = 0; i < len; i++) { + byte2hex(block[i], buf); + if (i < len-1) { + buf.append(":"); + } + } + return buf.toString(); + } + + private static final byte alicePubKeyEnc[] = { + (byte)0x30, (byte)0x82, (byte)0x01, (byte)0x24, + (byte)0x30, (byte)0x81, (byte)0x99, (byte)0x06, + (byte)0x09, (byte)0x2A, (byte)0x86, (byte)0x48, + (byte)0x86, (byte)0xF7, (byte)0x0D, (byte)0x01, + (byte)0x03, (byte)0x01, (byte)0x30, (byte)0x81, + (byte)0x8B, (byte)0x02, (byte)0x81, (byte)0x81, + (byte)0x00, (byte)0xF4, (byte)0x88, (byte)0xFD, + (byte)0x58, (byte)0x4E, (byte)0x49, (byte)0xDB, + (byte)0xCD, (byte)0x20, (byte)0xB4, (byte)0x9D, + (byte)0xE4, (byte)0x91, (byte)0x07, (byte)0x36, + (byte)0x6B, (byte)0x33, (byte)0x6C, (byte)0x38, + (byte)0x0D, (byte)0x45, (byte)0x1D, (byte)0x0F, + (byte)0x7C, (byte)0x88, (byte)0xB3, (byte)0x1C, + (byte)0x7C, (byte)0x5B, (byte)0x2D, (byte)0x8E, + (byte)0xF6, (byte)0xF3, (byte)0xC9, (byte)0x23, + (byte)0xC0, (byte)0x43, (byte)0xF0, (byte)0xA5, + (byte)0x5B, (byte)0x18, (byte)0x8D, (byte)0x8E, + (byte)0xBB, (byte)0x55, (byte)0x8C, (byte)0xB8, + (byte)0x5D, (byte)0x38, (byte)0xD3, (byte)0x34, + (byte)0xFD, (byte)0x7C, (byte)0x17, (byte)0x57, + (byte)0x43, (byte)0xA3, (byte)0x1D, (byte)0x18, + (byte)0x6C, (byte)0xDE, (byte)0x33, (byte)0x21, + (byte)0x2C, (byte)0xB5, (byte)0x2A, (byte)0xFF, + (byte)0x3C, (byte)0xE1, (byte)0xB1, (byte)0x29, + (byte)0x40, (byte)0x18, (byte)0x11, (byte)0x8D, + (byte)0x7C, (byte)0x84, (byte)0xA7, (byte)0x0A, + (byte)0x72, (byte)0xD6, (byte)0x86, (byte)0xC4, + (byte)0x03, 
(byte)0x19, (byte)0xC8, (byte)0x07, + (byte)0x29, (byte)0x7A, (byte)0xCA, (byte)0x95, + (byte)0x0C, (byte)0xD9, (byte)0x96, (byte)0x9F, + (byte)0xAB, (byte)0xD0, (byte)0x0A, (byte)0x50, + (byte)0x9B, (byte)0x02, (byte)0x46, (byte)0xD3, + (byte)0x08, (byte)0x3D, (byte)0x66, (byte)0xA4, + (byte)0x5D, (byte)0x41, (byte)0x9F, (byte)0x9C, + (byte)0x7C, (byte)0xBD, (byte)0x89, (byte)0x4B, + (byte)0x22, (byte)0x19, (byte)0x26, (byte)0xBA, + (byte)0xAB, (byte)0xA2, (byte)0x5E, (byte)0xC3, + (byte)0x55, (byte)0xE9, (byte)0x2F, (byte)0x78, + (byte)0xC7, (byte)0x02, (byte)0x01, (byte)0x02, + (byte)0x02, (byte)0x02, (byte)0x02, (byte)0x00, + (byte)0x03, (byte)0x81, (byte)0x85, (byte)0x00, + (byte)0x02, (byte)0x81, (byte)0x81, (byte)0x00, + (byte)0xEE, (byte)0xD6, (byte)0xB1, (byte)0xA3, + (byte)0xB4, (byte)0x78, (byte)0x2B, (byte)0x35, + (byte)0xEF, (byte)0xCD, (byte)0x17, (byte)0x86, + (byte)0x63, (byte)0x2B, (byte)0x97, (byte)0x0E, + (byte)0x7A, (byte)0xD1, (byte)0xFF, (byte)0x7A, + (byte)0xEB, (byte)0x57, (byte)0x61, (byte)0xA1, + (byte)0xF7, (byte)0x90, (byte)0x11, (byte)0xA7, + (byte)0x79, (byte)0x28, (byte)0x69, (byte)0xBA, + (byte)0xA7, (byte)0xB2, (byte)0x37, (byte)0x17, + (byte)0xAE, (byte)0x3C, (byte)0x92, (byte)0x89, + (byte)0x88, (byte)0xE5, (byte)0x7E, (byte)0x8E, + (byte)0xF0, (byte)0x24, (byte)0xD0, (byte)0xE1, + (byte)0xC4, (byte)0xB0, (byte)0x26, (byte)0x5A, + (byte)0x1E, (byte)0xBD, (byte)0xA0, (byte)0xCF, + (byte)0x3E, (byte)0x97, (byte)0x2A, (byte)0x13, + (byte)0x92, (byte)0x3B, (byte)0x39, (byte)0xD0, + (byte)0x1D, (byte)0xA3, (byte)0x6B, (byte)0x3E, + (byte)0xC2, (byte)0xBB, (byte)0x14, (byte)0xB6, + (byte)0xE2, (byte)0x4C, (byte)0x0E, (byte)0x5B, + (byte)0x4B, (byte)0xA4, (byte)0x9D, (byte)0xA6, + (byte)0x21, (byte)0xB0, (byte)0xF9, (byte)0xDE, + (byte)0x55, (byte)0xAE, (byte)0x5C, (byte)0x29, + (byte)0x0E, (byte)0xC1, (byte)0xFC, (byte)0xBA, + (byte)0x51, (byte)0xD3, (byte)0xB6, (byte)0x6D, + (byte)0x75, (byte)0x72, (byte)0xDF, (byte)0x43, + (byte)0xAB, 
(byte)0x94, (byte)0x21, (byte)0x6E, + (byte)0x0C, (byte)0xD1, (byte)0x93, (byte)0x54, + (byte)0x56, (byte)0x7D, (byte)0x4B, (byte)0x90, + (byte)0xF1, (byte)0x94, (byte)0x45, (byte)0xD4, + (byte)0x2A, (byte)0x71, (byte)0xA1, (byte)0xB8, + (byte)0xDD, (byte)0xAA, (byte)0x05, (byte)0xF0, + (byte)0x27, (byte)0x37, (byte)0xBD, (byte)0x44 + }; + + private static final byte alicePrivKeyEnc[] = { + (byte)0x30, (byte)0x81, (byte)0xE3, (byte)0x02, + (byte)0x01, (byte)0x00, (byte)0x30, (byte)0x81, + (byte)0x99, (byte)0x06, (byte)0x09, (byte)0x2A, + (byte)0x86, (byte)0x48, (byte)0x86, (byte)0xF7, + (byte)0x0D, (byte)0x01, (byte)0x03, (byte)0x01, + (byte)0x30, (byte)0x81, (byte)0x8B, (byte)0x02, + (byte)0x81, (byte)0x81, (byte)0x00, (byte)0xF4, + (byte)0x88, (byte)0xFD, (byte)0x58, (byte)0x4E, + (byte)0x49, (byte)0xDB, (byte)0xCD, (byte)0x20, + (byte)0xB4, (byte)0x9D, (byte)0xE4, (byte)0x91, + (byte)0x07, (byte)0x36, (byte)0x6B, (byte)0x33, + (byte)0x6C, (byte)0x38, (byte)0x0D, (byte)0x45, + (byte)0x1D, (byte)0x0F, (byte)0x7C, (byte)0x88, + (byte)0xB3, (byte)0x1C, (byte)0x7C, (byte)0x5B, + (byte)0x2D, (byte)0x8E, (byte)0xF6, (byte)0xF3, + (byte)0xC9, (byte)0x23, (byte)0xC0, (byte)0x43, + (byte)0xF0, (byte)0xA5, (byte)0x5B, (byte)0x18, + (byte)0x8D, (byte)0x8E, (byte)0xBB, (byte)0x55, + (byte)0x8C, (byte)0xB8, (byte)0x5D, (byte)0x38, + (byte)0xD3, (byte)0x34, (byte)0xFD, (byte)0x7C, + (byte)0x17, (byte)0x57, (byte)0x43, (byte)0xA3, + (byte)0x1D, (byte)0x18, (byte)0x6C, (byte)0xDE, + (byte)0x33, (byte)0x21, (byte)0x2C, (byte)0xB5, + (byte)0x2A, (byte)0xFF, (byte)0x3C, (byte)0xE1, + (byte)0xB1, (byte)0x29, (byte)0x40, (byte)0x18, + (byte)0x11, (byte)0x8D, (byte)0x7C, (byte)0x84, + (byte)0xA7, (byte)0x0A, (byte)0x72, (byte)0xD6, + (byte)0x86, (byte)0xC4, (byte)0x03, (byte)0x19, + (byte)0xC8, (byte)0x07, (byte)0x29, (byte)0x7A, + (byte)0xCA, (byte)0x95, (byte)0x0C, (byte)0xD9, + (byte)0x96, (byte)0x9F, (byte)0xAB, (byte)0xD0, + (byte)0x0A, (byte)0x50, (byte)0x9B, (byte)0x02, + 
(byte)0x46, (byte)0xD3, (byte)0x08, (byte)0x3D, + (byte)0x66, (byte)0xA4, (byte)0x5D, (byte)0x41, + (byte)0x9F, (byte)0x9C, (byte)0x7C, (byte)0xBD, + (byte)0x89, (byte)0x4B, (byte)0x22, (byte)0x19, + (byte)0x26, (byte)0xBA, (byte)0xAB, (byte)0xA2, + (byte)0x5E, (byte)0xC3, (byte)0x55, (byte)0xE9, + (byte)0x2F, (byte)0x78, (byte)0xC7, (byte)0x02, + (byte)0x01, (byte)0x02, (byte)0x02, (byte)0x02, + (byte)0x02, (byte)0x00, (byte)0x04, (byte)0x42, + (byte)0x02, (byte)0x40, (byte)0x36, (byte)0x4D, + (byte)0xD0, (byte)0x58, (byte)0x64, (byte)0x91, + (byte)0x78, (byte)0xA2, (byte)0x4B, (byte)0x79, + (byte)0x46, (byte)0xFE, (byte)0xC9, (byte)0xD9, + (byte)0xCA, (byte)0x5C, (byte)0xF9, (byte)0xFD, + (byte)0x6C, (byte)0x5D, (byte)0x76, (byte)0x3A, + (byte)0x41, (byte)0x6D, (byte)0x44, (byte)0x62, + (byte)0x75, (byte)0x93, (byte)0x81, (byte)0x93, + (byte)0x00, (byte)0x4C, (byte)0xB1, (byte)0xD8, + (byte)0x7D, (byte)0x9D, (byte)0xF3, (byte)0x16, + (byte)0x2C, (byte)0x6C, (byte)0x9F, (byte)0x7A, + (byte)0x84, (byte)0xA3, (byte)0x7A, (byte)0xC1, + (byte)0x4F, (byte)0x60, (byte)0xE3, (byte)0xB5, + (byte)0x86, (byte)0x28, (byte)0x08, (byte)0x4D, + (byte)0x94, (byte)0xB6, (byte)0x04, (byte)0x0D, + (byte)0xAC, (byte)0xBD, (byte)0x1F, (byte)0x42, + (byte)0x8F, (byte)0x1B + }; + + private static final byte bobPubKeyEnc[] = { + (byte)0x30, (byte)0x82, (byte)0x01, (byte)0x23, + (byte)0x30, (byte)0x81, (byte)0x99, (byte)0x06, + (byte)0x09, (byte)0x2A, (byte)0x86, (byte)0x48, + (byte)0x86, (byte)0xF7, (byte)0x0D, (byte)0x01, + (byte)0x03, (byte)0x01, (byte)0x30, (byte)0x81, + (byte)0x8B, (byte)0x02, (byte)0x81, (byte)0x81, + (byte)0x00, (byte)0xF4, (byte)0x88, (byte)0xFD, + (byte)0x58, (byte)0x4E, (byte)0x49, (byte)0xDB, + (byte)0xCD, (byte)0x20, (byte)0xB4, (byte)0x9D, + (byte)0xE4, (byte)0x91, (byte)0x07, (byte)0x36, + (byte)0x6B, (byte)0x33, (byte)0x6C, (byte)0x38, + (byte)0x0D, (byte)0x45, (byte)0x1D, (byte)0x0F, + (byte)0x7C, (byte)0x88, (byte)0xB3, (byte)0x1C, + (byte)0x7C, 
(byte)0x5B, (byte)0x2D, (byte)0x8E, + (byte)0xF6, (byte)0xF3, (byte)0xC9, (byte)0x23, + (byte)0xC0, (byte)0x43, (byte)0xF0, (byte)0xA5, + (byte)0x5B, (byte)0x18, (byte)0x8D, (byte)0x8E, + (byte)0xBB, (byte)0x55, (byte)0x8C, (byte)0xB8, + (byte)0x5D, (byte)0x38, (byte)0xD3, (byte)0x34, + (byte)0xFD, (byte)0x7C, (byte)0x17, (byte)0x57, + (byte)0x43, (byte)0xA3, (byte)0x1D, (byte)0x18, + (byte)0x6C, (byte)0xDE, (byte)0x33, (byte)0x21, + (byte)0x2C, (byte)0xB5, (byte)0x2A, (byte)0xFF, + (byte)0x3C, (byte)0xE1, (byte)0xB1, (byte)0x29, + (byte)0x40, (byte)0x18, (byte)0x11, (byte)0x8D, + (byte)0x7C, (byte)0x84, (byte)0xA7, (byte)0x0A, + (byte)0x72, (byte)0xD6, (byte)0x86, (byte)0xC4, + (byte)0x03, (byte)0x19, (byte)0xC8, (byte)0x07, + (byte)0x29, (byte)0x7A, (byte)0xCA, (byte)0x95, + (byte)0x0C, (byte)0xD9, (byte)0x96, (byte)0x9F, + (byte)0xAB, (byte)0xD0, (byte)0x0A, (byte)0x50, + (byte)0x9B, (byte)0x02, (byte)0x46, (byte)0xD3, + (byte)0x08, (byte)0x3D, (byte)0x66, (byte)0xA4, + (byte)0x5D, (byte)0x41, (byte)0x9F, (byte)0x9C, + (byte)0x7C, (byte)0xBD, (byte)0x89, (byte)0x4B, + (byte)0x22, (byte)0x19, (byte)0x26, (byte)0xBA, + (byte)0xAB, (byte)0xA2, (byte)0x5E, (byte)0xC3, + (byte)0x55, (byte)0xE9, (byte)0x2F, (byte)0x78, + (byte)0xC7, (byte)0x02, (byte)0x01, (byte)0x02, + (byte)0x02, (byte)0x02, (byte)0x02, (byte)0x00, + (byte)0x03, (byte)0x81, (byte)0x84, (byte)0x00, + (byte)0x02, (byte)0x81, (byte)0x80, (byte)0x2C, + (byte)0x40, (byte)0xFA, (byte)0xF6, (byte)0xA6, + (byte)0xF8, (byte)0xAC, (byte)0xC2, (byte)0x4F, + (byte)0xCD, (byte)0xC7, (byte)0x37, (byte)0x93, + (byte)0xE5, (byte)0xE4, (byte)0x5E, (byte)0x18, + (byte)0x14, (byte)0xE6, (byte)0x50, (byte)0xDA, + (byte)0x55, (byte)0x38, (byte)0x5D, (byte)0x24, + (byte)0xF5, (byte)0x42, (byte)0x68, (byte)0x5F, + (byte)0xF5, (byte)0x15, (byte)0xC8, (byte)0x9B, + (byte)0x5D, (byte)0x06, (byte)0x3D, (byte)0xE1, + (byte)0x52, (byte)0x2F, (byte)0x98, (byte)0xFF, + (byte)0x37, (byte)0xBB, (byte)0x75, (byte)0x48, + (byte)0x48, 
(byte)0xE9, (byte)0x65, (byte)0x84, + (byte)0x37, (byte)0xBB, (byte)0xB3, (byte)0xE9, + (byte)0x36, (byte)0x01, (byte)0xB4, (byte)0x6A, + (byte)0x1C, (byte)0xB2, (byte)0x11, (byte)0x82, + (byte)0xCE, (byte)0x3D, (byte)0x65, (byte)0xE5, + (byte)0x3C, (byte)0x89, (byte)0xE9, (byte)0x52, + (byte)0x19, (byte)0xBD, (byte)0x58, (byte)0xF6, + (byte)0xA2, (byte)0x03, (byte)0xA8, (byte)0xB2, + (byte)0xA5, (byte)0xDB, (byte)0xEB, (byte)0xF5, + (byte)0x94, (byte)0xF9, (byte)0x46, (byte)0xBE, + (byte)0x45, (byte)0x4C, (byte)0x65, (byte)0xD2, + (byte)0xD1, (byte)0xCF, (byte)0xFF, (byte)0xFF, + (byte)0xFA, (byte)0x38, (byte)0xF1, (byte)0x72, + (byte)0xAB, (byte)0xB9, (byte)0x14, (byte)0x4E, + (byte)0xF5, (byte)0xF0, (byte)0x7A, (byte)0x8E, + (byte)0x45, (byte)0xFD, (byte)0x5B, (byte)0xF9, + (byte)0xA2, (byte)0x97, (byte)0x1B, (byte)0xAE, + (byte)0x2C, (byte)0x7B, (byte)0x6B, (byte)0x7C, + (byte)0x98, (byte)0xFE, (byte)0x58, (byte)0xDD, + (byte)0xBE, (byte)0xF6, (byte)0x1C, (byte)0x8E, + (byte)0xD0, (byte)0xA1, (byte)0x72 + }; + + private static final byte bobPrivKeyEnc[] = { + (byte)0x30, (byte)0x81, (byte)0xE4, (byte)0x02, + (byte)0x01, (byte)0x00, (byte)0x30, (byte)0x81, + (byte)0x99, (byte)0x06, (byte)0x09, (byte)0x2A, + (byte)0x86, (byte)0x48, (byte)0x86, (byte)0xF7, + (byte)0x0D, (byte)0x01, (byte)0x03, (byte)0x01, + (byte)0x30, (byte)0x81, (byte)0x8B, (byte)0x02, + (byte)0x81, (byte)0x81, (byte)0x00, (byte)0xF4, + (byte)0x88, (byte)0xFD, (byte)0x58, (byte)0x4E, + (byte)0x49, (byte)0xDB, (byte)0xCD, (byte)0x20, + (byte)0xB4, (byte)0x9D, (byte)0xE4, (byte)0x91, + (byte)0x07, (byte)0x36, (byte)0x6B, (byte)0x33, + (byte)0x6C, (byte)0x38, (byte)0x0D, (byte)0x45, + (byte)0x1D, (byte)0x0F, (byte)0x7C, (byte)0x88, + (byte)0xB3, (byte)0x1C, (byte)0x7C, (byte)0x5B, + (byte)0x2D, (byte)0x8E, (byte)0xF6, (byte)0xF3, + (byte)0xC9, (byte)0x23, (byte)0xC0, (byte)0x43, + (byte)0xF0, (byte)0xA5, (byte)0x5B, (byte)0x18, + (byte)0x8D, (byte)0x8E, (byte)0xBB, (byte)0x55, + (byte)0x8C, 
(byte)0xB8, (byte)0x5D, (byte)0x38, + (byte)0xD3, (byte)0x34, (byte)0xFD, (byte)0x7C, + (byte)0x17, (byte)0x57, (byte)0x43, (byte)0xA3, + (byte)0x1D, (byte)0x18, (byte)0x6C, (byte)0xDE, + (byte)0x33, (byte)0x21, (byte)0x2C, (byte)0xB5, + (byte)0x2A, (byte)0xFF, (byte)0x3C, (byte)0xE1, + (byte)0xB1, (byte)0x29, (byte)0x40, (byte)0x18, + (byte)0x11, (byte)0x8D, (byte)0x7C, (byte)0x84, + (byte)0xA7, (byte)0x0A, (byte)0x72, (byte)0xD6, + (byte)0x86, (byte)0xC4, (byte)0x03, (byte)0x19, + (byte)0xC8, (byte)0x07, (byte)0x29, (byte)0x7A, + (byte)0xCA, (byte)0x95, (byte)0x0C, (byte)0xD9, + (byte)0x96, (byte)0x9F, (byte)0xAB, (byte)0xD0, + (byte)0x0A, (byte)0x50, (byte)0x9B, (byte)0x02, + (byte)0x46, (byte)0xD3, (byte)0x08, (byte)0x3D, + (byte)0x66, (byte)0xA4, (byte)0x5D, (byte)0x41, + (byte)0x9F, (byte)0x9C, (byte)0x7C, (byte)0xBD, + (byte)0x89, (byte)0x4B, (byte)0x22, (byte)0x19, + (byte)0x26, (byte)0xBA, (byte)0xAB, (byte)0xA2, + (byte)0x5E, (byte)0xC3, (byte)0x55, (byte)0xE9, + (byte)0x2F, (byte)0x78, (byte)0xC7, (byte)0x02, + (byte)0x01, (byte)0x02, (byte)0x02, (byte)0x02, + (byte)0x02, (byte)0x00, (byte)0x04, (byte)0x43, + (byte)0x02, (byte)0x41, (byte)0x00, (byte)0xE0, + (byte)0x31, (byte)0xE7, (byte)0x77, (byte)0xB8, + (byte)0xD0, (byte)0x7E, (byte)0x0A, (byte)0x9B, + (byte)0x94, (byte)0xD5, (byte)0x3D, (byte)0x33, + (byte)0x62, (byte)0x32, (byte)0x51, (byte)0xCE, + (byte)0x74, (byte)0x5C, (byte)0xA5, (byte)0x72, + (byte)0xD9, (byte)0x36, (byte)0xF3, (byte)0x8A, + (byte)0x3F, (byte)0x8B, (byte)0xC6, (byte)0xFE, + (byte)0xEF, (byte)0x94, (byte)0x8B, (byte)0x50, + (byte)0x41, (byte)0x9B, (byte)0x14, (byte)0xC8, + (byte)0xE9, (byte)0x1F, (byte)0x24, (byte)0x1F, + (byte)0x65, (byte)0x8E, (byte)0xD3, (byte)0x85, + (byte)0xD0, (byte)0x68, (byte)0x6C, (byte)0xF1, + (byte)0x79, (byte)0x45, (byte)0xD0, (byte)0x06, + (byte)0xA4, (byte)0xB8, (byte)0xE0, (byte)0x64, + (byte)0xF5, (byte)0x38, (byte)0x72, (byte)0x97, + (byte)0x00, (byte)0x23, (byte)0x5F + }; +} +