changeset 5094:2c971ed884ce jdk7u60-b03

Merge
author asaha
date Wed, 15 Jan 2014 10:53:50 -0800
parents 50f489feeb5e (current diff) 91d016d56832 (diff)
children c4905ac7895c 09e3feebb48f
files src/os/linux/vm/os_linux.cpp src/os/linux/vm/os_linux.hpp src/share/vm/utilities/vmError.cpp
diffstat 100 files changed, 2661 insertions(+), 637 deletions(-)
--- a/.hgtags	Wed Jan 15 10:45:35 2014 -0800
+++ b/.hgtags	Wed Jan 15 10:53:50 2014 -0800
@@ -590,6 +590,8 @@
 12374864c655a2cefb0d65caaacf215d5365ec5f jdk7u45-b18
 3677c8cc3c89c0fa608f485b84396e4cf755634b jdk7u45-b30
 520b7b3d9153c1407791325946b07c5c222cf0d6 jdk7u45-b31
+c373a733d5d5147f99eaa2b91d6b937c28214fc9 jdk7u45-b33
+0bcb43482f2ac5615437541ffb8dc0f79ece3148 jdk7u45-b34
 429884602206fcf5314c8b953c06d54d337558ca jdk7u51-b00
 68f03ff066f2341b89b52a6d6e21ae09de008351 jdk7u51-b01
 67910a581eca113847c5320c49436a9816c5d5c6 jdk7u51-b02
@@ -597,3 +599,22 @@
 683458c333ced92d515daa1b9bcdb5be679e535a jdk7u51-b04
 ed2db7a82229e7adbfe8a8166bf98f3ef4a09be5 jdk7u51-b05
 fec027762cf37d033d82d5b3725020f40c771690 jdk7u51-b06
+f673c581ebf91073b5bbdbdc5e4d4407910fa006 jdk7u51-b07
+b0a355aae00427e74cc0b89697c7c7f6fb520176 jdk7u51-b08
+4f56f2e206fd878809f70ca06f4bc21563a7c530 jdk7u51-b09
+1b7aaef3df78970c9a5ef5cc353ca927241555ee jdk7u51-b10
+1f11dff734af98f5bf11d4fceeda221ab1416971 jdk7u51-b11
+dee2a38ef6b26534c44c550ef4da2c3146c612c2 jdk7u51-b12
+6c6a2299029ad02fa2820b8ff8c61c2bbcae799c jdk7u51-b13
+a398ddc79d2310ad37b131cc3794b3cf574f088e jdk7u51-b30
+cf4110c35afb10456d8264c47b7cde1c20150cab jdk7u51-b31
+ae4adc1492d1c90a70bd2d139a939fc0c8329be9 jdk7u60-b00
+af1fc2868a2b919727bfbb0858449bd991bbee4a jdk7u40-b60
+cc83359f5e5eb46dd9176b0a272390b1a0a51fdc hs24.60-b01
+b7d44793cd267b22352c688b0185466741bb7a89 hs24.60-b02
+90cfd4ad3c9263886d876792d72cb24ac0e03a85 hs24.60-b03
+8fd0e931efa57d1579fb1bc8a68ba3924244b99e jdk7u60-b01
+99e96aaac8afc14ce6f9f3d92ef7004cf505b35d hs24.60-b04
+0025a2a965c8f21376278245c2493d8861386fba jdk7u60-b02
+fa59add77d1a8f601a695f137248462fdc68cc2f hs24.60-b05
+a59134ccb1b704b2cd05e157970d425af43e5437 hs24.60-b06
--- a/agent/src/os/linux/ps_core.c	Wed Jan 15 10:45:35 2014 -0800
+++ b/agent/src/os/linux/ps_core.c	Wed Jan 15 10:53:50 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -704,6 +704,8 @@
    ELF_PHDR* phbuf;
    ELF_PHDR* lib_php = NULL;
 
+   int page_size = sysconf(_SC_PAGE_SIZE);
+
    if ((phbuf = read_program_header_table(lib_fd, lib_ehdr)) == NULL)
       return false;
 
@@ -712,8 +714,32 @@
    // have been already added from core file segments.
    for (lib_php = phbuf, i = 0; i < lib_ehdr->e_phnum; i++) {
       if ((lib_php->p_type == PT_LOAD) && !(lib_php->p_flags & PF_W) && (lib_php->p_filesz != 0)) {
-         if (add_map_info(ph, lib_fd, lib_php->p_offset, lib_php->p_vaddr + lib_base, lib_php->p_filesz) == NULL)
-            goto err;
+         uintptr_t target_vaddr = lib_php->p_vaddr + lib_base;
+         map_info *existing_map = core_lookup(ph, target_vaddr);
+
+         if (existing_map == NULL) {
+            if (add_map_info(ph, lib_fd, lib_php->p_offset,
+                              target_vaddr, lib_php->p_filesz) == NULL) {
+                goto err;
+            }
+         } else {
+            if ((existing_map->memsz != page_size) &&
+                (existing_map->fd != lib_fd) &&
+                (existing_map->memsz != lib_php->p_filesz)) {
+
+                print_debug("address conflict @ 0x%lx (size = %ld, flags = %d)\n",
+                            target_vaddr, lib_php->p_filesz, lib_php->p_flags);
+                goto err;
+            }
+
+            /* replace PT_LOAD segment with library segment */
+            print_debug("overwriting with new address mapping (memsz %ld -> %ld)\n",
+                            existing_map->memsz, lib_php->p_filesz);
+
+            existing_map->fd = lib_fd;
+            existing_map->offset = lib_php->p_offset;
+            existing_map->memsz = lib_php->p_filesz;
+         }
       }
       lib_php++;
    }
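
Context for the hunk above: a minimal sketch of what core_lookup() is assumed to do, namely walk the map_info list built from the core file and return the entry whose range covers the target address. Type and field names mirror the hunk; this is an illustration, not the actual ps_core.c code.

#include <stdint.h>
#include <stddef.h>
#include <sys/types.h>

/* Minimal stand-in for the libproc map_info type (assumed layout). */
typedef struct map_info {
  int              fd;     /* file descriptor backing the mapping */
  off_t            offset; /* offset of the segment in that file  */
  uintptr_t        vaddr;  /* virtual address of the mapping      */
  size_t           memsz;  /* size of the mapping in bytes        */
  struct map_info* next;
} map_info;

/* Assumed behavior of core_lookup(): find an existing mapping whose
 * [vaddr, vaddr + memsz) range covers addr, or NULL if none does. */
static map_info* core_lookup_sketch(map_info* maps, uintptr_t addr) {
  for (map_info* mp = maps; mp != NULL; mp = mp->next) {
    if (addr >= mp->vaddr && addr < mp->vaddr + mp->memsz) {
      return mp;
    }
  }
  return NULL; /* unclaimed: the caller adds a fresh map_info instead */
}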
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/JVMTIThreadState.java	Wed Jan 15 10:45:35 2014 -0800
+++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/JVMTIThreadState.java	Wed Jan 15 10:53:50 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,11 +29,10 @@
     public static final int JVMTI_THREAD_STATE_ALIVE = 0x0001;
     public static final int JVMTI_THREAD_STATE_TERMINATED = 0x0002;
     public static final int JVMTI_THREAD_STATE_RUNNABLE = 0x0004;
-    public static final int JVMTI_THREAD_STATE_WAITING = 0x0008;
+    public static final int JVMTI_THREAD_STATE_WAITING = 0x0080;
     public static final int JVMTI_THREAD_STATE_WAITING_INDEFINITELY = 0x0010;
     public static final int JVMTI_THREAD_STATE_WAITING_WITH_TIMEOUT = 0x0020;
     public static final int JVMTI_THREAD_STATE_SLEEPING = 0x0040;
-    public static final int JVMTI_THREAD_STATE_WAITING_FOR_NOTIFICATION = 0x0080;
     public static final int JVMTI_THREAD_STATE_IN_OBJECT_WAIT = 0x0100;
     public static final int JVMTI_THREAD_STATE_PARKED = 0x0200;
     public static final int JVMTI_THREAD_STATE_BLOCKED_ON_MONITOR_ENTER = 0x0400;
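
The corrected value matches the JVMTI specification, where JVMTI_THREAD_STATE_WAITING is 0x0080 and bit 0x0008 is unassigned; JVMTI_THREAD_STATE_WAITING_FOR_NOTIFICATION is not a JVMTI constant, hence its removal. A small self-contained check of how the waiting bits compose (constant values as in the hunk above):

#include <cstdio>

enum {
  JVMTI_THREAD_STATE_ALIVE                = 0x0001,
  JVMTI_THREAD_STATE_WAITING              = 0x0080,  // was wrongly 0x0008
  JVMTI_THREAD_STATE_WAITING_INDEFINITELY = 0x0010,
  JVMTI_THREAD_STATE_IN_OBJECT_WAIT       = 0x0100
};

int main() {
  // Composite state of a thread blocked in Object.wait() with no timeout.
  int state = JVMTI_THREAD_STATE_ALIVE
            | JVMTI_THREAD_STATE_WAITING
            | JVMTI_THREAD_STATE_WAITING_INDEFINITELY
            | JVMTI_THREAD_STATE_IN_OBJECT_WAIT;
  std::printf("waiting: %s\n",
              (state & JVMTI_THREAD_STATE_WAITING) ? "yes" : "no");
  return 0;
}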
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/OSThread.java	Wed Jan 15 10:45:35 2014 -0800
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/OSThread.java	Wed Jan 15 10:53:50 2014 -0800
@@ -32,7 +32,7 @@
 // to the sys_thread_t structure of the classic JVM implementation.
 public class OSThread extends VMObject {
     private static JIntField interruptedField;
-    private static JIntField threadIdField;
+    private static Field threadIdField;
     static {
         VM.registerVMInitializedObserver(new Observer() {
             public void update(Observable o, Object data) {
@@ -44,7 +44,7 @@
     private static synchronized void initialize(TypeDataBase db) {
         Type type = db.lookupType("OSThread");
         interruptedField = type.getJIntField("_interrupted");
-        threadIdField = type.getJIntField("_thread_id");
+        threadIdField = type.getField("_thread_id");
     }
 
     public OSThread(Address addr) {
@@ -56,7 +56,7 @@
     }
 
     public int threadId() {
-        return (int)threadIdField.getValue(addr);
+        return threadIdField.getJInt(addr);
     }
 
 }
--- a/make/hotspot_version	Wed Jan 15 10:45:35 2014 -0800
+++ b/make/hotspot_version	Wed Jan 15 10:53:50 2014 -0800
@@ -34,8 +34,8 @@
 HOTSPOT_VM_COPYRIGHT=Copyright 2013
 
 HS_MAJOR_VER=24
-HS_MINOR_VER=51
-HS_BUILD_NUMBER=02
+HS_MINOR_VER=60
+HS_BUILD_NUMBER=06
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -1686,14 +1686,6 @@
   }
 
   assert_different_registers(obj, k_RInfo, klass_RInfo);
-  if (!k->is_loaded()) {
-    jobject2reg_with_patching(k_RInfo, op->info_for_patch());
-  } else {
-#ifdef _LP64
-    __ movoop(k_RInfo, k->constant_encoding());
-#endif // _LP64
-  }
-  assert(obj != k_RInfo, "must be different");
 
   __ cmpptr(obj, (int32_t)NULL_WORD);
   if (op->should_profile()) {
@@ -1710,6 +1702,14 @@
   } else {
     __ jcc(Assembler::equal, *obj_is_null);
   }
+
+  if (!k->is_loaded()) {
+    jobject2reg_with_patching(k_RInfo, op->info_for_patch());
+  } else {
+#ifdef _LP64
+    __ movoop(k_RInfo, k->constant_encoding());
+#endif // _LP64
+  }
   __ verify_oop(obj);
 
   if (op->fast_check()) {
--- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -1395,19 +1395,18 @@
     addr = new LIR_Address(src.result(), offset, type);
   }
 
-  if (data != dst) {
-    __ move(data, dst);
-    data = dst;
-  }
+  // Because we want a 2-arg form of xchg and xadd
+  __ move(data, dst);
+
   if (x->is_add()) {
-    __ xadd(LIR_OprFact::address(addr), data, dst, LIR_OprFact::illegalOpr);
+    __ xadd(LIR_OprFact::address(addr), dst, dst, LIR_OprFact::illegalOpr);
   } else {
     if (is_obj) {
       // Do the pre-write barrier, if any.
       pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
                   true /* do_load */, false /* patch */, NULL);
     }
-    __ xchg(LIR_OprFact::address(addr), data, dst, LIR_OprFact::illegalOpr);
+    __ xchg(LIR_OprFact::address(addr), dst, dst, LIR_OprFact::illegalOpr);
     if (is_obj) {
       // Seems to be a precise address
       post_barrier(LIR_OprFact::address(addr), data);
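
The "2-arg form" comment refers to the x86 encoding: XCHG and XADD overwrite their register operand, so the generator must first copy data into dst and then use dst as both input and output. A hedged stand-alone illustration of that constraint using GCC inline assembly (not LIR code):

#include <cstdint>

// Returns the previous value at *addr and stores data there atomically;
// the register operand is clobbered, mirroring the 2-arg LIR form above.
static inline intptr_t xchg_sketch(volatile intptr_t* addr, intptr_t data) {
  intptr_t dst = data;                    // __ move(data, dst)
  __asm__ __volatile__("xchg %0, %1"      // __ xchg(addr, dst, dst, ...)
                       : "+r"(dst), "+m"(*addr)
                       :
                       : "memory");
  return dst;                             // dst now holds the old value
}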
--- a/src/cpu/x86/vm/frame_x86.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/cpu/x86/vm/frame_x86.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -94,12 +94,6 @@
     // other generic buffer blobs are more problematic so we just assume they are
     // ok. adapter blobs never have a frame complete and are never ok.
 
-    // check for a valid frame_size, otherwise we are unlikely to get a valid sender_pc
-
-    if (!Interpreter::contains(_pc) && _cb->frame_size() <= 0) {
-      return false;
-    }
-
     if (!_cb->is_frame_complete_at(_pc)) {
       if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
         return false;
@@ -139,6 +133,11 @@
       // must be some sort of compiled/runtime frame
       // fp does not have to be safe (although it could be check for c1?)
 
+      // check for a valid frame_size, otherwise we are unlikely to get a valid sender_pc
+      if (_cb->frame_size() <= 0) {
+        return false;
+      }
+
       sender_sp = _unextended_sp + _cb->frame_size();
       // On Intel the return_address is always the word on the stack
       sender_pc = (address) *(sender_sp-1);
--- a/src/cpu/x86/vm/globals_x86.hpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/cpu/x86/vm/globals_x86.hpp	Wed Jan 15 10:53:50 2014 -0800
@@ -62,7 +62,7 @@
 // due to lack of optimization caused by C++ compiler bugs
 define_pd_global(intx, StackShadowPages, NOT_WIN64(20) WIN64_ONLY(6) DEBUG_ONLY(+2));
 #else
-define_pd_global(intx, StackShadowPages, 4 DEBUG_ONLY(+5));
+define_pd_global(intx, StackShadowPages, 6 DEBUG_ONLY(+5));
 #endif // AMD64
 
 define_pd_global(intx, PreInflateSpin,           10);
--- a/src/os/bsd/vm/attachListener_bsd.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/os/bsd/vm/attachListener_bsd.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -460,14 +460,14 @@
 
 void AttachListener::vm_start() {
   char fn[UNIX_PATH_MAX];
-  struct stat64 st;
+  struct stat st;
   int ret;
 
   int n = snprintf(fn, UNIX_PATH_MAX, "%s/.java_pid%d",
            os::get_temp_directory(), os::current_process_id());
   assert(n < (int)UNIX_PATH_MAX, "java_pid file name buffer overflow");
 
-  RESTARTABLE(::stat64(fn, &st), ret);
+  RESTARTABLE(::stat(fn, &st), ret);
   if (ret == 0) {
     ret = ::unlink(fn);
     if (ret == -1) {
--- a/src/os/bsd/vm/os_bsd.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/os/bsd/vm/os_bsd.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -3434,7 +3434,9 @@
 #endif
 #endif
 
-char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
+char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+  fatal("This code is not used or maintained.");
+
   // "exec" is passed in but not used.  Creating the shared image for
   // the code cache doesn't have an SHM_X executable permission to check.
   assert(UseLargePages && UseSHM, "only for SHM large pages");
@@ -6105,3 +6107,9 @@
   return n;
 }
 
+#ifndef PRODUCT
+void TestReserveMemorySpecial_test() {
+  // No tests available for this platform
+}
+#endif
+
--- a/src/os/linux/vm/globals_linux.hpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/os/linux/vm/globals_linux.hpp	Wed Jan 15 10:53:50 2014 -0800
@@ -40,6 +40,9 @@
   product(bool, UseHugeTLBFS, false,                                    \
           "Use MAP_HUGETLB for large pages")                            \
                                                                         \
+  product(bool, UseTransparentHugePages, false,                         \
+          "Use MADV_HUGEPAGE for large pages")                          \
+                                                                        \
   product(bool, LoadExecStackDllInVMThread, true,                       \
           "Load DLLs with executable-stack attribute in the VM Thread") \
                                                                         \
@@ -50,7 +53,7 @@
 // Defines Linux-specific default values. The flags are available on all
 // platforms, but they may have different default values on other platforms.
 //
-define_pd_global(bool, UseLargePages, true);
+define_pd_global(bool, UseLargePages, false);
 define_pd_global(bool, UseLargePagesIndividualAllocation, false);
 define_pd_global(bool, UseOSErrorReporting, false);
 define_pd_global(bool, UseThreadPriorities, true) ;
--- a/src/os/linux/vm/os_linux.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/os/linux/vm/os_linux.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -144,6 +144,7 @@
 bool os::Linux::_supports_fast_thread_cpu_time = false;
 const char * os::Linux::_glibc_version = NULL;
 const char * os::Linux::_libpthread_version = NULL;
+pthread_condattr_t os::Linux::_condattr[1];
 
 static jlong initial_time_count=0;
 
@@ -1427,12 +1428,15 @@
           clock_gettime_func(CLOCK_MONOTONIC, &tp)  == 0) {
         // yes, monotonic clock is supported
         _clock_gettime = clock_gettime_func;
+        return;
       } else {
         // close librt if there is no monotonic clock
         dlclose(handle);
       }
     }
   }
+  warning("No monotonic clock was available - timed services may " \
+          "be adversely affected if the time-of-day clock changes");
 }
 
 #ifndef SYS_clock_getres
@@ -2748,35 +2752,7 @@
 
 int os::Linux::commit_memory_impl(char* addr, size_t size,
                                   size_t alignment_hint, bool exec) {
-  int err;
-  if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
-    int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
-    uintptr_t res =
-      (uintptr_t) ::mmap(addr, size, prot,
-                         MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS|MAP_HUGETLB,
-                         -1, 0);
-    if (res != (uintptr_t) MAP_FAILED) {
-      if (UseNUMAInterleaving) {
-        numa_make_global(addr, size);
-      }
-      return 0;
-    }
-
-    err = errno;  // save errno from mmap() call above
-
-    if (!recoverable_mmap_error(err)) {
-      // However, it is not clear that this loss of our reserved mapping
-      // happens with large pages on Linux or that we cannot recover
-      // from the loss. For now, we just issue a warning and we don't
-      // call vm_exit_out_of_memory(). This issue is being tracked by
-      // JBS-8007074.
-      warn_fail_commit_memory(addr, size, alignment_hint, exec, err);
-//    vm_exit_out_of_memory(size, "committing reserved memory.");
-    }
-    // Fall through and try to use small pages
-  }
-
-  err = os::Linux::commit_memory_impl(addr, size, exec);
+  int err = os::Linux::commit_memory_impl(addr, size, exec);
   if (err == 0) {
     realign_memory(addr, size, alignment_hint);
   }
@@ -2801,7 +2777,7 @@
 }
 
 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
-  if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
+  if (UseTransparentHugePages && alignment_hint > (size_t)vm_page_size()) {
     // We don't check the return value: madvise(MADV_HUGEPAGE) may not
     // be supported or the memory may already be backed by huge pages.
     ::madvise(addr, bytes, MADV_HUGEPAGE);
@@ -2814,7 +2790,7 @@
   // uncommitted at all. We don't do anything in this case to avoid creating a segment with
   // small pages on top of the SHM segment. This method always works for small pages, so we
   // allow that in any case.
-  if (alignment_hint <= (size_t)os::vm_page_size() || !UseSHM) {
+  if (alignment_hint <= (size_t)os::vm_page_size() || can_commit_large_page_memory()) {
     commit_memory(addr, bytes, alignment_hint, !ExecMem);
   }
 }
@@ -3183,11 +3159,31 @@
   return linux_mprotect(addr, size, PROT_READ|PROT_WRITE);
 }
 
+bool os::Linux::transparent_huge_pages_sanity_check(bool warn, size_t page_size) {
+  bool result = false;
+  void *p = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE,
+                 MAP_ANONYMOUS|MAP_PRIVATE,
+                 -1, 0);
+  if (p != MAP_FAILED) {
+    void *aligned_p = align_ptr_up(p, page_size);
+
+    result = madvise(aligned_p, page_size, MADV_HUGEPAGE) == 0;
+
+    munmap(p, page_size * 2);
+  }
+
+  if (warn && !result) {
+    warning("TransparentHugePages is not supported by the operating system.");
+  }
+
+  return result;
+}
+
 bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
   bool result = false;
-  void *p = mmap (NULL, page_size, PROT_READ|PROT_WRITE,
-                  MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
-                  -1, 0);
+  void *p = mmap(NULL, page_size, PROT_READ|PROT_WRITE,
+                 MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
+                 -1, 0);
 
   if (p != MAP_FAILED) {
     // We don't know if this really is a huge page or not.
@@ -3208,12 +3204,10 @@
       }
       fclose(fp);
     }
-    munmap (p, page_size);
-    if (result)
-      return true;
-  }
-
-  if (warn) {
+    munmap(p, page_size);
+  }
+
+  if (warn && !result) {
     warning("HugeTLBFS is not supported by the operating system.");
   }
 
@@ -3261,82 +3255,126 @@
 
 static size_t _large_page_size = 0;
 
-void os::large_page_init() {
-  if (!UseLargePages) {
-    UseHugeTLBFS = false;
-    UseSHM = false;
-    return;
-  }
-
-  if (FLAG_IS_DEFAULT(UseHugeTLBFS) && FLAG_IS_DEFAULT(UseSHM)) {
-    // If UseLargePages is specified on the command line try both methods,
-    // if it's default, then try only HugeTLBFS.
-    if (FLAG_IS_DEFAULT(UseLargePages)) {
-      UseHugeTLBFS = true;
-    } else {
-      UseHugeTLBFS = UseSHM = true;
-    }
-  }
-
-  if (LargePageSizeInBytes) {
-    _large_page_size = LargePageSizeInBytes;
-  } else {
-    // large_page_size on Linux is used to round up heap size. x86 uses either
-    // 2M or 4M page, depending on whether PAE (Physical Address Extensions)
-    // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
-    // page as large as 256M.
-    //
-    // Here we try to figure out page size by parsing /proc/meminfo and looking
-    // for a line with the following format:
-    //    Hugepagesize:     2048 kB
-    //
-    // If we can't determine the value (e.g. /proc is not mounted, or the text
-    // format has been changed), we'll use the largest page size supported by
-    // the processor.
+size_t os::Linux::find_large_page_size() {
+  size_t large_page_size = 0;
+
+  // large_page_size on Linux is used to round up heap size. x86 uses either
+  // 2M or 4M page, depending on whether PAE (Physical Address Extensions)
+  // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
+  // page as large as 256M.
+  //
+  // Here we try to figure out page size by parsing /proc/meminfo and looking
+  // for a line with the following format:
+  //    Hugepagesize:     2048 kB
+  //
+  // If we can't determine the value (e.g. /proc is not mounted, or the text
+  // format has been changed), we'll use the largest page size supported by
+  // the processor.
 
 #ifndef ZERO
-    _large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) IA64_ONLY(256 * M) SPARC_ONLY(4 * M)
-                       ARM_ONLY(2 * M) PPC_ONLY(4 * M);
+  large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) IA64_ONLY(256 * M) SPARC_ONLY(4 * M)
+                     ARM_ONLY(2 * M) PPC_ONLY(4 * M);
 #endif // ZERO
 
-    FILE *fp = fopen("/proc/meminfo", "r");
-    if (fp) {
-      while (!feof(fp)) {
-        int x = 0;
-        char buf[16];
-        if (fscanf(fp, "Hugepagesize: %d", &x) == 1) {
-          if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) {
-            _large_page_size = x * K;
-            break;
-          }
-        } else {
-          // skip to next line
-          for (;;) {
-            int ch = fgetc(fp);
-            if (ch == EOF || ch == (int)'\n') break;
-          }
+  FILE *fp = fopen("/proc/meminfo", "r");
+  if (fp) {
+    while (!feof(fp)) {
+      int x = 0;
+      char buf[16];
+      if (fscanf(fp, "Hugepagesize: %d", &x) == 1) {
+        if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) {
+          large_page_size = x * K;
+          break;
+        }
+      } else {
+        // skip to next line
+        for (;;) {
+          int ch = fgetc(fp);
+          if (ch == EOF || ch == (int)'\n') break;
         }
       }
-      fclose(fp);
     }
-  }
-
-  // print a warning if any large page related flag is specified on command line
-  bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);
-
+    fclose(fp);
+  }
+
+  if (!FLAG_IS_DEFAULT(LargePageSizeInBytes) && LargePageSizeInBytes != large_page_size) {
+    warning("Setting LargePageSizeInBytes has no effect on this OS. Large page size is "
+        SIZE_FORMAT "%s.", byte_size_in_proper_unit(large_page_size),
+        proper_unit_for_byte_size(large_page_size));
+  }
+
+  return large_page_size;
+}
+
+size_t os::Linux::setup_large_page_size() {
+  _large_page_size = Linux::find_large_page_size();
   const size_t default_page_size = (size_t)Linux::page_size();
   if (_large_page_size > default_page_size) {
     _page_sizes[0] = _large_page_size;
     _page_sizes[1] = default_page_size;
     _page_sizes[2] = 0;
   }
-  UseHugeTLBFS = UseHugeTLBFS &&
-                 Linux::hugetlbfs_sanity_check(warn_on_failure, _large_page_size);
-
-  if (UseHugeTLBFS)
+
+  return _large_page_size;
+}
+
+bool os::Linux::setup_large_page_type(size_t page_size) {
+  if (FLAG_IS_DEFAULT(UseHugeTLBFS) &&
+      FLAG_IS_DEFAULT(UseSHM) &&
+      FLAG_IS_DEFAULT(UseTransparentHugePages)) {
+
+    // The type of large pages has not been specified by the user.
+
+    // Try UseHugeTLBFS and then UseSHM.
+    UseHugeTLBFS = UseSHM = true;
+
+    // Don't try UseTransparentHugePages since there are known
+    // performance issues with it turned on. This might change in the future.
+    UseTransparentHugePages = false;
+  }
+
+  if (UseTransparentHugePages) {
+    bool warn_on_failure = !FLAG_IS_DEFAULT(UseTransparentHugePages);
+    if (transparent_huge_pages_sanity_check(warn_on_failure, page_size)) {
+      UseHugeTLBFS = false;
+      UseSHM = false;
+      return true;
+    }
+    UseTransparentHugePages = false;
+  }
+
+  if (UseHugeTLBFS) {
+    bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);
+    if (hugetlbfs_sanity_check(warn_on_failure, page_size)) {
+      UseSHM = false;
+      return true;
+    }
+    UseHugeTLBFS = false;
+  }
+
+  return UseSHM;
+}
+
+void os::large_page_init() {
+  if (!UseLargePages &&
+      !UseTransparentHugePages &&
+      !UseHugeTLBFS &&
+      !UseSHM) {
+    // Not using large pages.
+    return;
+  }
+
+  if (!FLAG_IS_DEFAULT(UseLargePages) && !UseLargePages) {
+    // The user explicitly turned off large pages.
+    // Ignore the rest of the large pages flags.
+    UseTransparentHugePages = false;
+    UseHugeTLBFS = false;
     UseSHM = false;
-
-  UseLargePages = UseHugeTLBFS || UseSHM;
+    return;
+  }
+
+  size_t large_page_size = Linux::setup_large_page_size();
+  UseLargePages          = Linux::setup_large_page_type(large_page_size);
 
   set_coredump_filter();
 }
@@ -3345,16 +3383,22 @@
 #define SHM_HUGETLB 04000
 #endif
 
-char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
+char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment, char* req_addr, bool exec) {
   // "exec" is passed in but not used.  Creating the shared image for
   // the code cache doesn't have an SHM_X executable permission to check.
   assert(UseLargePages && UseSHM, "only for SHM large pages");
+  assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
+
+  if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
+    return NULL; // Fallback to small pages.
+  }
 
   key_t key = IPC_PRIVATE;
   char *addr;
 
   bool warn_on_failure = UseLargePages &&
                         (!FLAG_IS_DEFAULT(UseLargePages) ||
+                         !FLAG_IS_DEFAULT(UseSHM) ||
                          !FLAG_IS_DEFAULT(LargePageSizeInBytes)
                         );
   char msg[128];
@@ -3402,42 +3446,220 @@
      return NULL;
   }
 
-  if ((addr != NULL) && UseNUMAInterleaving) {
-    numa_make_global(addr, bytes);
-  }
-
-  // The memory is committed
-  MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
+  return addr;
+}
+
+static void warn_on_large_pages_failure(char* req_addr, size_t bytes, int error) {
+  assert(error == ENOMEM, "Only expect to fail if no memory is available");
+
+  bool warn_on_failure = UseLargePages &&
+      (!FLAG_IS_DEFAULT(UseLargePages) ||
+       !FLAG_IS_DEFAULT(UseHugeTLBFS) ||
+       !FLAG_IS_DEFAULT(LargePageSizeInBytes));
+
+  if (warn_on_failure) {
+    char msg[128];
+    jio_snprintf(msg, sizeof(msg), "Failed to reserve large pages memory req_addr: "
+        PTR_FORMAT " bytes: " SIZE_FORMAT " (errno = %d).", req_addr, bytes, error);
+    warning(msg);
+  }
+}
+
+char* os::Linux::reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec) {
+  assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
+  assert(is_size_aligned(bytes, os::large_page_size()), "Unaligned size");
+  assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
+
+  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
+  char* addr = (char*)::mmap(req_addr, bytes, prot,
+                             MAP_PRIVATE|MAP_ANONYMOUS|MAP_HUGETLB,
+                             -1, 0);
+
+  if (addr == MAP_FAILED) {
+    warn_on_large_pages_failure(req_addr, bytes, errno);
+    return NULL;
+  }
+
+  assert(is_ptr_aligned(addr, os::large_page_size()), "Must be");
 
   return addr;
 }
 
+char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+  size_t large_page_size = os::large_page_size();
+
+  assert(bytes >= large_page_size, "Shouldn't allocate large pages for small sizes");
+
+  // Allocate small pages.
+
+  char* start;
+  if (req_addr != NULL) {
+    assert(is_ptr_aligned(req_addr, alignment), "Must be");
+    assert(is_size_aligned(bytes, alignment), "Must be");
+    start = os::reserve_memory(bytes, req_addr);
+    assert(start == NULL || start == req_addr, "Must be");
+  } else {
+    start = os::reserve_memory_aligned(bytes, alignment);
+  }
+
+  if (start == NULL) {
+    return NULL;
+  }
+
+  assert(is_ptr_aligned(start, alignment), "Must be");
+
+  // os::reserve_memory_special will record this memory area.
+  // Need to release it here to prevent overlapping reservations.
+  MemTracker::record_virtual_memory_release((address)start, bytes);
+
+  char* end = start + bytes;
+
+  // Find the regions of the allocated chunk that can be promoted to large pages.
+  char* lp_start = (char*)align_ptr_up(start, large_page_size);
+  char* lp_end   = (char*)align_ptr_down(end, large_page_size);
+
+  size_t lp_bytes = lp_end - lp_start;
+
+  assert(is_size_aligned(lp_bytes, large_page_size), "Must be");
+
+  if (lp_bytes == 0) {
+    // The mapped region doesn't even span the start and the end of a large page.
+    // Fall back to allocate a non-special area.
+    ::munmap(start, end - start);
+    return NULL;
+  }
+
+  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
+
+
+  void* result;
+
+  if (start != lp_start) {
+    result = ::mmap(start, lp_start - start, prot,
+                    MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
+                    -1, 0);
+    if (result == MAP_FAILED) {
+      ::munmap(lp_start, end - lp_start);
+      return NULL;
+    }
+  }
+
+  result = ::mmap(lp_start, lp_bytes, prot,
+                  MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED|MAP_HUGETLB,
+                  -1, 0);
+  if (result == MAP_FAILED) {
+    warn_on_large_pages_failure(req_addr, bytes, errno);
+    // If the mmap above fails, the large pages region will be unmapped and we
+    // have regions before and after with small pages. Release these regions.
+    //
+    // |  mapped  |  unmapped  |  mapped  |
+    // ^          ^            ^          ^
+    // start      lp_start     lp_end     end
+    //
+    ::munmap(start, lp_start - start);
+    ::munmap(lp_end, end - lp_end);
+    return NULL;
+  }
+
+  if (lp_end != end) {
+    result = ::mmap(lp_end, end - lp_end, prot,
+                    MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
+                    -1, 0);
+    if (result == MAP_FAILED) {
+      ::munmap(start, lp_end - start);
+      return NULL;
+    }
+  }
+
+  return start;
+}
+
+char* os::Linux::reserve_memory_special_huge_tlbfs(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+  assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
+  assert(is_ptr_aligned(req_addr, alignment), "Must be");
+  assert(is_power_of_2(alignment), "Must be");
+  assert(is_power_of_2(os::large_page_size()), "Must be");
+  assert(bytes >= os::large_page_size(), "Shouldn't allocate large pages for small sizes");
+
+  if (is_size_aligned(bytes, os::large_page_size()) && alignment <= os::large_page_size()) {
+    return reserve_memory_special_huge_tlbfs_only(bytes, req_addr, exec);
+  } else {
+    return reserve_memory_special_huge_tlbfs_mixed(bytes, alignment, req_addr, exec);
+  }
+}
+
+char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+  assert(UseLargePages, "only for large pages");
+
+  char* addr;
+  if (UseSHM) {
+    addr = os::Linux::reserve_memory_special_shm(bytes, alignment, req_addr, exec);
+  } else {
+    assert(UseHugeTLBFS, "must be");
+    addr = os::Linux::reserve_memory_special_huge_tlbfs(bytes, alignment, req_addr, exec);
+  }
+
+  if (addr != NULL) {
+    if (UseNUMAInterleaving) {
+      numa_make_global(addr, bytes);
+    }
+
+    // The memory is committed
+    MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
+  }
+
+  return addr;
+}
+
+bool os::Linux::release_memory_special_shm(char* base, size_t bytes) {
+  // detaching the SHM segment will also delete it, see reserve_memory_special_shm()
+  return shmdt(base) == 0;
+}
+
+bool os::Linux::release_memory_special_huge_tlbfs(char* base, size_t bytes) {
+  return pd_release_memory(base, bytes);
+}
+
 bool os::release_memory_special(char* base, size_t bytes) {
+  assert(UseLargePages, "only for large pages");
+
   MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
-  // detaching the SHM segment will also delete it, see reserve_memory_special()
-  int rslt = shmdt(base);
-  if (rslt == 0) {
+
+  bool res;
+  if (UseSHM) {
+    res = os::Linux::release_memory_special_shm(base, bytes);
+  } else {
+    assert(UseHugeTLBFS, "must be");
+    res = os::Linux::release_memory_special_huge_tlbfs(base, bytes);
+  }
+
+  if (res) {
     tkr.record((address)base, bytes);
-    return true;
   } else {
     tkr.discard();
-   return false;
-  }
-}
+  }
+
+  return res;
+}
+
 
 size_t os::large_page_size() {
   return _large_page_size;
 }
 
-// HugeTLBFS allows application to commit large page memory on demand;
-// with SysV SHM the entire memory region must be allocated as shared
+// With SysV SHM the entire memory region must be allocated as shared
 // memory.
+// HugeTLBFS allows application to commit large page memory on demand.
+// However, when committing memory with HugeTLBFS fails, the region
+// that was supposed to be committed will lose the old reservation
+// and allow other threads to steal that memory region. Because of this
+// behavior we can't commit HugeTLBFS memory.
 bool os::can_commit_large_page_memory() {
-  return UseHugeTLBFS;
+  return UseTransparentHugePages;
 }
 
 bool os::can_execute_large_page_memory() {
-  return UseHugeTLBFS;
+  return UseTransparentHugePages || UseHugeTLBFS;
 }
 
 // Reserve memory at an arbitrary address, only if that area is
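
A minimal sketch of the failure mode the new comment describes, with a hypothetical helper name: committing a reserved range with MAP_FIXED|MAP_HUGETLB discards the old reservation before the huge-page allocation can fail, so an ENOMEM leaves the range free for other threads to claim.

#include <stddef.h>
#include <sys/mman.h>

static char* commit_hugetlb_sketch(char* addr, size_t bytes) {
  void* p = ::mmap(addr, bytes, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_HUGETLB,
                   -1, 0);
  // On failure the previous reservation at [addr, addr + bytes) is already
  // gone, which is why can_commit_large_page_memory() excludes HugeTLBFS.
  return p == MAP_FAILED ? NULL : (char*)p;
}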
@@ -4493,6 +4715,26 @@
 
   Linux::clock_init();
   initial_time_count = os::elapsed_counter();
+
+  // pthread_condattr initialization for monotonic clock
+  int status;
+  pthread_condattr_t* _condattr = os::Linux::condAttr();
+  if ((status = pthread_condattr_init(_condattr)) != 0) {
+    fatal(err_msg("pthread_condattr_init: %s", strerror(status)));
+  }
+  // Only set the clock if CLOCK_MONOTONIC is available
+  if (Linux::supports_monotonic_clock()) {
+    if ((status = pthread_condattr_setclock(_condattr, CLOCK_MONOTONIC)) != 0) {
+      if (status == EINVAL) {
+        warning("Unable to use monotonic clock with relative timed-waits" \
+                " - changes to the time-of-day clock may have adverse effects");
+      } else {
+        fatal(err_msg("pthread_condattr_setclock: %s", strerror(status)));
+      }
+    }
+  }
+  // else it defaults to CLOCK_REALTIME
+
   pthread_mutex_init(&dl_mutex, NULL);
 
   // If the pagesize of the VM is greater than 8K determine the appropriate
@@ -4591,21 +4833,23 @@
         UseNUMA = false;
       }
     }
-    // With SHM large pages we cannot uncommit a page, so there's not way
+    // With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way
     // we can make the adaptive lgrp chunk resizing work. If the user specified
-    // both UseNUMA and UseLargePages (or UseSHM) on the command line - warn and
+    // both UseNUMA and UseLargePages (or UseSHM/UseHugeTLBFS) on the command line - warn and
     // disable adaptive resizing.
-    if (UseNUMA && UseLargePages && UseSHM) {
-      if (!FLAG_IS_DEFAULT(UseNUMA)) {
-        if (FLAG_IS_DEFAULT(UseLargePages) && FLAG_IS_DEFAULT(UseSHM)) {
+    if (UseNUMA && UseLargePages && !can_commit_large_page_memory()) {
+      if (FLAG_IS_DEFAULT(UseNUMA)) {
+        UseNUMA = false;
+      } else {
+        if (FLAG_IS_DEFAULT(UseLargePages) &&
+            FLAG_IS_DEFAULT(UseSHM) &&
+            FLAG_IS_DEFAULT(UseHugeTLBFS)) {
           UseLargePages = false;
         } else {
-          warning("UseNUMA is not fully compatible with SHM large pages, disabling adaptive resizing");
+          warning("UseNUMA is not fully compatible with SHM/HugeTLBFS large pages, disabling adaptive resizing");
           UseAdaptiveSizePolicy = false;
           UseAdaptiveNUMAChunkSizing = false;
         }
-      } else {
-        UseNUMA = false;
       }
     }
     if (!UseNUMA && ForceNUMA) {
@@ -5339,21 +5583,36 @@
 
 static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
   if (millis < 0)  millis = 0;
-  struct timeval now;
-  int status = gettimeofday(&now, NULL);
-  assert(status == 0, "gettimeofday");
+
   jlong seconds = millis / 1000;
   millis %= 1000;
   if (seconds > 50000000) { // see man cond_timedwait(3T)
     seconds = 50000000;
   }
-  abstime->tv_sec = now.tv_sec  + seconds;
-  long       usec = now.tv_usec + millis * 1000;
-  if (usec >= 1000000) {
-    abstime->tv_sec += 1;
-    usec -= 1000000;
-  }
-  abstime->tv_nsec = usec * 1000;
+
+  if (os::Linux::supports_monotonic_clock()) {
+    struct timespec now;
+    int status = os::Linux::clock_gettime(CLOCK_MONOTONIC, &now);
+    assert_status(status == 0, status, "clock_gettime");
+    abstime->tv_sec = now.tv_sec  + seconds;
+    long nanos = now.tv_nsec + millis * NANOSECS_PER_MILLISEC;
+    if (nanos >= NANOSECS_PER_SEC) {
+      abstime->tv_sec += 1;
+      nanos -= NANOSECS_PER_SEC;
+    }
+    abstime->tv_nsec = nanos;
+  } else {
+    struct timeval now;
+    int status = gettimeofday(&now, NULL);
+    assert(status == 0, "gettimeofday");
+    abstime->tv_sec = now.tv_sec  + seconds;
+    long usec = now.tv_usec + millis * 1000;
+    if (usec >= 1000000) {
+      abstime->tv_sec += 1;
+      usec -= 1000000;
+    }
+    abstime->tv_nsec = usec * 1000;
+  }
   return abstime;
 }
 
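
A hedged usage sketch connecting this to the condattr setup above: an absolute timeout computed against CLOCK_MONOTONIC is only valid with a condition variable whose clock was set the same way (see os::Linux::condAttr()); names below are illustrative, not VM code.

#include <pthread.h>
#include <time.h>

static pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cv;  // must be initialized with a CLOCK_MONOTONIC attr

static int timed_wait_sketch(long millis) {
  struct timespec abst;                    // what compute_abstime() builds
  clock_gettime(CLOCK_MONOTONIC, &abst);   // must match the condvar's clock
  abst.tv_sec  += millis / 1000;
  abst.tv_nsec += (millis % 1000) * 1000000L;
  if (abst.tv_nsec >= 1000000000L) {
    abst.tv_sec  += 1;
    abst.tv_nsec -= 1000000000L;
  }
  pthread_mutex_lock(&mu);
  int rc = pthread_cond_timedwait(&cv, &mu, &abst);  // ETIMEDOUT on expiry
  pthread_mutex_unlock(&mu);
  return rc;
}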
@@ -5445,7 +5704,7 @@
     status = os::Linux::safe_cond_timedwait(_cond, _mutex, &abst);
     if (status != 0 && WorkAroundNPTLTimedWaitHang) {
       pthread_cond_destroy (_cond);
-      pthread_cond_init (_cond, NULL) ;
+      pthread_cond_init (_cond, os::Linux::condAttr()) ;
     }
     assert_status(status == 0 || status == EINTR ||
                   status == ETIME || status == ETIMEDOUT,
@@ -5546,32 +5805,50 @@
 
 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
   assert (time > 0, "convertTime");
-
-  struct timeval now;
-  int status = gettimeofday(&now, NULL);
-  assert(status == 0, "gettimeofday");
-
-  time_t max_secs = now.tv_sec + MAX_SECS;
-
-  if (isAbsolute) {
-    jlong secs = time / 1000;
-    if (secs > max_secs) {
-      absTime->tv_sec = max_secs;
+  time_t max_secs = 0;
+
+  if (!os::Linux::supports_monotonic_clock() || isAbsolute) {
+    struct timeval now;
+    int status = gettimeofday(&now, NULL);
+    assert(status == 0, "gettimeofday");
+
+    max_secs = now.tv_sec + MAX_SECS;
+
+    if (isAbsolute) {
+      jlong secs = time / 1000;
+      if (secs > max_secs) {
+        absTime->tv_sec = max_secs;
+      } else {
+        absTime->tv_sec = secs;
+      }
+      absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
+    } else {
+      jlong secs = time / NANOSECS_PER_SEC;
+      if (secs >= MAX_SECS) {
+        absTime->tv_sec = max_secs;
+        absTime->tv_nsec = 0;
+      } else {
+        absTime->tv_sec = now.tv_sec + secs;
+        absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
+        if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
+          absTime->tv_nsec -= NANOSECS_PER_SEC;
+          ++absTime->tv_sec; // note: this must be <= max_secs
+        }
+      }
     }
-    else {
-      absTime->tv_sec = secs;
-    }
-    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
-  }
-  else {
+  } else {
+    // must be relative using monotonic clock
+    struct timespec now;
+    int status = os::Linux::clock_gettime(CLOCK_MONOTONIC, &now);
+    assert_status(status == 0, status, "clock_gettime");
+    max_secs = now.tv_sec + MAX_SECS;
     jlong secs = time / NANOSECS_PER_SEC;
     if (secs >= MAX_SECS) {
       absTime->tv_sec = max_secs;
       absTime->tv_nsec = 0;
-    }
-    else {
+    } else {
       absTime->tv_sec = now.tv_sec + secs;
-      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
+      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_nsec;
       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
         absTime->tv_nsec -= NANOSECS_PER_SEC;
         ++absTime->tv_sec; // note: this must be <= max_secs
@@ -5650,16 +5927,20 @@
   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
   jt->set_suspend_equivalent();
   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
-
+  assert(_cur_index == -1, "invariant");
   if (time == 0) {
-    status = pthread_cond_wait (_cond, _mutex) ;
+    _cur_index = REL_INDEX; // arbitrary choice when not timed
+    status = pthread_cond_wait (&_cond[_cur_index], _mutex) ;
   } else {
-    status = os::Linux::safe_cond_timedwait (_cond, _mutex, &absTime) ;
+    _cur_index = isAbsolute ? ABS_INDEX : REL_INDEX;
+    status = os::Linux::safe_cond_timedwait (&_cond[_cur_index], _mutex, &absTime) ;
     if (status != 0 && WorkAroundNPTLTimedWaitHang) {
-      pthread_cond_destroy (_cond) ;
-      pthread_cond_init    (_cond, NULL);
+      pthread_cond_destroy (&_cond[_cur_index]) ;
+      pthread_cond_init    (&_cond[_cur_index], isAbsolute ? NULL : os::Linux::condAttr());
     }
   }
+  _cur_index = -1;
+
   assert_status(status == 0 || status == EINTR ||
                 status == ETIME || status == ETIMEDOUT,
                 status, "cond_timedwait");
@@ -5688,17 +5969,24 @@
   s = _counter;
   _counter = 1;
   if (s < 1) {
-     if (WorkAroundNPTLTimedWaitHang) {
-        status = pthread_cond_signal (_cond) ;
-        assert (status == 0, "invariant") ;
+    // thread might be parked
+    if (_cur_index != -1) {
+      // thread is definitely parked
+      if (WorkAroundNPTLTimedWaitHang) {
+        status = pthread_cond_signal (&_cond[_cur_index]);
+        assert (status == 0, "invariant");
         status = pthread_mutex_unlock(_mutex);
-        assert (status == 0, "invariant") ;
-     } else {
+        assert (status == 0, "invariant");
+      } else {
         status = pthread_mutex_unlock(_mutex);
-        assert (status == 0, "invariant") ;
-        status = pthread_cond_signal (_cond) ;
-        assert (status == 0, "invariant") ;
-     }
+        assert (status == 0, "invariant");
+        status = pthread_cond_signal (&_cond[_cur_index]);
+        assert (status == 0, "invariant");
+      }
+    } else {
+      status = pthread_mutex_unlock(_mutex);
+      assert (status == 0, "invariant") ;
+    }
   } else {
     pthread_mutex_unlock(_mutex);
     assert (status == 0, "invariant") ;
@@ -5914,3 +6202,149 @@
 }
 
 #endif // JAVASE_EMBEDDED
+
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+#define test_log(...) \
+  do {\
+    if (VerboseInternalVMTests) { \
+      tty->print_cr(__VA_ARGS__); \
+      tty->flush(); \
+    }\
+  } while (false)
+
+class TestReserveMemorySpecial : AllStatic {
+ public:
+  static void small_page_write(void* addr, size_t size) {
+    size_t page_size = os::vm_page_size();
+
+    char* end = (char*)addr + size;
+    for (char* p = (char*)addr; p < end; p += page_size) {
+      *p = 1;
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_only(size_t size) {
+    if (!UseHugeTLBFS) {
+      return;
+    }
+
+    test_log("test_reserve_memory_special_huge_tlbfs_only(" SIZE_FORMAT ")", size);
+
+    char* addr = os::Linux::reserve_memory_special_huge_tlbfs_only(size, NULL, false);
+
+    if (addr != NULL) {
+      small_page_write(addr, size);
+
+      os::Linux::release_memory_special_huge_tlbfs(addr, size);
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_only() {
+    if (!UseHugeTLBFS) {
+      return;
+    }
+
+    size_t lp = os::large_page_size();
+
+    for (size_t size = lp; size <= lp * 10; size += lp) {
+      test_reserve_memory_special_huge_tlbfs_only(size);
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_mixed(size_t size, size_t alignment) {
+    if (!UseHugeTLBFS) {
+      return;
+    }
+
+    test_log("test_reserve_memory_special_huge_tlbfs_mixed(" SIZE_FORMAT ", " SIZE_FORMAT ")",
+        size, alignment);
+
+    assert(size >= os::large_page_size(), "Incorrect input to test");
+
+    char* addr = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, NULL, false);
+
+    if (addr != NULL) {
+      small_page_write(addr, size);
+
+      os::Linux::release_memory_special_huge_tlbfs(addr, size);
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(size_t size) {
+    size_t lp = os::large_page_size();
+    size_t ag = os::vm_allocation_granularity();
+
+    for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
+      test_reserve_memory_special_huge_tlbfs_mixed(size, alignment);
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_mixed() {
+    size_t lp = os::large_page_size();
+    size_t ag = os::vm_allocation_granularity();
+
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp + ag);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp + lp / 2);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 + ag);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 - ag);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 + lp / 2);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 10);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 10 + lp / 2);
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs() {
+    if (!UseHugeTLBFS) {
+      return;
+    }
+
+    test_reserve_memory_special_huge_tlbfs_only();
+    test_reserve_memory_special_huge_tlbfs_mixed();
+  }
+
+  static void test_reserve_memory_special_shm(size_t size, size_t alignment) {
+    if (!UseSHM) {
+      return;
+    }
+
+    test_log("test_reserve_memory_special_shm(" SIZE_FORMAT ", " SIZE_FORMAT ")", size, alignment);
+
+    char* addr = os::Linux::reserve_memory_special_shm(size, alignment, NULL, false);
+
+    if (addr != NULL) {
+      assert(is_ptr_aligned(addr, alignment), "Check");
+      assert(is_ptr_aligned(addr, os::large_page_size()), "Check");
+
+      small_page_write(addr, size);
+
+      os::Linux::release_memory_special_shm(addr, size);
+    }
+  }
+
+  static void test_reserve_memory_special_shm() {
+    size_t lp = os::large_page_size();
+    size_t ag = os::vm_allocation_granularity();
+
+    for (size_t size = ag; size < lp * 3; size += ag) {
+      for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
+        test_reserve_memory_special_shm(size, alignment);
+      }
+    }
+  }
+
+  static void test() {
+    test_reserve_memory_special_huge_tlbfs();
+    test_reserve_memory_special_shm();
+  }
+};
+
+void TestReserveMemorySpecial_test() {
+  TestReserveMemorySpecial::test();
+}
+
+#endif
--- a/src/os/linux/vm/os_linux.hpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/os/linux/vm/os_linux.hpp	Wed Jan 15 10:53:50 2014 -0800
@@ -32,6 +32,7 @@
 
 class Linux {
   friend class os;
+  friend class TestReserveMemorySpecial;
 
   // For signal-chaining
 #define MAXSIGNUM 32
@@ -92,8 +93,21 @@
   static void rebuild_cpu_to_node_map();
   static GrowableArray<int>* cpu_to_node()    { return _cpu_to_node; }
 
+  static size_t find_large_page_size();
+  static size_t setup_large_page_size();
+
+  static bool setup_large_page_type(size_t page_size);
+  static bool transparent_huge_pages_sanity_check(bool warn, size_t pages_size);
   static bool hugetlbfs_sanity_check(bool warn, size_t page_size);
 
+  static char* reserve_memory_special_shm(size_t bytes, size_t alignment, char* req_addr, bool exec);
+  static char* reserve_memory_special_huge_tlbfs(size_t bytes, size_t alignment, char* req_addr, bool exec);
+  static char* reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec);
+  static char* reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec);
+
+  static bool release_memory_special_shm(char* base, size_t bytes);
+  static bool release_memory_special_huge_tlbfs(char* base, size_t bytes);
+
   static void print_full_memory_info(outputStream* st);
   static void print_distro_info(outputStream* st);
   static void print_libversion_info(outputStream* st);
@@ -207,6 +221,13 @@
 
   static jlong fast_thread_cpu_time(clockid_t clockid);
 
+  // pthread_cond clock support
+  private:
+  static pthread_condattr_t _condattr[1];
+
+  public:
+  static pthread_condattr_t* condAttr() { return _condattr; }
+
   // Stack repair handling
 
   // none present
@@ -273,7 +294,7 @@
   public:
     PlatformEvent() {
       int status;
-      status = pthread_cond_init (_cond, NULL);
+      status = pthread_cond_init (_cond, os::Linux::condAttr());
       assert_status(status == 0, status, "cond_init");
       status = pthread_mutex_init (_mutex, NULL);
       assert_status(status == 0, status, "mutex_init");
@@ -288,14 +309,19 @@
     void park () ;
     void unpark () ;
     int  TryPark () ;
-    int  park (jlong millis) ;
+    int  park (jlong millis) ; // relative timed-wait only
     void SetAssociation (Thread * a) { _Assoc = a ; }
 } ;
 
 class PlatformParker : public CHeapObj<mtInternal> {
   protected:
+    enum {
+      REL_INDEX = 0,
+      ABS_INDEX = 1
+    };
+    int _cur_index; // which cond is in use: -1, 0, 1
     pthread_mutex_t _mutex [1] ;
-    pthread_cond_t  _cond  [1] ;
+    pthread_cond_t _cond [2] ; // one for relative times and one for abs.
 
   public:       // TODO-FIXME: make dtor private
     ~PlatformParker() { guarantee (0, "invariant") ; }
@@ -303,10 +329,13 @@
   public:
     PlatformParker() {
       int status;
-      status = pthread_cond_init (_cond, NULL);
-      assert_status(status == 0, status, "cond_init");
+      status = pthread_cond_init (&_cond[REL_INDEX], os::Linux::condAttr());
+      assert_status(status == 0, status, "cond_init rel");
+      status = pthread_cond_init (&_cond[ABS_INDEX], NULL);
+      assert_status(status == 0, status, "cond_init abs");
       status = pthread_mutex_init (_mutex, NULL);
       assert_status(status == 0, status, "mutex_init");
+      _cur_index = -1; // mark as unused
     }
 };
 
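
A self-contained sketch of the two-condvar scheme introduced above: relative timed waits use a condvar bound to CLOCK_MONOTONIC, absolute waits keep the default CLOCK_REALTIME, and _cur_index records which one a parked thread is blocked on. Error handling elided; illustrative only.

#include <pthread.h>
#include <time.h>

enum { REL_INDEX = 0, ABS_INDEX = 1 };

static pthread_cond_t     cond[2];
static pthread_condattr_t monotonic_attr;

static void init_parker_conds() {
  pthread_condattr_init(&monotonic_attr);
  pthread_condattr_setclock(&monotonic_attr, CLOCK_MONOTONIC);
  pthread_cond_init(&cond[REL_INDEX], &monotonic_attr); // relative timed-waits
  pthread_cond_init(&cond[ABS_INDEX], NULL);            // absolute (realtime) waits
}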
--- a/src/os/posix/vm/os_posix.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/os/posix/vm/os_posix.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,8 @@
 #include <unistd.h>
 #include <sys/resource.h>
 #include <sys/utsname.h>
+#include <pthread.h>
+#include <signal.h>
 
 
 // Check core dump limit and report possible place where core can be found
@@ -203,11 +205,17 @@
  * The callback is supposed to provide the method that should be protected.
  */
 bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
+  sigset_t saved_sig_mask;
+
   assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread");
   assert(!WatcherThread::watcher_thread()->has_crash_protection(),
       "crash_protection already set?");
 
-  if (sigsetjmp(_jmpbuf, 1) == 0) {
+  // we cannot rely on sigsetjmp/siglongjmp to save/restore the signal mask
+  // since on at least some systems (OS X) siglongjmp will restore the mask
+  // for the process, not the thread
+  pthread_sigmask(0, NULL, &saved_sig_mask);
+  if (sigsetjmp(_jmpbuf, 0) == 0) {
     // make sure we can see in the signal handler that we have crash protection
     // installed
     WatcherThread::watcher_thread()->set_crash_protection(this);
@@ -217,6 +225,7 @@
     return true;
   }
   // this happens when we siglongjmp() back
+  pthread_sigmask(SIG_SETMASK, &saved_sig_mask, NULL);
   WatcherThread::watcher_thread()->set_crash_protection(NULL);
   return false;
 }
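
A minimal sketch of the pattern adopted above, with hypothetical names (jmpbuf, risky_operation): the thread's signal mask is saved and restored by hand because sigsetjmp is now called with savemask == 0 and siglongjmp's own restore can be process-wide on some systems.

#include <csetjmp>
#include <pthread.h>
#include <signal.h>

static sigjmp_buf jmpbuf;

static bool call_with_protection_sketch(void (*risky_operation)(void)) {
  sigset_t saved_sig_mask;
  pthread_sigmask(0, NULL, &saved_sig_mask);  // query only, don't change it
  if (sigsetjmp(jmpbuf, 0) == 0) {            // 0: do not save the mask here
    risky_operation();                        // may siglongjmp(jmpbuf, 1)
    return true;
  }
  // Reached via siglongjmp: restore the per-thread mask ourselves.
  pthread_sigmask(SIG_SETMASK, &saved_sig_mask, NULL);
  return false;
}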
--- a/src/os/solaris/vm/os_solaris.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/os/solaris/vm/os_solaris.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -3530,11 +3530,15 @@
   return true;
 }
 
-char* os::reserve_memory_special(size_t size, char* addr, bool exec) {
+char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
   // "exec" is passed in but not used.  Creating the shared image for
   // the code cache doesn't have an SHM_X executable permission to check.
   assert(UseLargePages && UseISM, "only for ISM large pages");
 
+  if (!is_size_aligned(size, os::large_page_size()) || alignment > os::large_page_size()) {
+    return NULL; // Fallback to small pages.
+  }
+
   char* retAddr = NULL;
   int shmid;
   key_t ismKey;
@@ -6862,3 +6866,9 @@
 
   return strlen(buffer);
 }
+
+#ifndef PRODUCT
+void TestReserveMemorySpecial_test() {
+  // No tests available for this platform
+}
+#endif
--- a/src/os/windows/vm/os_windows.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/os/windows/vm/os_windows.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -3079,7 +3079,12 @@
   return true;
 }
 
-char* os::reserve_memory_special(size_t bytes, char* addr, bool exec) {
+char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, bool exec) {
+  assert(UseLargePages, "only for large pages");
+
+  if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
+    return NULL; // Fallback to small pages.
+  }
 
   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
@@ -5584,3 +5589,9 @@
 }
 
 #endif
+
+#ifndef PRODUCT
+void TestReserveMemorySpecial_test() {
+  // No tests available for this platform
+}
+#endif
--- a/src/share/vm/c1/c1_GraphBuilder.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -4178,7 +4178,9 @@
     }
   }
 
-  if (!PrintInlining)  return;
+  if (!PrintInlining && !compilation()->method()->has_option("PrintInlining")) {
+    return;
+  }
   CompileTask::print_inlining(callee, scope()->level(), bci(), msg);
   if (success && CIPrintMethodCodes) {
     callee->print_codes();
--- a/src/share/vm/c1/c1_LIR.hpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/c1/c1_LIR.hpp	Wed Jan 15 10:53:50 2014 -0800
@@ -2221,7 +2221,7 @@
   typedef enum { inputMode, firstMode = inputMode, tempMode, outputMode, numModes, invalidMode = -1 } OprMode;
 
   enum {
-    maxNumberOfOperands = 16,
+    maxNumberOfOperands = 20,
     maxNumberOfInfos = 4
   };
 
--- a/src/share/vm/c1/c1_LinearScan.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/c1/c1_LinearScan.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -1138,8 +1138,10 @@
         }
       }
     }
-
-  } else if (opr_type != T_LONG) {
+    // We sometimes want to use logical operations on pointers, in particular in GC barriers.
+    // Since 64-bit logical operations do not currently support operands on the stack, we have
+    // to make sure T_OBJECT doesn't get spilled along with T_LONG.
+  } else if (opr_type != T_LONG LP64_ONLY(&& opr_type != T_OBJECT)) {
     // integer instruction (note: long operands must always be in register)
     switch (op->code()) {
       case lir_cmp:
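
For the new comment above, a small example of the kind of pointer-bit logic a GC barrier performs (a G1-style cross-region test); names and the region-shift parameter are illustrative.

#include <cstdint>

// True if two oops fall in different heap regions. The XOR and shift are
// logical operations on pointer bits, so both operands must stay in registers.
static bool crosses_region_sketch(uintptr_t from, uintptr_t to,
                                  unsigned region_shift) {
  return ((from ^ to) >> region_shift) != 0;
}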
--- a/src/share/vm/classfile/classFileParser.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/classfile/classFileParser.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -4051,8 +4051,8 @@
   for (int index = 0; index < num_methods; index++) {
     methodOop m = (methodOop)methods->obj_at(index);
 
-    // skip static and <init> methods
-    if ((!m->is_static()) &&
+    // skip private, static, and <init> methods
+    if ((!m->is_private() && !m->is_static()) &&
         (m->name() != vmSymbols::object_initializer_name())) {
 
       Symbol* name = m->name();
--- a/src/share/vm/classfile/systemDictionary.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/classfile/systemDictionary.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -585,7 +585,7 @@
   assert(name != NULL && !FieldType::is_array(name) &&
          !FieldType::is_obj(name), "invalid class name");
 
-  TracingTime class_load_start_time = Tracing::time();
+  const Ticks class_load_start_time = Ticks::now();
 
   // UseNewReflection
   // Fix for 4474172; see evaluation for more details
@@ -946,7 +946,7 @@
                                         TRAPS) {
   TempNewSymbol parsed_name = NULL;
 
-  TracingTime class_load_start_time = Tracing::time();
+  const Ticks class_load_start_time = Ticks::now();
 
   // Parse the stream. Note that we do this even though this klass might
   // already be present in the SystemDictionary, otherwise we would not
@@ -2314,6 +2314,11 @@
   objArrayHandle appendix_box = oopFactory::new_objArray(SystemDictionary::Object_klass(), 1, CHECK_(empty));
   assert(appendix_box->obj_at(0) == NULL, "");
 
+  // This should not happen.  JDK code should take care of that.
+  if (accessing_klass.is_null() || method_type.is_null()) {
+    THROW_MSG_(vmSymbols::java_lang_InternalError(), "bad invokehandle", empty);
+  }
+
   // call java.lang.invoke.MethodHandleNatives::linkMethod(... String, MethodType) -> MemberName
   JavaCallArguments args;
   args.push_oop(accessing_klass()->java_mirror());
@@ -2439,6 +2444,9 @@
   Handle type;
   if (signature->utf8_length() > 0 && signature->byte_at(0) == '(') {
     type = find_method_handle_type(signature, caller, CHECK_(empty));
+  } else if (caller.is_null()) {
+    // This should not happen.  JDK code should take care of that.
+    THROW_MSG_(vmSymbols::java_lang_InternalError(), "bad MH constant", empty);
   } else {
     ResourceMark rm(THREAD);
     SignatureStream ss(signature, false);
@@ -2502,6 +2510,11 @@
   Handle method_name = java_lang_String::create_from_symbol(name, CHECK_(empty));
   Handle method_type = find_method_handle_type(type, caller, CHECK_(empty));
 
+  // This should not happen.  JDK code should take care of that.
+  if (caller.is_null() || method_type.is_null()) {
+    THROW_MSG_(vmSymbols::java_lang_InternalError(), "bad invokedynamic", empty);
+  }
+
   objArrayHandle appendix_box = oopFactory::new_objArray(SystemDictionary::Object_klass(), 1, CHECK_(empty));
   assert(appendix_box->obj_at(0) == NULL, "");
 
@@ -2607,13 +2620,12 @@
 }
 
 // utility function for posting class load event
-void SystemDictionary::post_class_load_event(TracingTime start_time,
+void SystemDictionary::post_class_load_event(const Ticks& start_time,
                                              instanceKlassHandle k,
                                              Handle initiating_loader) {
 #if INCLUDE_TRACE
   EventClassLoad event(UNTIMED);
   if (event.should_commit()) {
-    event.set_endtime(Tracing::time());
     event.set_starttime(start_time);
     event.set_loadedClass(k());
     oop defining_class_loader = k->class_loader();
@@ -2632,7 +2644,7 @@
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
   if (Tracing::enabled()) {
     _should_write_unload_events = Tracing::is_event_enabled(TraceClassUnloadEvent);
-    _class_unload_time = Tracing::time();
+    _class_unload_time = Ticks::now();
     _is_alive = is_alive;
     classes_do(&class_unload_event);
 
@@ -2648,7 +2660,7 @@
 
 #if INCLUDE_TRACE
 
-TracingTime SystemDictionary::_class_unload_time;
+Ticks SystemDictionary::_class_unload_time;
 BoolObjectClosure* SystemDictionary::_is_alive = NULL;
 int SystemDictionary::_no_of_classes_unloading = 0;
 bool SystemDictionary::_should_write_unload_events = false;
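
THROW_MSG_, used in the guards above, is the value-returning member of HotSpot's exception-macro family (utilities/exceptions.hpp): it posts the exception and returns the given result from the enclosing function. A hedged sketch of roughly what one guard expands to:

    // Approximate expansion of THROW_MSG_(name, message, result):
    {
      Exceptions::_throw_msg(THREAD_AND_LOCATION, name, message);
      return result;   // here: 'empty', the method's failure value
    }
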
--- a/src/share/vm/classfile/systemDictionary.hpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/classfile/systemDictionary.hpp	Wed Jan 15 10:53:50 2014 -0800
@@ -33,7 +33,7 @@
 #include "runtime/reflectionUtils.hpp"
 #include "utilities/hashtable.hpp"
 #include "utilities/hashtable.inline.hpp"
-#include "trace/traceTime.hpp"
+#include "utilities/ticks.hpp"
 
 // The system dictionary stores all loaded classes and maps:
 //
@@ -616,7 +616,7 @@
   static void add_to_hierarchy(instanceKlassHandle k, TRAPS);
 
   // event based tracing
-  static void post_class_load_event(TracingTime start_time, instanceKlassHandle k,
+  static void post_class_load_event(const Ticks& start_time, instanceKlassHandle k,
                                     Handle initiating_loader);
   static void post_class_unload_events(BoolObjectClosure* is_alive);
 
@@ -678,7 +678,7 @@
   static bool _has_checkPackageAccess;
 
 #if INCLUDE_TRACE
-  static TracingTime _class_unload_time;
+  static Ticks _class_unload_time;
   static BoolObjectClosure* _is_alive;
   static int _no_of_classes_unloading;
   static bool _should_write_unload_events;
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -1997,7 +1997,7 @@
   GenCollectedHeap* gch = GenCollectedHeap::heap();
 
   STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
-  gc_timer->register_gc_start(os::elapsed_counter());
+  gc_timer->register_gc_start();
 
   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
@@ -2094,7 +2094,7 @@
     size_policy()->msc_collection_end(gch->gc_cause());
   }
 
-  gc_timer->register_gc_end(os::elapsed_counter());
+  gc_timer->register_gc_end();
 
   gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
 
@@ -2443,7 +2443,7 @@
 
 void CMSCollector::register_gc_start(GCCause::Cause cause) {
   _cms_start_registered = true;
-  _gc_timer_cm->register_gc_start(os::elapsed_counter());
+  _gc_timer_cm->register_gc_start();
   _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
 }
 
@@ -2451,7 +2451,7 @@
   if (_cms_start_registered) {
     report_heap_summary(GCWhen::AfterGC);
 
-    _gc_timer_cm->register_gc_end(os::elapsed_counter());
+    _gc_timer_cm->register_gc_end();
     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
     _cms_start_registered = false;
   }
@@ -9432,4 +9432,3 @@
       ShouldNotReachHere();
   }
 }
-
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -145,7 +145,7 @@
                                 );
 #endif /* USDT2 */
 
-  _collector->_gc_timer_cm->register_gc_pause_start("Initial Mark", os::elapsed_counter());
+  _collector->_gc_timer_cm->register_gc_pause_start("Initial Mark");
 
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   GCCauseSetter gccs(gch, GCCause::_cms_initial_mark);
@@ -157,7 +157,7 @@
 
   VM_CMS_Operation::verify_after_gc();
 
-  _collector->_gc_timer_cm->register_gc_pause_end(os::elapsed_counter());
+  _collector->_gc_timer_cm->register_gc_pause_end();
 
 #ifndef USDT2
   HS_DTRACE_PROBE(hs_private, cms__initmark__end);
@@ -182,7 +182,7 @@
                                 );
 #endif /* USDT2 */
 
-  _collector->_gc_timer_cm->register_gc_pause_start("Final Mark", os::elapsed_counter());
+  _collector->_gc_timer_cm->register_gc_pause_start("Final Mark");
 
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   GCCauseSetter gccs(gch, GCCause::_cms_final_remark);
@@ -195,7 +195,7 @@
   VM_CMS_Operation::verify_after_gc();
 
   _collector->save_heap_summary();
-  _collector->_gc_timer_cm->register_gc_pause_end(os::elapsed_counter());
+  _collector->_gc_timer_cm->register_gc_pause_end();
 
 #ifndef USDT2
   HS_DTRACE_PROBE(hs_private, cms__remark__end);
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -56,6 +56,7 @@
 #include "oops/oop.pcgc.inline.hpp"
 #include "runtime/aprofiler.hpp"
 #include "runtime/vmThread.hpp"
+#include "utilities/ticks.hpp"
 
 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
 
@@ -1286,7 +1287,7 @@
   }
 
   STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
-  gc_timer->register_gc_start(os::elapsed_counter());
+  gc_timer->register_gc_start();
 
   SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
   gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
@@ -1546,8 +1547,7 @@
     post_full_gc_dump(gc_timer);
   }
 
-  gc_timer->register_gc_end(os::elapsed_counter());
-
+  gc_timer->register_gc_end();
   gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
 
   return true;
@@ -2002,10 +2002,12 @@
 
   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
   size_t max_byte_size = collector_policy()->max_heap_byte_size();
+  size_t heap_alignment = collector_policy()->max_alignment();
 
   // Ensure that the sizes are properly aligned.
   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
+  Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
 
   _cg1r = new ConcurrentG1Refine(this);
 
@@ -2029,14 +2031,14 @@
   size_t total_reserved = 0;
 
   total_reserved = add_and_check_overflow(total_reserved, max_byte_size);
-  size_t pg_max_size = (size_t) align_size_up(pgs->max_size(), HeapRegion::GrainBytes);
+  size_t pg_max_size = (size_t) align_size_up(pgs->max_size(), heap_alignment);
   total_reserved = add_and_check_overflow(total_reserved, pg_max_size);
 
   Universe::check_alignment(total_reserved, HeapRegion::GrainBytes, "g1 heap and perm");
 
-  char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
-
-  ReservedHeapSpace heap_rs(total_reserved, HeapRegion::GrainBytes,
+  char* addr = Universe::preferred_heap_base(total_reserved, heap_alignment, Universe::UnscaledNarrowOop);
+
+  ReservedHeapSpace heap_rs(total_reserved, heap_alignment,
                             UseLargePages, addr);
 
   if (UseCompressedOops) {
@@ -2044,17 +2046,17 @@
       // Failed to reserve at specified address - the requested memory
       // region is taken already, for example, by 'java' launcher.
      // Try again to reserve the heap at a higher address.
-      addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
-
-      ReservedHeapSpace heap_rs0(total_reserved, HeapRegion::GrainBytes,
+      addr = Universe::preferred_heap_base(total_reserved, heap_alignment, Universe::ZeroBasedNarrowOop);
+
+      ReservedHeapSpace heap_rs0(total_reserved, heap_alignment,
                                  UseLargePages, addr);
 
       if (addr != NULL && !heap_rs0.is_reserved()) {
         // Failed to reserve at specified address again - give up.
-        addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
+        addr = Universe::preferred_heap_base(total_reserved, heap_alignment, Universe::HeapBasedNarrowOop);
         assert(addr == NULL, "");
 
-        ReservedHeapSpace heap_rs1(total_reserved, HeapRegion::GrainBytes,
+        ReservedHeapSpace heap_rs1(total_reserved, heap_alignment,
                                    UseLargePages, addr);
         heap_rs = heap_rs1;
       } else {
@@ -2517,7 +2519,7 @@
   FullGCCount_lock->notify_all();
 }
 
-void G1CollectedHeap::register_concurrent_cycle_start(jlong start_time) {
+void G1CollectedHeap::register_concurrent_cycle_start(const Ticks& start_time) {
   _concurrent_cycle_started = true;
   _gc_timer_cm->register_gc_start(start_time);
 
@@ -2527,7 +2529,7 @@
 
 void G1CollectedHeap::register_concurrent_cycle_end() {
   if (_concurrent_cycle_started) {
-    _gc_timer_cm->register_gc_end(os::elapsed_counter());
+    _gc_timer_cm->register_gc_end();
 
     if (_cm->has_aborted()) {
       _gc_tracer_cm->report_concurrent_mode_failure();
@@ -3815,7 +3817,7 @@
     return false;
   }
 
-  _gc_timer_stw->register_gc_start(os::elapsed_counter());
+  _gc_timer_stw->register_gc_start();
 
   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
 
@@ -4193,7 +4195,7 @@
 
     _gc_tracer_stw->report_evacuation_info(&evacuation_info);
     _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
-    _gc_timer_stw->register_gc_end(os::elapsed_counter());
+    _gc_timer_stw->register_gc_end();
     _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
   }
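
The reservation code above retries across the three compressed-oop modes, from the cheapest encoding to the most general, each time the previous reservation fails. A hedged sketch of the ladder (variable names here are placeholders):

    // 1. UnscaledNarrowOop:  heap ends below 4 GB,  oop == narrowOop
    // 2. ZeroBasedNarrowOop: heap ends below 32 GB, oop == narrowOop << shift
    // 3. HeapBasedNarrowOop: anywhere,              oop == base + (narrowOop << shift)
    char* addr = Universe::preferred_heap_base(total, align, Universe::UnscaledNarrowOop);
    ReservedHeapSpace rs(total, align, UseLargePages, addr);
    if (addr != NULL && !rs.is_reserved()) {
      // fall back to ZeroBasedNarrowOop, then HeapBasedNarrowOop
    }
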
 
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Jan 15 10:53:50 2014 -0800
@@ -69,6 +69,7 @@
 class G1NewTracer;
 class G1OldTracer;
 class EvacuationFailedInfo;
+class Ticks;
 
 typedef OverflowTaskQueue<StarTask, mtGC>         RefToScanQueue;
 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
@@ -747,7 +748,7 @@
     return _old_marking_cycles_completed;
   }
 
-  void register_concurrent_cycle_start(jlong start_time);
+  void register_concurrent_cycle_start(const Ticks& start_time);
   void register_concurrent_cycle_end();
   void trace_heap_after_concurrent_cycle();
 
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -310,7 +310,8 @@
 void G1CollectorPolicy::initialize_flags() {
   set_min_alignment(HeapRegion::GrainBytes);
   size_t card_table_alignment = GenRemSet::max_alignment_constraint(rem_set_name());
-  set_max_alignment(MAX2(card_table_alignment, min_alignment()));
+  size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
+  set_max_alignment(MAX3(card_table_alignment, min_alignment(), page_size));
   if (SurvivorRatio < 1) {
     vm_exit_during_initialization("Invalid survivor ratio specified");
   }
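
The extra MAX3 term matters once large pages are in play; a worked example with assumed (typical x86_64) values, not taken from the source:

    size_t card  = 1*M;            // assumed card-table alignment constraint
    size_t grain = 1*M;            // assumed HeapRegion::GrainBytes
    size_t page  = 2*M;            // typical large-page size on x86_64
    size_t before = MAX2(card, grain);        // == 1M: not large-page aligned
    size_t after  = MAX3(card, grain, page);  // == 2M: heap can use large pages
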
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -923,7 +923,7 @@
 
   GenCollectedHeap* gch = GenCollectedHeap::heap();
 
-  _gc_timer->register_gc_start(os::elapsed_counter());
+  _gc_timer->register_gc_start();
 
   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
     "not a CMS generational heap");
@@ -1100,7 +1100,7 @@
   gch->trace_heap_after_gc(&gc_tracer);
   gc_tracer.report_tenuring_threshold(tenuring_threshold());
 
-  _gc_timer->register_gc_end(os::elapsed_counter());
+  _gc_timer->register_gc_end();
 
   gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
 }
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -90,14 +90,16 @@
                   og_min_size, og_max_size,
                   yg_min_size, yg_max_size);
 
-  // The ReservedSpace ctor used below requires that the page size for the perm
-  // gen is <= the page size for the rest of the heap (young + old gens).
   const size_t og_page_sz = os::page_size_for_region(yg_min_size + og_min_size,
                                                      yg_max_size + og_max_size,
                                                      8);
-  const size_t pg_page_sz = MIN2(os::page_size_for_region(pg_min_size,
-                                                          pg_max_size, 16),
-                                 og_page_sz);
+
+  // Use the same page size for both the perm gen and the old gen,
+  // so that large pages can be allocated when the heap is reserved,
+  // even on implementations that cannot 'commit' large pages.
+  // NEEDS_CLEANUP. The ReservedHeapSpace/ReservedSpace ctor that takes
+  // both a prefix and a suffix alignment can now be removed.
+  const size_t pg_page_sz = og_page_sz;
 
   const size_t pg_align = set_alignment(_perm_gen_alignment,  pg_page_sz);
   const size_t og_align = set_alignment(_old_gen_alignment,   og_page_sz);
@@ -138,12 +140,9 @@
   total_reserved = add_and_check_overflow(total_reserved, og_max_size);
   total_reserved = add_and_check_overflow(total_reserved, yg_max_size);
 
-  char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
+  assert(is_size_aligned(total_reserved, og_align), "Must be");
 
-  // The main part of the heap (old gen + young gen) can often use a larger page
-  // size than is needed or wanted for the perm gen.  Use the "compound
-  // alignment" ReservedSpace ctor to avoid having to use the same page size for
-  // all gens.
+  char* addr = Universe::preferred_heap_base(total_reserved, og_align, Universe::UnscaledNarrowOop);
 
   ReservedHeapSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size,
                             og_align, addr);
@@ -153,12 +152,12 @@
       // Failed to reserve at specified address - the requested memory
       // region is taken already, for example, by 'java' launcher.
      // Try again to reserve the heap at a higher address.
-      addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
+      addr = Universe::preferred_heap_base(total_reserved, og_align, Universe::ZeroBasedNarrowOop);
       ReservedHeapSpace heap_rs0(pg_max_size, pg_align, og_max_size + yg_max_size,
                                  og_align, addr);
       if (addr != NULL && !heap_rs0.is_reserved()) {
         // Failed to reserve at specified address again - give up.
-        addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
+        addr = Universe::preferred_heap_base(total_reserved, og_align, Universe::HeapBasedNarrowOop);
         assert(addr == NULL, "");
         ReservedHeapSpace heap_rs1(pg_max_size, pg_align, og_max_size + yg_max_size,
                                    og_align, addr);
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -116,7 +116,7 @@
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
   GCCause::Cause gc_cause = heap->gc_cause();
 
-  _gc_timer->register_gc_start(os::elapsed_counter());
+  _gc_timer->register_gc_start();
   _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());
 
   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
@@ -386,7 +386,7 @@
   ParallelTaskTerminator::print_termination_counts();
 #endif
 
-  _gc_timer->register_gc_end(os::elapsed_counter());
+  _gc_timer->register_gc_end();
 
   _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
 
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2041,7 +2041,7 @@
 
   ParallelScavengeHeap* heap = gc_heap();
 
-  _gc_timer.register_gc_start(os::elapsed_counter());
+  _gc_timer.register_gc_start();
   _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
 
   TimeStamp marking_start;
@@ -2285,7 +2285,7 @@
   ParallelTaskTerminator::print_termination_counts();
 #endif
 
-  _gc_timer.register_gc_end(os::elapsed_counter());
+  _gc_timer.register_gc_end();
 
   _gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
   _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -266,7 +266,7 @@
   assert(_preserved_mark_stack.is_empty(), "should be empty");
   assert(_preserved_oop_stack.is_empty(), "should be empty");
 
-  _gc_timer.register_gc_start(os::elapsed_counter());
+  _gc_timer.register_gc_start();
 
   TimeStamp scavenge_entry;
   TimeStamp scavenge_midpoint;
@@ -689,7 +689,7 @@
 #endif
 
 
-  _gc_timer.register_gc_end(os::elapsed_counter());
+  _gc_timer.register_gc_end();
 
   _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());
 
--- a/src/share/vm/gc_implementation/shared/gcTimer.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/gc_implementation/shared/gcTimer.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,52 +25,55 @@
 #include "precompiled.hpp"
 #include "gc_implementation/shared/gcTimer.hpp"
 #include "utilities/growableArray.hpp"
+#include "utilities/ticks.inline.hpp"
 
-void GCTimer::register_gc_start(jlong time) {
+// The "time" parameter of most of the functions below
+// has a default value of Ticks::now().
+
+void GCTimer::register_gc_start(const Ticks& time) {
   _time_partitions.clear();
   _gc_start = time;
 }
 
-void GCTimer::register_gc_end(jlong time) {
+void GCTimer::register_gc_end(const Ticks& time) {
   assert(!_time_partitions.has_active_phases(),
       "We should have ended all started phases, before ending the GC");
 
   _gc_end = time;
 }
 
-void GCTimer::register_gc_pause_start(const char* name, jlong time) {
+void GCTimer::register_gc_pause_start(const char* name, const Ticks& time) {
   _time_partitions.report_gc_phase_start(name, time);
 }
 
-void GCTimer::register_gc_pause_end(jlong time) {
+void GCTimer::register_gc_pause_end(const Ticks& time) {
   _time_partitions.report_gc_phase_end(time);
 }
 
-void GCTimer::register_gc_phase_start(const char* name, jlong time) {
+void GCTimer::register_gc_phase_start(const char* name, const Ticks& time) {
   _time_partitions.report_gc_phase_start(name, time);
 }
 
-void GCTimer::register_gc_phase_end(jlong time) {
+void GCTimer::register_gc_phase_end(const Ticks& time) {
   _time_partitions.report_gc_phase_end(time);
 }
 
-
-void STWGCTimer::register_gc_start(jlong time) {
+void STWGCTimer::register_gc_start(const Ticks& time) {
   GCTimer::register_gc_start(time);
   register_gc_pause_start("GC Pause", time);
 }
 
-void STWGCTimer::register_gc_end(jlong time) {
+void STWGCTimer::register_gc_end(const Ticks& time) {
   register_gc_pause_end(time);
   GCTimer::register_gc_end(time);
 }
 
-void ConcurrentGCTimer::register_gc_pause_start(const char* name, jlong time) {
-  GCTimer::register_gc_pause_start(name, time);
+void ConcurrentGCTimer::register_gc_pause_start(const char* name) {
+  GCTimer::register_gc_pause_start(name);
 }
 
-void ConcurrentGCTimer::register_gc_pause_end(jlong time) {
-  GCTimer::register_gc_pause_end(time);
+void ConcurrentGCTimer::register_gc_pause_end() {
+  GCTimer::register_gc_pause_end();
 }
 
 void PhasesStack::clear() {
@@ -111,11 +114,11 @@
 void TimePartitions::clear() {
   _phases->clear();
   _active_phases.clear();
-  _sum_of_pauses = 0;
-  _longest_pause = 0;
+  _sum_of_pauses = Tickspan();
+  _longest_pause = Tickspan();
 }
 
-void TimePartitions::report_gc_phase_start(const char* name, jlong time) {
+void TimePartitions::report_gc_phase_start(const char* name, const Ticks& time) {
   assert(_phases->length() <= 1000, "Too many recorded phases?");
 
   int level = _active_phases.count();
@@ -133,13 +136,13 @@
 void TimePartitions::update_statistics(GCPhase* phase) {
   // FIXME: This should only be done for pause phases
   if (phase->level() == 0) {
-    jlong pause = phase->end() - phase->start();
+    const Tickspan pause = phase->end() - phase->start();
     _sum_of_pauses += pause;
     _longest_pause = MAX2(pause, _longest_pause);
   }
 }
 
-void TimePartitions::report_gc_phase_end(jlong time) {
+void TimePartitions::report_gc_phase_end(const Ticks& time) {
   int phase_index = _active_phases.pop();
   GCPhase* phase = _phases->adr_at(phase_index);
   phase->set_end(time);
@@ -157,14 +160,6 @@
   return _phases->adr_at(index);
 }
 
-jlong TimePartitions::sum_of_pauses() {
-  return _sum_of_pauses;
-}
-
-jlong TimePartitions::longest_pause() {
-  return _longest_pause;
-}
-
 bool TimePartitions::has_active_phases() {
   return _active_phases.count() > 0;
 }
@@ -194,7 +189,7 @@
     max_nested_pause_phases();
   }
 
-  static void validate_pause_phase(GCPhase* phase, int level, const char* name, jlong start, jlong end) {
+  static void validate_pause_phase(GCPhase* phase, int level, const char* name, const Ticks& start, const Ticks& end) {
     assert(phase->level() == level, "Incorrect level");
     assert(strcmp(phase->name(), name) == 0, "Incorrect name");
     assert(phase->start() == start, "Incorrect start");
@@ -209,8 +204,8 @@
     TimePartitionPhasesIterator iter(&time_partitions);
 
     validate_pause_phase(iter.next(), 0, "PausePhase", 2, 8);
-    assert(time_partitions.sum_of_pauses() == 8-2, "Incorrect");
-    assert(time_partitions.longest_pause() == 8-2, "Incorrect");
+    assert(time_partitions.sum_of_pauses() == Ticks(8) - Ticks(2), "Incorrect");
+    assert(time_partitions.longest_pause() == Ticks(8) - Ticks(2), "Incorrect");
 
     assert(!iter.has_next(), "Too many elements");
   }
@@ -227,8 +222,8 @@
     validate_pause_phase(iter.next(), 0, "PausePhase1", 2, 3);
     validate_pause_phase(iter.next(), 0, "PausePhase2", 4, 6);
 
-    assert(time_partitions.sum_of_pauses() == 3, "Incorrect");
-    assert(time_partitions.longest_pause() == 2, "Incorrect");
+    assert(time_partitions.sum_of_pauses() == Ticks(3) - Ticks(0), "Incorrect");
+    assert(time_partitions.longest_pause() == Ticks(2) - Ticks(0), "Incorrect");
 
     assert(!iter.has_next(), "Too many elements");
   }
@@ -245,8 +240,8 @@
     validate_pause_phase(iter.next(), 0, "PausePhase", 2, 5);
     validate_pause_phase(iter.next(), 1, "SubPhase", 3, 4);
 
-    assert(time_partitions.sum_of_pauses() == 3, "Incorrect");
-    assert(time_partitions.longest_pause() == 3, "Incorrect");
+    assert(time_partitions.sum_of_pauses() == Ticks(3) - Ticks(0), "Incorrect");
+    assert(time_partitions.longest_pause() == Ticks(3) - Ticks(0), "Incorrect");
 
     assert(!iter.has_next(), "Too many elements");
   }
@@ -269,8 +264,8 @@
     validate_pause_phase(iter.next(), 2, "SubPhase2", 4, 7);
     validate_pause_phase(iter.next(), 3, "SubPhase3", 5, 6);
 
-    assert(time_partitions.sum_of_pauses() == 7, "Incorrect");
-    assert(time_partitions.longest_pause() == 7, "Incorrect");
+    assert(time_partitions.sum_of_pauses() == Ticks(7) - Ticks(0), "Incorrect");
+    assert(time_partitions.longest_pause() == Ticks(7) - Ticks(0), "Incorrect");
 
     assert(!iter.has_next(), "Too many elements");
   }
@@ -298,8 +293,8 @@
     validate_pause_phase(iter.next(), 1, "SubPhase3", 7, 8);
     validate_pause_phase(iter.next(), 1, "SubPhase4", 9, 10);
 
-    assert(time_partitions.sum_of_pauses() == 9, "Incorrect");
-    assert(time_partitions.longest_pause() == 9, "Incorrect");
+    assert(time_partitions.sum_of_pauses() == Ticks(9) - Ticks(0), "Incorrect");
+    assert(time_partitions.longest_pause() == Ticks(9) - Ticks(0), "Incorrect");
 
     assert(!iter.has_next(), "Too many elements");
   }
@@ -336,8 +331,8 @@
     validate_pause_phase(iter.next(), 2, "SubPhase22", 12, 13);
     validate_pause_phase(iter.next(), 1, "SubPhase3", 15, 16);
 
-    assert(time_partitions.sum_of_pauses() == 15, "Incorrect");
-    assert(time_partitions.longest_pause() == 15, "Incorrect");
+    assert(time_partitions.sum_of_pauses() == Ticks(15) - Ticks(0), "Incorrect");
+    assert(time_partitions.longest_pause() == Ticks(15) - Ticks(0), "Incorrect");
 
     assert(!iter.has_next(), "Too many elements");
   }
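
The refactor throughout this file swaps raw jlong counter values for two small value types: Ticks, a point in time, and Tickspan, the difference of two Ticks. A hedged usage sketch (do_timed_work is a placeholder):

    const Ticks start = Ticks::now();    // timestamp before the work
    do_timed_work();                     // stand-in for the GC phase
    const Ticks end = Ticks::now();
    const Tickspan pause = end - start;  // durations are a distinct type

    // Most register_* functions now default 'time' to Ticks::now(),
    // so call sites shrink to:
    timer->register_gc_start();          // == register_gc_start(Ticks::now())
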
--- a/src/share/vm/gc_implementation/shared/gcTimer.hpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/gc_implementation/shared/gcTimer.hpp	Wed Jan 15 10:53:50 2014 -0800
@@ -28,6 +28,7 @@
 #include "memory/allocation.hpp"
 #include "prims/jni_md.h"
 #include "utilities/macros.hpp"
+#include "utilities/ticks.hpp"
 
 class ConcurrentPhase;
 class GCPhase;
@@ -45,21 +46,21 @@
 class GCPhase {
   const char* _name;
   int _level;
-  jlong _start;
-  jlong _end;
+  Ticks _start;
+  Ticks _end;
 
  public:
   void set_name(const char* name) { _name = name; }
-  const char* name() { return _name; }
+  const char* name() const { return _name; }
 
-  int level() { return _level; }
+  int level() const { return _level; }
   void set_level(int level) { _level = level; }
 
-  jlong start() { return _start; }
-  void set_start(jlong time) { _start = time; }
+  const Ticks start() const { return _start; }
+  void set_start(const Ticks& time) { _start = time; }
 
-  jlong end() { return _end; }
-  void set_end(jlong time) { _end = time; }
+  const Ticks end() const { return _end; }
+  void set_end(const Ticks& time) { _end = time; }
 
   virtual void accept(PhaseVisitor* visitor) = 0;
 };
@@ -102,22 +103,22 @@
   GrowableArray<PausePhase>* _phases;
   PhasesStack _active_phases;
 
-  jlong _sum_of_pauses;
-  jlong _longest_pause;
+  Tickspan _sum_of_pauses;
+  Tickspan _longest_pause;
 
  public:
   TimePartitions();
   ~TimePartitions();
   void clear();
 
-  void report_gc_phase_start(const char* name, jlong time);
-  void report_gc_phase_end(jlong time);
+  void report_gc_phase_start(const char* name, const Ticks& time);
+  void report_gc_phase_end(const Ticks& time);
 
   int num_phases() const;
   GCPhase* phase_at(int index) const;
 
-  jlong sum_of_pauses();
-  jlong longest_pause();
+  const Tickspan sum_of_pauses() const { return _sum_of_pauses; }
+  const Tickspan longest_pause() const { return _longest_pause; }
 
   bool has_active_phases();
  private:
@@ -133,40 +134,37 @@
 class GCTimer : public ResourceObj {
   NOT_PRODUCT(friend class GCTimerTest;)
  protected:
-  jlong _gc_start;
-  jlong _gc_end;
+  Ticks _gc_start;
+  Ticks _gc_end;
   TimePartitions _time_partitions;
 
  public:
-  virtual void register_gc_start(jlong time);
-  virtual void register_gc_end(jlong time);
+  virtual void register_gc_start(const Ticks& time = Ticks::now());
+  virtual void register_gc_end(const Ticks& time = Ticks::now());
 
-  void register_gc_phase_start(const char* name, jlong time);
-  void register_gc_phase_end(jlong time);
+  void register_gc_phase_start(const char* name, const Ticks& time);
+  void register_gc_phase_end(const Ticks& time);
 
-  jlong gc_start() { return _gc_start; }
-  jlong gc_end() { return _gc_end; }
+  const Ticks gc_start() const { return _gc_start; }
+  const Ticks gc_end() const { return _gc_end; }
 
   TimePartitions* time_partitions() { return &_time_partitions; }
 
-  long longest_pause();
-  long sum_of_pauses();
-
  protected:
-  void register_gc_pause_start(const char* name, jlong time);
-  void register_gc_pause_end(jlong time);
+  void register_gc_pause_start(const char* name, const Ticks& time = Ticks::now());
+  void register_gc_pause_end(const Ticks& time = Ticks::now());
 };
 
 class STWGCTimer : public GCTimer {
  public:
-  virtual void register_gc_start(jlong time);
-  virtual void register_gc_end(jlong time);
+  virtual void register_gc_start(const Ticks& time = Ticks::now());
+  virtual void register_gc_end(const Ticks& time = Ticks::now());
 };
 
 class ConcurrentGCTimer : public GCTimer {
  public:
-  void register_gc_pause_start(const char* name, jlong time);
-  void register_gc_pause_end(jlong time);
+  void register_gc_pause_start(const char* name);
+  void register_gc_pause_end();
 };
 
 class TimePartitionPhasesIterator {
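
One C++ subtlety in this header: register_gc_start/register_gc_end are virtual and carry default arguments, and C++ binds default arguments to the static type of the call rather than the dynamic one, which is why both GCTimer and STWGCTimer repeat '= Ticks::now()'. A minimal illustration with hypothetical types:

    struct Base    { virtual void f(int x = 1) { /* ... */ } };
    struct Derived : Base { virtual void f(int x = 2) { /* ... */ } };

    Base* p = new Derived();
    p->f();  // runs Derived::f, but with x == 1: the default argument
             // comes from the static type (Base), the body from Derived
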
--- a/src/share/vm/gc_implementation/shared/gcTrace.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/gc_implementation/shared/gcTrace.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -33,6 +33,7 @@
 #include "memory/referenceProcessorStats.hpp"
 #include "runtime/os.hpp"
 #include "utilities/globalDefinitions.hpp"
+#include "utilities/ticks.inline.hpp"
 
 #ifndef SERIALGC
 #include "gc_implementation/g1/evacuationInfo.hpp"
@@ -46,7 +47,7 @@
   return GCTracer_next_gc_id++;
 }
 
-void GCTracer::report_gc_start_impl(GCCause::Cause cause, jlong timestamp) {
+void GCTracer::report_gc_start_impl(GCCause::Cause cause, const Ticks& timestamp) {
   assert_unset_gc_id();
 
   GCId gc_id = create_new_gc_id();
@@ -55,7 +56,7 @@
   _shared_gc_info.set_start_timestamp(timestamp);
 }
 
-void GCTracer::report_gc_start(GCCause::Cause cause, jlong timestamp) {
+void GCTracer::report_gc_start(GCCause::Cause cause, const Ticks& timestamp) {
   assert_unset_gc_id();
 
   report_gc_start_impl(cause, timestamp);
@@ -65,7 +66,7 @@
   return _shared_gc_info.id() != SharedGCInfo::UNSET_GCID;
 }
 
-void GCTracer::report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions) {
+void GCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
   assert_set_gc_id();
 
   _shared_gc_info.set_sum_of_pauses(time_partitions->sum_of_pauses());
@@ -76,7 +77,7 @@
   send_garbage_collection_event();
 }
 
-void GCTracer::report_gc_end(jlong timestamp, TimePartitions* time_partitions) {
+void GCTracer::report_gc_end(const Ticks& timestamp, TimePartitions* time_partitions) {
   assert_set_gc_id();
 
   report_gc_end_impl(timestamp, time_partitions);
@@ -97,10 +98,10 @@
   const GCId _gc_id;
   const double _size_threshold_percentage;
   const size_t _total_size_in_words;
-  const jlong _timestamp;
+  const Ticks _timestamp;
 
  public:
-  ObjectCountEventSenderClosure(GCId gc_id, size_t total_size_in_words, jlong timestamp) :
+  ObjectCountEventSenderClosure(GCId gc_id, size_t total_size_in_words, const Ticks& timestamp) :
     _gc_id(gc_id),
     _size_threshold_percentage(ObjectCountCutOffPercent / 100),
     _total_size_in_words(total_size_in_words),
@@ -154,8 +155,7 @@
       ObjectCountFilter object_filter(is_alive_cl);
       HeapInspection::populate_table(&cit, false, &object_filter);
 
-      jlong timestamp = os::elapsed_counter();
-      ObjectCountEventSenderClosure event_sender(_shared_gc_info.id(), cit.size_of_instances_in_words(), timestamp);
+      ObjectCountEventSenderClosure event_sender(_shared_gc_info.id(), cit.size_of_instances_in_words(), Ticks::now());
       cit.iterate(&event_sender);
     }
   }
@@ -168,7 +168,7 @@
   send_perm_gen_summary_event(when, perm_gen_summary);
 }
 
-void YoungGCTracer::report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions) {
+void YoungGCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
   assert_set_gc_id();
   assert(_tenuring_threshold != UNSET_TENURING_THRESHOLD, "Tenuring threshold has not been reported");
 
@@ -188,14 +188,14 @@
   _tenuring_threshold = tenuring_threshold;
 }
 
-void OldGCTracer::report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions) {
+void OldGCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
   assert_set_gc_id();
 
   GCTracer::report_gc_end_impl(timestamp, time_partitions);
   send_old_gc_event();
 }
 
-void ParallelOldTracer::report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions) {
+void ParallelOldTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
   assert_set_gc_id();
 
   OldGCTracer::report_gc_end_impl(timestamp, time_partitions);
@@ -221,7 +221,7 @@
   _g1_young_gc_info.set_type(type);
 }
 
-void G1NewTracer::report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions) {
+void G1NewTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
   assert_set_gc_id();
 
   YoungGCTracer::report_gc_end_impl(timestamp, time_partitions);
--- a/src/share/vm/gc_implementation/shared/gcTrace.hpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/gc_implementation/shared/gcTrace.hpp	Wed Jan 15 10:53:50 2014 -0800
@@ -34,6 +34,7 @@
 #ifndef SERIALGC
 #include "gc_implementation/g1/g1YCTypes.hpp"
 #endif
+#include "utilities/ticks.hpp"
 
 typedef uint GCId;
 
@@ -46,8 +47,6 @@
 class BoolObjectClosure;
 
 class SharedGCInfo VALUE_OBJ_CLASS_SPEC {
-  static const jlong UNSET_TIMESTAMP = -1;
-
  public:
   static const GCId UNSET_GCID = (GCId)-1;
 
@@ -55,23 +54,30 @@
   GCId _id;
   GCName _name;
   GCCause::Cause _cause;
-  jlong _start_timestamp;
-  jlong _end_timestamp;
-  jlong _sum_of_pauses;
-  jlong _longest_pause;
+  Ticks     _start_timestamp;
+  Ticks     _end_timestamp;
+  Tickspan  _sum_of_pauses;
+  Tickspan  _longest_pause;
 
  public:
-  SharedGCInfo(GCName name) : _id(UNSET_GCID), _name(name), _cause(GCCause::_last_gc_cause),
-      _start_timestamp(UNSET_TIMESTAMP), _end_timestamp(UNSET_TIMESTAMP), _sum_of_pauses(0), _longest_pause(0) {}
+  SharedGCInfo(GCName name) :
+    _id(UNSET_GCID),
+    _name(name),
+    _cause(GCCause::_last_gc_cause),
+    _start_timestamp(),
+    _end_timestamp(),
+    _sum_of_pauses(),
+    _longest_pause() {
+  }
 
   void set_id(GCId id) { _id = id; }
   GCId id() const { return _id; }
 
-  void set_start_timestamp(jlong timestamp) { _start_timestamp = timestamp; }
-  jlong start_timestamp() const { return _start_timestamp; }
+  void set_start_timestamp(const Ticks& timestamp) { _start_timestamp = timestamp; }
+  const Ticks start_timestamp() const { return _start_timestamp; }
 
-  void set_end_timestamp(jlong timestamp) { _end_timestamp = timestamp; }
-  jlong end_timestamp() const { return _end_timestamp; }
+  void set_end_timestamp(const Ticks& timestamp) { _end_timestamp = timestamp; }
+  const Ticks end_timestamp() const { return _end_timestamp; }
 
   void set_name(GCName name) { _name = name; }
   GCName name() const { return _name; }
@@ -79,11 +85,11 @@
   void set_cause(GCCause::Cause cause) { _cause = cause; }
   GCCause::Cause cause() const { return _cause; }
 
-  void set_sum_of_pauses(jlong duration) { _sum_of_pauses = duration; }
-  jlong sum_of_pauses() const { return _sum_of_pauses; }
+  void set_sum_of_pauses(const Tickspan& duration) { _sum_of_pauses = duration; }
+  const Tickspan sum_of_pauses() const { return _sum_of_pauses; }
 
-  void set_longest_pause(jlong duration) { _longest_pause = duration; }
-  jlong longest_pause() const { return _longest_pause; }
+  void set_longest_pause(const Tickspan& duration) { _longest_pause = duration; }
+  const Tickspan longest_pause() const { return _longest_pause; }
 };
 
 class ParallelOldGCInfo VALUE_OBJ_CLASS_SPEC {
@@ -115,8 +121,8 @@
   SharedGCInfo _shared_gc_info;
 
  public:
-  void report_gc_start(GCCause::Cause cause, jlong timestamp);
-  void report_gc_end(jlong timestamp, TimePartitions* time_partitions);
+  void report_gc_start(GCCause::Cause cause, const Ticks& timestamp);
+  void report_gc_end(const Ticks& timestamp, TimePartitions* time_partitions);
   void report_gc_heap_summary(GCWhen::Type when, const GCHeapSummary& heap_summary, const PermGenSummary& perm_gen_summary) const;
   void report_gc_reference_stats(const ReferenceProcessorStats& rp) const;
   void report_object_count_after_gc(BoolObjectClosure* object_filter);
@@ -125,8 +131,8 @@
 
  protected:
   GCTracer(GCName name) : _shared_gc_info(name) {}
-  virtual void report_gc_start_impl(GCCause::Cause cause, jlong timestamp);
-  virtual void report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions);
+  virtual void report_gc_start_impl(GCCause::Cause cause, const Ticks& timestamp);
+  virtual void report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions);
 
  private:
   void send_garbage_collection_event() const;
@@ -143,7 +149,7 @@
 
  protected:
   YoungGCTracer(GCName name) : GCTracer(name), _tenuring_threshold(UNSET_TENURING_THRESHOLD) {}
-  virtual void report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions);
+  virtual void report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions);
 
  public:
   void report_promotion_failed(const PromotionFailedInfo& pf_info);
@@ -157,7 +163,7 @@
 class OldGCTracer : public GCTracer {
  protected:
   OldGCTracer(GCName name) : GCTracer(name) {}
-  virtual void report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions);
+  virtual void report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions);
 
  public:
   void report_concurrent_mode_failure();
@@ -175,7 +181,7 @@
   void report_dense_prefix(void* dense_prefix);
 
  protected:
-  void report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions);
+  void report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions);
 
  private:
   void send_parallel_old_event() const;
@@ -209,7 +215,7 @@
   G1NewTracer() : YoungGCTracer(G1New) {}
 
   void report_yc_type(G1YCType type);
-  void report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions);
+  void report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions);
   void report_evacuation_info(EvacuationInfo* info);
   void report_evacuation_failed(EvacuationFailedInfo& ef_info);
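
Switching SharedGCInfo's fields to Ticks/Tickspan also removed the UNSET_TIMESTAMP sentinel: a default-constructed Ticks already means 'no time recorded'. A hedged before/after sketch:

    // Before: sentinel needed to mark 'unset'
    //   jlong _start_timestamp;   // initialized to UNSET_TIMESTAMP (-1)
    // After: the value type's default state carries the same meaning
    //   Ticks _start_timestamp;   // default-constructed == not yet stamped
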
 
--- a/src/share/vm/gc_implementation/shared/gcTraceSend.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/gc_implementation/shared/gcTraceSend.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -28,8 +28,10 @@
 #include "gc_implementation/shared/gcTrace.hpp"
 #include "gc_implementation/shared/gcWhen.hpp"
 #include "gc_implementation/shared/copyFailedInfo.hpp"
+#include "runtime/os.hpp"
 #include "trace/traceBackend.hpp"
 #include "trace/tracing.hpp"
+
 #ifndef SERIALGC
 #include "gc_implementation/g1/evacuationInfo.hpp"
 #include "gc_implementation/g1/g1YCTypes.hpp"
--- a/src/share/vm/gc_implementation/shared/gcTraceTime.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/gc_implementation/shared/gcTraceTime.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,11 +43,13 @@
 # include "thread_bsd.inline.hpp"
 #endif
 
+#include "utilities/ticks.inline.hpp"
+
 
 GCTraceTime::GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer) :
-    _title(title), _doit(doit), _print_cr(print_cr), _timer(timer) {
+    _title(title), _doit(doit), _print_cr(print_cr), _timer(timer), _start_counter() {
   if (_doit || _timer != NULL) {
-    _start_counter = os::elapsed_counter();
+    _start_counter.stamp();
   }
 
   if (_timer != NULL) {
@@ -66,10 +68,10 @@
 }
 
 GCTraceTime::~GCTraceTime() {
-  jlong stop_counter = 0;
+  Ticks stop_counter;
 
   if (_doit || _timer != NULL) {
-    stop_counter = os::elapsed_counter();
+    stop_counter.stamp();
   }
 
   if (_timer != NULL) {
@@ -77,11 +79,12 @@
   }
 
   if (_doit) {
-    double seconds = TimeHelper::counter_to_seconds(stop_counter - _start_counter);
+    const Tickspan duration = stop_counter - _start_counter;
+    double duration_in_seconds = TicksToTimeHelper::seconds(duration);
     if (_print_cr) {
-      gclog_or_tty->print_cr(", %3.7f secs]", seconds);
+      gclog_or_tty->print_cr(", %3.7f secs]", duration_in_seconds);
     } else {
-      gclog_or_tty->print(", %3.7f secs]", seconds);
+      gclog_or_tty->print(", %3.7f secs]", duration_in_seconds);
     }
     gclog_or_tty->flush();
   }
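
GCTraceTime is an RAII scope timer: the constructor stamps _start_counter and reports the phase to the GCTimer when one is supplied; the destructor stamps the end and prints the elapsed seconds. A hedged usage sketch:

    {
      GCTraceTime tm("Example Phase", true /* doit: print */,
                     true /* print_cr */, gc_timer);
      do_phase_work();   // placeholder for the timed region
    }  // destructor: stop_counter.stamp(); prints ", %3.7f secs]"
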
--- a/src/share/vm/gc_implementation/shared/gcTraceTime.hpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/gc_implementation/shared/gcTraceTime.hpp	Wed Jan 15 10:53:50 2014 -0800
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACETIME_HPP
 
 #include "prims/jni_md.h"
+#include "utilities/ticks.hpp"
 
 class GCTimer;
 
@@ -34,7 +35,7 @@
   bool _doit;
   bool _print_cr;
   GCTimer* _timer;
-  jlong _start_counter;
+  Ticks _start_counter;
 
  public:
   GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer);
--- a/src/share/vm/gc_implementation/shared/objectCountEventSender.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/gc_implementation/shared/objectCountEventSender.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -28,8 +28,10 @@
 #include "memory/heapInspection.hpp"
 #include "trace/tracing.hpp"
 #include "utilities/globalDefinitions.hpp"
+#include "utilities/ticks.hpp"
 
-void ObjectCountEventSender::send(const KlassInfoEntry* entry, GCId gc_id, jlong timestamp) {
+void ObjectCountEventSender::send(const KlassInfoEntry* entry, GCId gc_id, const Ticks& timestamp) {
+#if INCLUDE_TRACE
   assert(Tracing::is_event_enabled(EventObjectCountAfterGC::eventId),
          "Only call this method if the event is enabled");
 
@@ -40,6 +42,8 @@
   event.set_totalSize(entry->words() * BytesPerWord);
   event.set_endtime(timestamp);
   event.commit();
+
+#endif
 }
 
 bool ObjectCountEventSender::should_send_event() {
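
The new #if INCLUDE_TRACE guard keeps the function compilable in builds without the trace backend: it simply becomes an empty body, so call sites need no guards of their own. Sketch of the idiom (the function name is illustrative):

    void object_count_send_sketch(const Ticks& timestamp) {
    #if INCLUDE_TRACE
      // build the event, set its fields, event.commit();
    #endif
      // without INCLUDE_TRACE this compiles to a no-op function
    }
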
--- a/src/share/vm/gc_implementation/shared/objectCountEventSender.hpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/gc_implementation/shared/objectCountEventSender.hpp	Wed Jan 15 10:53:50 2014 -0800
@@ -29,10 +29,11 @@
 #include "memory/allocation.hpp"
 
 class KlassInfoEntry;
+class Ticks;
 
 class ObjectCountEventSender : public AllStatic {
  public:
-  static void send(const KlassInfoEntry* entry, GCId gc_id, jlong timestamp);
+  static void send(const KlassInfoEntry* entry, GCId gc_id, const Ticks& timestamp);
   static bool should_send_event();
 };
 
--- a/src/share/vm/memory/collectorPolicy.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/memory/collectorPolicy.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -230,6 +230,8 @@
       alignment = lcm(os::large_page_size(), alignment);
   }
 
+  assert(alignment >= min_alignment(), "Must be");
+
   return alignment;
 }
 
--- a/src/share/vm/memory/defNewGeneration.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/memory/defNewGeneration.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -538,7 +538,7 @@
 
   GenCollectedHeap* gch = GenCollectedHeap::heap();
 
-  _gc_timer->register_gc_start(os::elapsed_counter());
+  _gc_timer->register_gc_start();
   DefNewTracer gc_tracer;
   gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
 
@@ -682,7 +682,7 @@
   gch->trace_heap_after_gc(&gc_tracer);
   gc_tracer.report_tenuring_threshold(tenuring_threshold());
 
-  _gc_timer->register_gc_end(os::elapsed_counter());
+  _gc_timer->register_gc_end();
 
   gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
 }
--- a/src/share/vm/memory/genCollectedHeap.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -99,17 +99,19 @@
   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
 
   // The heap must be at least as aligned as generations.
-  size_t alignment = Generation::GenGrain;
+  size_t gen_alignment = Generation::GenGrain;
 
   _gen_specs = gen_policy()->generations();
   PermanentGenerationSpec *perm_gen_spec =
                                 collector_policy()->permanent_generation();
 
+  size_t heap_alignment = collector_policy()->max_alignment();
+
   // Make sure the sizes are all aligned.
   for (i = 0; i < _n_gens; i++) {
-    _gen_specs[i]->align(alignment);
+    _gen_specs[i]->align(gen_alignment);
   }
-  perm_gen_spec->align(alignment);
+  perm_gen_spec->align(heap_alignment);
 
   // If we are dumping the heap, then allocate a wasted block of address
   // space in order to push the heap to a lower address.  This extra
@@ -130,9 +132,9 @@
   char* heap_address;
   size_t total_reserved = 0;
   int n_covered_regions = 0;
-  ReservedSpace heap_rs(0);
+  ReservedSpace heap_rs;
 
-  heap_address = allocate(alignment, perm_gen_spec, &total_reserved,
+  heap_address = allocate(heap_alignment, perm_gen_spec, &total_reserved,
                           &n_covered_regions, &heap_rs);
 
   if (UseSharedSpaces) {
@@ -142,7 +144,7 @@
       }
       FileMapInfo* mapinfo = FileMapInfo::current_info();
       mapinfo->fail_continue("Unable to reserve shared region.");
-      allocate(alignment, perm_gen_spec, &total_reserved, &n_covered_regions,
+      allocate(heap_alignment, perm_gen_spec, &total_reserved, &n_covered_regions,
                &heap_rs);
     }
   }
@@ -207,19 +209,21 @@
   const size_t pageSize = UseLargePages ?
       os::large_page_size() : os::vm_page_size();
 
+  assert(alignment % pageSize == 0, "Must be");
+
   for (int i = 0; i < _n_gens; i++) {
     total_reserved = add_and_check_overflow(total_reserved, _gen_specs[i]->max_size());
     n_covered_regions += _gen_specs[i]->n_covered_regions();
   }
 
-  assert(total_reserved % pageSize == 0,
-         err_msg("Gen size; total_reserved=" SIZE_FORMAT ", pageSize="
-                 SIZE_FORMAT, total_reserved, pageSize));
+  assert(total_reserved % alignment == 0,
+         err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
+                 SIZE_FORMAT, total_reserved, alignment));
   total_reserved = add_and_check_overflow(total_reserved, perm_gen_spec->max_size());
-  assert(total_reserved % pageSize == 0,
-         err_msg("Perm size; total_reserved=" SIZE_FORMAT ", pageSize="
+  assert(total_reserved % alignment == 0,
+         err_msg("Perm size; total_reserved=" SIZE_FORMAT ", alignment="
                  SIZE_FORMAT ", perm gen max=" SIZE_FORMAT, total_reserved,
-                 pageSize, perm_gen_spec->max_size()));
+                 alignment, perm_gen_spec->max_size()));
 
   n_covered_regions += perm_gen_spec->n_covered_regions();
 
@@ -229,7 +233,9 @@
   total_reserved = add_and_check_overflow(total_reserved, misc);
 
   if (UseLargePages) {
+    assert(misc == 0, "CDS does not support Large Pages");
     assert(total_reserved != 0, "total_reserved cannot be 0");
+    assert(is_size_aligned(total_reserved, os::large_page_size()), "Must be");
     total_reserved = round_up_and_check_overflow(total_reserved, os::large_page_size());
   }
 
@@ -250,7 +256,7 @@
   } else {
     heap_address = NULL;  // any address will do.
     if (UseCompressedOops) {
-      heap_address = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
+      heap_address = Universe::preferred_heap_base(total_reserved, alignment, Universe::UnscaledNarrowOop);
       *_total_reserved = total_reserved;
       *_n_covered_regions = n_covered_regions;
       *heap_rs = ReservedHeapSpace(total_reserved, alignment,
@@ -260,13 +266,13 @@
         // Failed to reserve at specified address - the requested memory
         // region is taken already, for example, by 'java' launcher.
        // Try again to reserve the heap at a higher address.
-        heap_address = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
+        heap_address = Universe::preferred_heap_base(total_reserved, alignment, Universe::ZeroBasedNarrowOop);
         *heap_rs = ReservedHeapSpace(total_reserved, alignment,
                                      UseLargePages, heap_address);
 
         if (heap_address != NULL && !heap_rs->is_reserved()) {
           // Failed to reserve at specified address again - give up.
-          heap_address = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
+          heap_address = Universe::preferred_heap_base(total_reserved, alignment, Universe::HeapBasedNarrowOop);
           assert(heap_address == NULL, "");
           *heap_rs = ReservedHeapSpace(total_reserved, alignment,
                                        UseLargePages, heap_address);
--- a/src/share/vm/memory/generation.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/memory/generation.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -476,16 +476,16 @@
     x(ref_processor(), gch->reserved_region());
 
   STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
-  gc_timer->register_gc_start(os::elapsed_counter());
+  gc_timer->register_gc_start();
 
   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
 
   GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs);
 
-  gc_timer->register_gc_end(os::elapsed_counter());
+  gc_timer->register_gc_end();
 
-  gc_tracer->report_gc_end(os::elapsed_counter(), gc_timer->time_partitions());
+  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
 
   SpecializationStats::print();
 }
--- a/src/share/vm/memory/universe.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/memory/universe.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -826,17 +826,23 @@
 // 32Gb
 // OopEncodingHeapMax == NarrowOopHeapMax << LogMinObjAlignmentInBytes;
 
-char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
+char* Universe::preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode) {
+  assert(is_size_aligned((size_t)OopEncodingHeapMax, alignment), "Must be");
+  assert(is_size_aligned((size_t)NarrowOopHeapMax, alignment), "Must be");
+  assert(is_size_aligned(heap_size, alignment), "Must be");
+
+  uintx heap_base_min_address_aligned = align_size_up(HeapBaseMinAddress, alignment);
+
   size_t base = 0;
 #ifdef _LP64
   if (UseCompressedOops) {
     assert(mode == UnscaledNarrowOop  ||
            mode == ZeroBasedNarrowOop ||
            mode == HeapBasedNarrowOop, "mode is invalid");
-    const size_t total_size = heap_size + HeapBaseMinAddress;
+    const size_t total_size = heap_size + heap_base_min_address_aligned;
     // Return specified base for the first request.
     if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
-      base = HeapBaseMinAddress;
+      base = heap_base_min_address_aligned;
     } else if (total_size <= OopEncodingHeapMax && (mode != HeapBasedNarrowOop)) {
       if (total_size <= NarrowOopHeapMax && (mode == UnscaledNarrowOop) &&
           (Universe::narrow_oop_shift() == 0)) {
@@ -882,6 +888,8 @@
     }
   }
 #endif
+
+  assert(is_ptr_aligned((char*)base, alignment), "Must be");
   return (char*)base; // also return NULL (don't care) for 32-bit VM
 }
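
align_size_up rounds its first argument up to the next multiple of a power-of-two alignment, roughly (size + alignment - 1) & ~(alignment - 1). A worked example with assumed values:

    // Assumed: HeapBaseMinAddress == 2*G + 4*K, alignment == 2*M
    uintx aligned = align_size_up(2*G + 4*K, 2*M);
    // 2*G is already a multiple of 2*M, so the 4*K spill-over
    // rounds the result up to 2*G + 2*M
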
 
--- a/src/share/vm/memory/universe.hpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/memory/universe.hpp	Wed Jan 15 10:53:50 2014 -0800
@@ -380,7 +380,7 @@
 
   static NARROW_OOP_MODE narrow_oop_mode();
 
-  static char* preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode);
+  static char* preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode);
 
   // Historic gc information
   static size_t get_heap_capacity_at_last_gc()         { return _heap_capacity_at_last_gc; }
--- a/src/share/vm/opto/bytecodeInfo.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/opto/bytecodeInfo.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -103,7 +103,7 @@
   // Allows targeted inlining
   if(callee_method->should_inline()) {
     *wci_result = *(WarmCallInfo::always_hot());
-    if (PrintInlining && Verbose) {
+    if (C->print_inlining() && Verbose) {
       CompileTask::print_inline_indent(inline_level());
       tty->print_cr("Inlined method is hot: ");
     }
@@ -117,7 +117,7 @@
   if(callee_method->interpreter_throwout_count() > InlineThrowCount &&
      size < InlineThrowMaxSize ) {
     wci_result->set_profit(wci_result->profit() * 100);
-    if (PrintInlining && Verbose) {
+    if (C->print_inlining() && Verbose) {
       CompileTask::print_inline_indent(inline_level());
       tty->print_cr("Inlined method with many throws (throws=%d):", callee_method->interpreter_throwout_count());
     }
@@ -468,7 +468,7 @@
       C->log()->inline_fail(inline_msg);
     }
   }
-  if (PrintInlining) {
+  if (C->print_inlining()) {
     C->print_inlining(callee_method, inline_level(), caller_bci, inline_msg);
     if (callee_method == NULL) tty->print(" callee not monotonic or profiled");
     if (Verbose && callee_method) {
@@ -517,7 +517,7 @@
 
 #ifndef PRODUCT
   if (UseOldInlining && InlineWarmCalls
-      && (PrintOpto || PrintOptoInlining || PrintInlining)) {
+      && (PrintOpto || C->print_inlining())) {
     bool cold = wci.is_cold();
     bool hot  = !cold && wci.is_hot();
     bool old_cold = !success;
@@ -594,7 +594,7 @@
              callee_method->is_compiled_lambda_form()) {
      max_inline_level_adjust += 1;  // don't count method handle calls from the java.lang.invoke implementation
     }
-    if (max_inline_level_adjust != 0 && PrintInlining && (Verbose || WizardMode)) {
+    if (max_inline_level_adjust != 0 && C->print_inlining() && (Verbose || WizardMode)) {
       CompileTask::print_inline_indent(inline_level());
       tty->print_cr(" \\-> discounting inline depth");
     }
--- a/src/share/vm/opto/c2_globals.hpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/opto/c2_globals.hpp	Wed Jan 15 10:53:50 2014 -0800
@@ -173,6 +173,9 @@
   product_pd(intx,  LoopUnrollLimit,                                        \
           "Unroll loop bodies with node count less than this")              \
                                                                             \
+  product(intx,  LoopMaxUnroll, 16,                                         \
+          "Maximum number of unrolls for main loop")                        \
+                                                                            \
   product(intx,  LoopUnrollMin, 4,                                          \
           "Minimum number of unroll loop bodies before checking progress"   \
           "of rounds of unroll,optimize,..")                                \
--- a/src/share/vm/opto/callGenerator.hpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/opto/callGenerator.hpp	Wed Jan 15 10:53:50 2014 -0800
@@ -158,8 +158,9 @@
   virtual void print_inlining_late(const char* msg) { ShouldNotReachHere(); }
 
   static void print_inlining(Compile* C, ciMethod* callee, int inline_level, int bci, const char* msg) {
-    if (PrintInlining)
+    if (C->print_inlining()) {
       C->print_inlining(callee, inline_level, bci, msg);
+    }
   }
 };
 
--- a/src/share/vm/opto/coalesce.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/opto/coalesce.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -135,20 +135,26 @@
 // After cloning some rematerialized instruction, clone any MachProj's that
 // follow it.  Example: Intel zero is XOR, kills flags.  Sparc FP constants
 // use G3 as an address temp.
-int PhaseChaitin::clone_projs( Block *b, uint idx, Node *con, Node *copy, uint &maxlrg ) {
-  Block *bcon = _cfg._bbs[con->_idx];
-  uint cindex = bcon->find_node(con);
-  Node *con_next = bcon->_nodes[cindex+1];
-  if( con_next->in(0) != con || !con_next->is_MachProj() )
-    return false;               // No MachProj's follow
-
-  // Copy kills after the cloned constant
-  Node *kills = con_next->clone();
-  kills->set_req( 0, copy );
-  b->_nodes.insert( idx, kills );
-  _cfg._bbs.map( kills->_idx, b );
-  new_lrg( kills, maxlrg++ );
-  return true;
+int PhaseChaitin::clone_projs(Block* b, uint idx, Node* orig, Node* copy, uint& max_lrg_id) {
+  assert(b->find_node(copy) == (idx - 1), "incorrect insert index for copy kill projections");
+  DEBUG_ONLY( Block* borig = _cfg._bbs[orig->_idx]; )
+  int found_projs = 0;
+  uint cnt = orig->outcnt();
+  for (uint i = 0; i < cnt; i++) {
+    Node* proj = orig->raw_out(i);
+    if (proj->is_MachProj()) {
+      assert(proj->outcnt() == 0, "only kill projections are expected here");
+      assert(_cfg._bbs[proj->_idx] == borig, "incorrect block for kill projections");
+      found_projs++;
+      // Copy kill projections after the cloned node
+      Node* kills = proj->clone();
+      kills->set_req(0, copy);
+      b->_nodes.insert(idx++, kills);
+      _cfg._bbs.map(kills->_idx, b);
+      new_lrg(kills, max_lrg_id++);
+    }
+  }
+  return found_projs;
 }
 
 //------------------------------compact----------------------------------------
@@ -464,8 +470,7 @@
               copy = m->clone();
               // Insert the copy in the basic block, just before us
               b->_nodes.insert( l++, copy );
-              if( _phc.clone_projs( b, l, m, copy, _phc._maxlrg ) )
-                l++;
+              l += _phc.clone_projs(b, l, m, copy, _phc._maxlrg);
             } else {
               const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()];
               copy = new (C) MachSpillCopyNode( m, *rm, *rm );
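
The clone_projs() rewrite above stops assuming a single MachProj sits immediately after the constant in the block; instead it scans the original node's users, clones every kill projection it finds, and returns the count so callers (the loop just above, and reg_split.cpp below) can advance their insertion cursor by that amount. A toy sketch of the scan-users-and-count pattern, with stand-in node types rather than HotSpot's:

    #include <vector>

    // Toy stand-in for HotSpot's Node; only what the pattern needs.
    struct Node {
      bool is_kill_proj = false;
      std::vector<Node*> outs;   // users of this node
    };

    // Clone every kill projection hanging off 'orig' and splice the clones
    // into 'block' at 'idx'; return how many were cloned so the caller can
    // advance its insertion index by that count (0 when none follow).
    int clone_projs(std::vector<Node*>& block, size_t idx, const Node* orig) {
      int found_projs = 0;
      for (Node* proj : orig->outs) {
        if (proj->is_kill_proj) {
          block.insert(block.begin() + idx++, new Node(*proj));  // leaked: toy code
          ++found_projs;
        }
      }
      return found_projs;
    }
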
--- a/src/share/vm/opto/compile.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/opto/compile.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -642,7 +642,7 @@
                   _inlining_progress(false),
                   _inlining_incrementally(false),
                   _print_inlining_list(NULL),
-                  _print_inlining(0) {
+                  _print_inlining_idx(0) {
   C = this;
 
   CompileWrapper cw(this);
@@ -667,6 +667,8 @@
   set_print_assembly(print_opto_assembly);
   set_parsed_irreducible_loop(false);
 #endif
+  set_print_inlining(PrintInlining || method()->has_option("PrintInlining") NOT_PRODUCT( || PrintOptoInlining));
+  set_print_intrinsics(PrintIntrinsics || method()->has_option("PrintIntrinsics"));
 
   if (ProfileTraps) {
     // Make sure the method being compiled gets its own MDO,
@@ -698,7 +700,7 @@
   PhaseGVN gvn(node_arena(), estimated_size);
   set_initial_gvn(&gvn);
 
-  if (PrintInlining  || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) {
+  if (print_inlining() || print_intrinsics()) {
     _print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer());
   }
   { // Scope for timing the parser
@@ -832,6 +834,7 @@
   }
 #endif
 
+  NOT_PRODUCT( verify_barriers(); )
   // Now that we know the size of all the monitors we can add a fixed slot
   // for the original deopt pc.
 
@@ -924,7 +927,7 @@
     _inlining_progress(false),
     _inlining_incrementally(false),
     _print_inlining_list(NULL),
-    _print_inlining(0) {
+    _print_inlining_idx(0) {
   C = this;
 
 #ifndef PRODUCT
@@ -3237,6 +3240,72 @@
     }
   }
 }
+
+// Verify GC barriers consistency
+// Currently supported:
+// - G1 pre-barriers (see GraphKit::g1_write_barrier_pre())
+void Compile::verify_barriers() {
+  if (UseG1GC) {
+    // Verify G1 pre-barriers
+    const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active());
+
+    ResourceArea *area = Thread::current()->resource_area();
+    Unique_Node_List visited(area);
+    Node_List worklist(area);
+    // We're going to walk control flow backwards starting from the Root
+    worklist.push(_root);
+    while (worklist.size() > 0) {
+      Node* x = worklist.pop();
+      if (x == NULL || x == top()) continue;
+      if (visited.member(x)) {
+        continue;
+      } else {
+        visited.push(x);
+      }
+
+      if (x->is_Region()) {
+        for (uint i = 1; i < x->req(); i++) {
+          worklist.push(x->in(i));
+        }
+      } else {
+        worklist.push(x->in(0));
+        // We are looking for the pattern:
+        //                            /->ThreadLocal
+        // If->Bool->CmpI->LoadB->AddP->ConL(marking_offset)
+        //              \->ConI(0)
+        // We want to verify that the If and the LoadB have the same control
+        // See GraphKit::g1_write_barrier_pre()
+        if (x->is_If()) {
+          IfNode *iff = x->as_If();
+          if (iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp()) {
+            CmpNode *cmp = iff->in(1)->in(1)->as_Cmp();
+            if (cmp->Opcode() == Op_CmpI && cmp->in(2)->is_Con() && cmp->in(2)->bottom_type()->is_int()->get_con() == 0
+                && cmp->in(1)->is_Load()) {
+              LoadNode* load = cmp->in(1)->as_Load();
+              if (load->Opcode() == Op_LoadB && load->in(2)->is_AddP() && load->in(2)->in(2)->Opcode() == Op_ThreadLocal
+                  && load->in(2)->in(3)->is_Con()
+                  && load->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == marking_offset) {
+
+                Node* if_ctrl = iff->in(0);
+                Node* load_ctrl = load->in(0);
+
+                if (if_ctrl != load_ctrl) {
+                  // Skip possible CProj->NeverBranch in infinite loops
+                  if ((if_ctrl->is_Proj() && if_ctrl->Opcode() == Op_CProj)
+                      && (if_ctrl->in(0)->is_MultiBranch() && if_ctrl->in(0)->Opcode() == Op_NeverBranch)) {
+                    if_ctrl = if_ctrl->in(0)->in(0);
+                  }
+                }
+                assert(load_ctrl != NULL && if_ctrl == load_ctrl, "controls must match");
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+}
+
 #endif
 
 // The Compile object keeps track of failure reasons separately from the ciEnv.
@@ -3510,7 +3579,7 @@
 }
 
 void Compile::dump_inlining() {
-  if (PrintInlining || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) {
+  if (print_inlining() || print_intrinsics()) {
     // Print inlining message for candidates that we couldn't inline
     // for lack of space or non constant receiver
     for (int i = 0; i < _late_inlines.length(); i++) {
@@ -3534,7 +3603,7 @@
       }
     }
     for (int i = 0; i < _print_inlining_list->length(); i++) {
-      tty->print(_print_inlining_list->at(i).ss()->as_string());
+      tty->print(_print_inlining_list->adr_at(i)->ss()->as_string());
     }
   }
 }
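
verify_barriers() above is a conventional backward control-flow walk: a worklist seeded with the root, a visited set, Regions fanning out to all their control inputs, and every other node following in(0) before being pattern-matched against the G1 pre-barrier shape. The traversal skeleton, detached from the barrier pattern and written over toy node types:

    #include <unordered_set>
    #include <vector>

    // Toy node: in[0] is the control input for ordinary nodes; Regions merge
    // several control inputs starting at in[1], mirroring HotSpot's convention.
    struct Node {
      bool is_region = false;
      std::vector<Node*> in;
    };

    // Walk control flow backwards from 'root', visiting each node exactly once
    // and handing every non-Region node to 'check' for pattern matching.
    void walk_control(Node* root, void (*check)(Node*)) {
      std::unordered_set<Node*> visited;
      std::vector<Node*> worklist;
      worklist.push_back(root);
      while (!worklist.empty()) {
        Node* x = worklist.back();
        worklist.pop_back();
        if (x == nullptr || !visited.insert(x).second) continue;
        if (x->is_region) {
          for (size_t i = 1; i < x->in.size(); i++) worklist.push_back(x->in[i]);
        } else {
          if (!x->in.empty()) worklist.push_back(x->in[0]);
          check(x);  // e.g. match the If->Bool->CmpI->LoadB barrier shape here
        }
      }
    }
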
--- a/src/share/vm/opto/compile.hpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/opto/compile.hpp	Wed Jan 15 10:53:50 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,6 +42,7 @@
 #include "runtime/deoptimization.hpp"
 #include "runtime/vmThread.hpp"
 #include "trace/tracing.hpp"
+#include "utilities/ticks.hpp"
 
 class Block;
 class Bundle;
@@ -284,6 +285,8 @@
   bool                  _do_method_data_update; // True if we generate code to update methodDataOops
   int                   _AliasLevel;            // Locally-adjusted version of AliasLevel flag.
   bool                  _print_assembly;        // True if we should dump assembly code for this compilation
+  bool                  _print_inlining;        // True if we should print inlining for this compilation
+  bool                  _print_intrinsics;      // True if we should print intrinsics for this compilation
 #ifndef PRODUCT
   bool                  _trace_opto_output;
   bool                  _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
@@ -384,7 +387,7 @@
   };
 
   GrowableArray<PrintInliningBuffer>* _print_inlining_list;
-  int _print_inlining;
+  int _print_inlining_idx;
 
   // Only keep nodes in the expensive node list that need to be optimized
   void cleanup_expensive_nodes(PhaseIterGVN &igvn);
@@ -396,24 +399,24 @@
  public:
 
   outputStream* print_inlining_stream() const {
-    return _print_inlining_list->at(_print_inlining).ss();
+    return _print_inlining_list->adr_at(_print_inlining_idx)->ss();
   }
 
   void print_inlining_skip(CallGenerator* cg) {
-    if (PrintInlining) {
-      _print_inlining_list->at(_print_inlining).set_cg(cg);
-      _print_inlining++;
-      _print_inlining_list->insert_before(_print_inlining, PrintInliningBuffer());
+    if (_print_inlining) {
+      _print_inlining_list->adr_at(_print_inlining_idx)->set_cg(cg);
+      _print_inlining_idx++;
+      _print_inlining_list->insert_before(_print_inlining_idx, PrintInliningBuffer());
     }
   }
 
   void print_inlining_insert(CallGenerator* cg) {
-    if (PrintInlining) {
+    if (_print_inlining) {
       for (int i = 0; i < _print_inlining_list->length(); i++) {
-        if (_print_inlining_list->at(i).cg() == cg) {
+        if (_print_inlining_list->adr_at(i)->cg() == cg) {
           _print_inlining_list->insert_before(i+1, PrintInliningBuffer());
-          _print_inlining = i+1;
-          _print_inlining_list->at(i).set_cg(NULL);
+          _print_inlining_idx = i+1;
+          _print_inlining_list->adr_at(i)->set_cg(NULL);
           return;
         }
       }
@@ -536,6 +539,10 @@
   int               AliasLevel() const          { return _AliasLevel; }
   bool              print_assembly() const       { return _print_assembly; }
   void          set_print_assembly(bool z)       { _print_assembly = z; }
+  bool              print_inlining() const       { return _print_inlining; }
+  void          set_print_inlining(bool z)       { _print_inlining = z; }
+  bool              print_intrinsics() const     { return _print_intrinsics; }
+  void          set_print_intrinsics(bool z)     { _print_intrinsics = z; }
   // check the CompilerOracle for special behaviours for this compile
   bool          method_has_option(const char * option) {
     return method() != NULL && method()->has_option(option);
@@ -550,20 +557,19 @@
   bool              has_method_handle_invokes() const { return _has_method_handle_invokes;     }
   void          set_has_method_handle_invokes(bool z) {        _has_method_handle_invokes = z; }
 
-  jlong _latest_stage_start_counter;
+  Ticks _latest_stage_start_counter;
 
   void begin_method() {
 #ifndef PRODUCT
     if (_printer) _printer->begin_method(this);
 #endif
-    C->_latest_stage_start_counter = os::elapsed_counter();
+    C->_latest_stage_start_counter.stamp();
   }
 
   void print_method(CompilerPhaseType cpt, int level = 1) {
-    EventCompilerPhase event(UNTIMED);
+    EventCompilerPhase event;
     if (event.should_commit()) {
       event.set_starttime(C->_latest_stage_start_counter);
-      event.set_endtime(os::elapsed_counter());
       event.set_phase((u1) cpt);
       event.set_compileID(C->_compile_id);
       event.set_phaseLevel(level);
@@ -574,14 +580,13 @@
 #ifndef PRODUCT
     if (_printer) _printer->print_method(this, CompilerPhaseTypeHelper::to_string(cpt), level);
 #endif
-    C->_latest_stage_start_counter = os::elapsed_counter();
+    C->_latest_stage_start_counter.stamp();
   }
 
   void end_method(int level = 1) {
-    EventCompilerPhase event(UNTIMED);
+    EventCompilerPhase event;
     if (event.should_commit()) {
       event.set_starttime(C->_latest_stage_start_counter);
-      event.set_endtime(os::elapsed_counter());
       event.set_phase((u1) PHASE_END);
       event.set_compileID(C->_compile_id);
       event.set_phaseLevel(level);
@@ -1091,6 +1096,9 @@
   // Print bytecodes, including the scope inlining tree
   void print_codes();
 
+  // Verify GC barrier patterns
+  void verify_barriers() PRODUCT_RETURN;
+
   // End-of-run dumps.
   static void print_statistics() PRODUCT_RETURN;
 
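
The at() to adr_at() changes above matter because HotSpot's GrowableArray::at() returns the element by value; calling set_cg() on that temporary silently dropped the update, while adr_at() returns a pointer into the backing store. The same pitfall, reproduced with an explicit copy over a std::vector:

    #include <cassert>
    #include <vector>

    struct PrintInliningBuffer { int cg = 0; void set_cg(int v) { cg = v; } };

    int main() {
      std::vector<PrintInliningBuffer> list(1);

      PrintInliningBuffer copy = list[0];  // by value, like GrowableArray::at()
      copy.set_cg(42);                     // mutates the temporary copy only
      assert(list[0].cg == 0);             // the update was lost: the bug

      list[0].set_cg(42);                  // through a reference, like adr_at()->set_cg()
      assert(list[0].cg == 42);            // the update sticks
      return 0;
    }
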
--- a/src/share/vm/opto/doCall.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/opto/doCall.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -42,9 +42,9 @@
 #include "runtime/sharedRuntime.hpp"
 
 void trace_type_profile(Compile* C, ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) {
-  if (TraceTypeProfile || PrintInlining NOT_PRODUCT(|| PrintOptoInlining)) {
+  if (TraceTypeProfile || C->print_inlining()) {
     outputStream* out = tty;
-    if (!PrintInlining) {
+    if (!C->print_inlining()) {
       if (NOT_PRODUCT(!PrintOpto &&) !PrintCompilation) {
         method->print_short_name();
         tty->cr();
--- a/src/share/vm/opto/graphKit.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/opto/graphKit.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -3596,7 +3596,7 @@
   Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);
 
   // if (!marking)
-  __ if_then(marking, BoolTest::ne, zero); {
+  __ if_then(marking, BoolTest::ne, zero, unlikely); {
     BasicType index_bt = TypeX_X->basic_type();
     assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 PtrQueue::_index with wrong size.");
     Node* index   = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);
@@ -3604,7 +3604,7 @@
     if (do_load) {
       // load original value
       // alias_idx correct??
-      pre_val = __ load(no_ctrl, adr, val_type, bt, alias_idx);
+      pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
     }
 
     // if (pre_val != NULL)
--- a/src/share/vm/opto/library_call.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/opto/library_call.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -525,7 +525,7 @@
   Compile* C = kit.C;
   int nodes = C->unique();
 #ifndef PRODUCT
-  if ((PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) && Verbose) {
+  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
     char buf[1000];
     const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
     tty->print_cr("Intrinsic %s", str);
@@ -536,7 +536,7 @@
 
   // Try to inline the intrinsic.
   if (kit.try_to_inline()) {
-    if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+    if (C->print_intrinsics() || C->print_inlining()) {
       C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
     }
     C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
@@ -552,7 +552,7 @@
   }
 
   // The intrinsic bailed out
-  if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+  if (C->print_intrinsics() || C->print_inlining()) {
     if (jvms->has_method()) {
       // Not a root compile.
       const char* msg = is_virtual() ? "failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)";
@@ -574,7 +574,7 @@
   int nodes = C->unique();
 #ifndef PRODUCT
   assert(is_predicted(), "sanity");
-  if ((PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) && Verbose) {
+  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
     char buf[1000];
     const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
     tty->print_cr("Predicate for intrinsic %s", str);
@@ -585,7 +585,7 @@
 
   Node* slow_ctl = kit.try_to_predicate();
   if (!kit.failing()) {
-    if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+    if (C->print_intrinsics() || C->print_inlining()) {
       C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
     }
     C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
@@ -599,7 +599,7 @@
   }
 
   // The intrinsic bailed out
-  if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+  if (C->print_intrinsics() || C->print_inlining()) {
     if (jvms->has_method()) {
       // Not a root compile.
       const char* msg = "failed to generate predicate for intrinsic";
@@ -2262,7 +2262,7 @@
     const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
 
 #ifndef PRODUCT
-    if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
+    if (C->print_intrinsics() || C->print_inlining()) {
       tty->print("  from base type: ");  adr_type->dump();
       tty->print("  sharpened value: ");  tjp->dump();
     }
@@ -3158,7 +3158,7 @@
   if (mirror_con == NULL)  return false;  // cannot happen?
 
 #ifndef PRODUCT
-  if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
+  if (C->print_intrinsics() || C->print_inlining()) {
     ciType* k = mirror_con->java_mirror_type();
     if (k) {
       tty->print("Inlining %s on constant Class ", vmIntrinsics::name_at(intrinsic_id()));
@@ -3846,7 +3846,7 @@
 // Method.invoke() and auxiliary frames.
 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
 #ifndef PRODUCT
-  if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
     tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
   }
 #endif
@@ -3858,7 +3858,7 @@
   const TypeInt* caller_depth_type = _gvn.type(caller_depth_node)->isa_int();
   if (caller_depth_type == NULL || !caller_depth_type->is_con()) {
 #ifndef PRODUCT
-    if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+    if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
       tty->print_cr("  Bailing out because caller depth was not a constant");
     }
 #endif
@@ -3873,7 +3873,7 @@
   int caller_depth = caller_depth_type->get_con() - 1;
   if (caller_depth < 0) {
 #ifndef PRODUCT
-    if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+    if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
       tty->print_cr("  Bailing out because caller depth was %d", caller_depth);
     }
 #endif
@@ -3882,7 +3882,7 @@
 
   if (!jvms()->has_method()) {
 #ifndef PRODUCT
-    if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+    if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
       tty->print_cr("  Bailing out because intrinsic was inlined at top level");
     }
 #endif
@@ -3919,7 +3919,7 @@
 
   if (inlining_depth == 0) {
 #ifndef PRODUCT
-    if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+    if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
       tty->print_cr("  Bailing out because caller depth (%d) exceeded inlining depth (%d)", caller_depth_type->get_con(), _depth);
       tty->print_cr("  JVM state at this point:");
       for (int i = _depth; i >= 1; i--) {
@@ -3939,7 +3939,7 @@
   set_result(makecon(TypeInstPtr::make(caller_mirror)));
 
 #ifndef PRODUCT
-  if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
     tty->print_cr("  Succeeded: caller = %s.%s, caller depth = %d, depth = %d", caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), caller_depth_type->get_con(), _depth);
     tty->print_cr("  JVM state at this point:");
     for (int i = _depth; i >= 1; i--) {
--- a/src/share/vm/opto/loopTransform.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/opto/loopTransform.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -623,8 +623,6 @@
 }
 
 
-#define MAX_UNROLL 16 // maximum number of unrolls for main loop
-
 //------------------------------policy_unroll----------------------------------
 // Return TRUE or FALSE if the loop should be unrolled or not.  Unroll if
 // the loop is a CountedLoop and the body is small enough.
@@ -641,7 +639,7 @@
   if (cl->trip_count() <= (uint)(cl->is_normal_loop() ? 2 : 1)) return false;
 
   int future_unroll_ct = cl->unrolled_count() * 2;
-  if (future_unroll_ct > MAX_UNROLL) return false;
+  if (future_unroll_ct > LoopMaxUnroll) return false;
 
   // Check for initial stride being a small enough constant
   if (abs(cl->stride_con()) > (1<<2)*future_unroll_ct) return false;
@@ -1956,7 +1954,7 @@
       // Find loads off the surviving projection; remove their control edge
       for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) {
         Node* cd = dp->fast_out(i); // Control-dependent node
-        if( cd->is_Load() ) {   // Loads can now float around in the loop
+        if (cd->is_Load() && cd->depends_only_on_test()) {   // Loads can now float around in the loop
           // Allow the load to float around in the loop, or before it
           // but NOT before the pre-loop.
           _igvn.replace_input_of(cd, 0, ctrl); // ctrl, not NULL
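
Replacing the MAX_UNROLL #define with the LoopMaxUnroll product flag (added in c2_globals.hpp above) keeps the same doubling check in policy_unroll() but makes the ceiling tunable per JVM invocation. The arithmetic is tiny; a standalone sketch with the flag modeled as a plain variable:

    // Stand-in for the new -XX:LoopMaxUnroll product flag (default 16).
    static int LoopMaxUnroll = 16;

    // policy_unroll() doubles the unroll count each round; refuse the round
    // that would push the count past the limit.
    bool may_unroll_again(int unrolled_count) {
      int future_unroll_ct = unrolled_count * 2;
      return future_unroll_ct <= LoopMaxUnroll;
    }
    // Counts 1, 2, 4, 8 pass (futures 2..16); at 16 the future of 32 is rejected.
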
--- a/src/share/vm/opto/matcher.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/opto/matcher.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -464,17 +464,17 @@
   C->FIRST_STACK_mask().Clear();
 
   // Add in the incoming argument area
-  OptoReg::Name init = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
-  for (i = init; i < _in_arg_limit; i = OptoReg::add(i,1))
+  OptoReg::Name init_in = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
+  for (i = init_in; i < _in_arg_limit; i = OptoReg::add(i,1)) {
     C->FIRST_STACK_mask().Insert(i);
-
+  }
   // Add in all bits past the outgoing argument area
   guarantee(RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1)),
             "must be able to represent all call arguments in reg mask");
-  init = _out_arg_limit;
-  for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1))
+  OptoReg::Name init = _out_arg_limit;
+  for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1)) {
     C->FIRST_STACK_mask().Insert(i);
-
+  }
   // Finally, set the "infinite stack" bit.
   C->FIRST_STACK_mask().set_AllStack();
 
@@ -506,16 +506,36 @@
      idealreg2spillmask[Op_VecS]->OR(C->FIRST_STACK_mask());
   }
   if (Matcher::vector_size_supported(T_FLOAT,2)) {
+    // For VecD we need dual alignment and 8 bytes (2 slots) for spills.
+    // RA guarantees such alignment since it is needed for Double and Long values.
     *idealreg2spillmask[Op_VecD] = *idealreg2regmask[Op_VecD];
      idealreg2spillmask[Op_VecD]->OR(aligned_stack_mask);
   }
   if (Matcher::vector_size_supported(T_FLOAT,4)) {
+    // For VecX we need quad alignment and 16 bytes (4 slots) for spills.
+    //
+    // RA can use input arguments stack slots for spills but until RA
+    // we don't know frame size and offset of input arg stack slots.
+    //
+    // Exclude the last input arg stack slots to avoid spilling vectors there;
+    // otherwise vector spills could stomp over stack slots in the caller frame.
+    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
+    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecX); k++) {
+      aligned_stack_mask.Remove(in);
+      in = OptoReg::add(in, -1);
+    }
      aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecX);
      assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
     *idealreg2spillmask[Op_VecX] = *idealreg2regmask[Op_VecX];
      idealreg2spillmask[Op_VecX]->OR(aligned_stack_mask);
   }
   if (Matcher::vector_size_supported(T_FLOAT,8)) {
+    // For VecY we need octuple alignment and 32 bytes (8 slots) for spills.
+    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
+    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecY); k++) {
+      aligned_stack_mask.Remove(in);
+      in = OptoReg::add(in, -1);
+    }
      aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecY);
      assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
     *idealreg2spillmask[Op_VecY] = *idealreg2regmask[Op_VecY];
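
The VecX/VecY hunks above carve the last SlotsPerVec-1 incoming-argument slots out of aligned_stack_mask before clear_to_sets(), so no spill candidate remains whose vector extent would reach past the frame boundary into the caller. A bitmask sketch of that exclusion under toy assumptions (fixed mask width, four slots per vector):

    #include <bitset>
    #include <cstdio>

    const int kSlots = 64;        // toy mask width
    const int kSlotsPerVec = 4;   // stand-in for RegMask::SlotsPerVecX

    // Drop the last (kSlotsPerVec - 1) incoming-argument slots from the spill
    // mask, so a kSlotsPerVec-wide spill based at any surviving arg slot still
    // ends inside the current frame.
    void exclude_tail_arg_slots(std::bitset<kSlots>& mask,
                                int init_in, int in_arg_limit) {
      int in = in_arg_limit - 1;
      for (int k = 1; in >= init_in && k < kSlotsPerVec; ++k) {
        mask.reset(in);
        --in;
      }
    }

    int main() {
      std::bitset<kSlots> mask;
      mask.set();                          // everything spillable to start with
      exclude_tail_arg_slots(mask, 8, 16); // arg slots 8..15
      std::printf("slots 13-15 cleared: %d\n",
                  !mask.test(13) && !mask.test(14) && !mask.test(15));
      return 0;
    }
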
--- a/src/share/vm/opto/memnode.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/opto/memnode.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -1957,6 +1957,11 @@
       if (t != NULL) {
         // constant oop => constant klass
         if (offset == java_lang_Class::array_klass_offset_in_bytes()) {
+          if (t->is_void()) {
+            // We cannot create a void array.  Since void is a primitive type, return a null
+            // klass.  Users of this result need to do a null check on the returned klass.
+            return TypePtr::NULL_PTR;
+          }
           return TypeKlassPtr::make(ciArrayKlass::make(t));
         }
         if (!t->is_klass()) {
--- a/src/share/vm/opto/memnode.hpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/opto/memnode.hpp	Wed Jan 15 10:53:50 2014 -0800
@@ -204,6 +204,17 @@
 protected:
   const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                      ciKlass* klass) const;
+  // depends_only_on_test is almost always true, and needs to be almost always
+  // true to enable key hoisting & commoning optimizations.  However, for the
+  // special case of RawPtr loads from TLS top & end, and other loads performed by
+  // GC barriers, the control edge carries the dependence preventing hoisting past
+  // a Safepoint instead of the memory edge.  (An unfortunate consequence of having
+  // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes
+  // which produce results (new raw memory state) inside of loops preventing all
+  // manner of other optimizations).  Basically, it's ugly but so is the alternative.
+  // See comment in macro.cpp, around line 125 expand_allocate_common().
+  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
+
 };
 
 //------------------------------LoadBNode--------------------------------------
@@ -370,16 +381,6 @@
   virtual uint ideal_reg() const { return Op_RegP; }
   virtual int store_Opcode() const { return Op_StoreP; }
   virtual BasicType memory_type() const { return T_ADDRESS; }
-  // depends_only_on_test is almost always true, and needs to be almost always
-  // true to enable key hoisting & commoning optimizations.  However, for the
-  // special case of RawPtr loads from TLS top & end, the control edge carries
-  // the dependence preventing hoisting past a Safepoint instead of the memory
-  // edge.  (An unfortunate consequence of having Safepoints not set Raw
-  // Memory; itself an unfortunate consequence of having Nodes which produce
-  // results (new raw memory state) inside of loops preventing all manner of
-  // other optimizations).  Basically, it's ugly but so is the alternative.
-  // See comment in macro.cpp, around line 125 expand_allocate_common().
-  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
 };
 
 
@@ -393,16 +394,6 @@
   virtual uint ideal_reg() const { return Op_RegN; }
   virtual int store_Opcode() const { return Op_StoreN; }
   virtual BasicType memory_type() const { return T_NARROWOOP; }
-  // depends_only_on_test is almost always true, and needs to be almost always
-  // true to enable key hoisting & commoning optimizations.  However, for the
-  // special case of RawPtr loads from TLS top & end, the control edge carries
-  // the dependence preventing hoisting past a Safepoint instead of the memory
-  // edge.  (An unfortunate consequence of having Safepoints not set Raw
-  // Memory; itself an unfortunate consequence of having Nodes which produce
-  // results (new raw memory state) inside of loops preventing all manner of
-  // other optimizations).  Basically, it's ugly but so is the alternative.
-  // See comment in macro.cpp, around line 125 expand_allocate_common().
-  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
 };
 
 //------------------------------LoadKlassNode----------------------------------
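
Moving depends_only_on_test() from LoadPNode and LoadNNode up into LoadNode (above) extends the RawPtr control-dependence exception to every load subclass, including the LoadB emitted by the G1 pre-barrier, which is what the loopTransform.cpp guard relies on. The refactor itself is plain virtual-function consolidation; a sketch over toy types, not HotSpot's:

    // Toy hierarchy mirroring the hoist: the base class owns the policy,
    // subclasses only describe their address type.
    struct Type { bool is_raw; };
    static const Type RAW_PTR = { true };
    static const Type OOP_PTR = { false };

    struct LoadNode {
      virtual ~LoadNode() {}
      virtual const Type* adr_type() const = 0;
      // Hoisted here once: raw-pointer loads (TLS top/end, GC barrier loads)
      // must keep their control edge; everything else may float.
      virtual bool depends_only_on_test() const { return !adr_type()->is_raw; }
    };

    struct LoadPNode : LoadNode {  // no per-subclass override needed any more
      const Type* adr_type() const override { return &OOP_PTR; }
    };
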
--- a/src/share/vm/opto/reg_split.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/opto/reg_split.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -391,10 +391,15 @@
 #endif
   // See if the cloned def kills any flags, and copy those kills as well
   uint i = insidx+1;
-  if( clone_projs( b, i, def, spill, maxlrg ) ) {
+  int found_projs = clone_projs(b, i, def, spill, maxlrg);
+  if (found_projs > 0) {
     // Adjust the point where we go hi-pressure
-    if( i <= b->_ihrp_index ) b->_ihrp_index++;
-    if( i <= b->_fhrp_index ) b->_fhrp_index++;
+    if (i <= b->_ihrp_index) {
+      b->_ihrp_index += found_projs;
+    }
+    if (i <= b->_fhrp_index) {
+      b->_fhrp_index += found_projs;
+    }
   }
 
   return spill;
--- a/src/share/vm/prims/jni.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/prims/jni.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -5042,9 +5042,15 @@
   tty->print_cr("Running test: " #unit_test_function_call); \
   unit_test_function_call
 
+// Forward declarations
+void TestReservedSpace_test();
+void TestReserveMemorySpecial_test();
+
 void execute_internal_vm_tests() {
   if (ExecuteInternalVMTests) {
     tty->print_cr("Running internal VM tests");
+    run_unit_test(TestReservedSpace_test());
+    run_unit_test(TestReserveMemorySpecial_test());
     run_unit_test(GCTimerAllTest::all());
     run_unit_test(arrayOopDesc::test_max_array_length());
     run_unit_test(CollectedHeap::test_is_in());
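
The jni.cpp hunk registers the two new memory tests with the existing run_unit_test macro, which logs the call text and then executes the call. A minimal self-contained variant of such a macro (wrapped in do/while, unlike the original shown above, and with a hypothetical test function):

    #include <cstdio>

    // Log the call text, then expand the call in place; the original uses
    // tty->print_cr instead of printf.
    #define run_unit_test(unit_test_function_call)                   \
      do {                                                           \
        std::printf("Running test: %s\n", #unit_test_function_call); \
        unit_test_function_call;                                     \
      } while (0)

    static void TestSomething_test() { /* hypothetical test body */ }

    int main() {
      run_unit_test(TestSomething_test());
      return 0;
    }
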
--- a/src/share/vm/prims/methodHandles.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/prims/methodHandles.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -175,30 +175,32 @@
 }
 
 oop MethodHandles::init_method_MemberName(oop mname_oop, methodOop m, bool do_dispatch,
-                                          klassOop receiver_limit) {
+                                          klassOop resolved_klass) {
   AccessFlags mods = m->access_flags();
   int flags = (jushort)( mods.as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS );
   int vmindex = methodOopDesc::nonvirtual_vtable_index; // implies never any dispatch
-  klassOop mklass = m->method_holder();
-  if (receiver_limit == NULL)
-    receiver_limit = mklass;
+  bool is_itable_call = false;
+  klassOop m_klass = m->method_holder();
+  // resolved_klass is a copy of CallInfo::resolved_klass, if available
+  if (resolved_klass == NULL)
+    resolved_klass = m_klass;
   if (m->is_initializer()) {
     flags |= IS_CONSTRUCTOR | (JVM_REF_invokeSpecial << REFERENCE_KIND_SHIFT);
   } else if (mods.is_static()) {
     flags |= IS_METHOD | (JVM_REF_invokeStatic << REFERENCE_KIND_SHIFT);
-  } else if (receiver_limit != mklass &&
-             !Klass::cast(receiver_limit)->is_subtype_of(mklass)) {
+  } else if (resolved_klass != m_klass &&
+             !Klass::cast(resolved_klass)->is_subtype_of(m_klass)) {
     return NULL;  // bad receiver limit
-  } else if (Klass::cast(receiver_limit)->is_interface() &&
-             Klass::cast(mklass)->is_interface()) {
+  } else if (Klass::cast(resolved_klass)->is_interface() &&
+             Klass::cast(m_klass)->is_interface()) {
     flags |= IS_METHOD | (JVM_REF_invokeInterface << REFERENCE_KIND_SHIFT);
-    receiver_limit = mklass;  // ignore passed-in limit; interfaces are interconvertible
     vmindex = klassItable::compute_itable_index(m);
-  } else if (mklass != receiver_limit && Klass::cast(mklass)->is_interface()) {
+    is_itable_call = true;
+  } else if (m_klass != resolved_klass && Klass::cast(m_klass)->is_interface()) {
     flags |= IS_METHOD | (JVM_REF_invokeVirtual << REFERENCE_KIND_SHIFT);
     // it is a miranda method, so m->vtable_index is not what we want
     ResourceMark rm;
-    klassVtable* vt = instanceKlass::cast(receiver_limit)->vtable();
+    klassVtable* vt = instanceKlass::cast(resolved_klass)->vtable();
     vmindex = vt->index_of_miranda(m->name(), m->signature());
   } else if (!do_dispatch || m->can_be_statically_bound()) {
     flags |= IS_METHOD | (JVM_REF_invokeSpecial << REFERENCE_KIND_SHIFT);
@@ -207,10 +209,36 @@
     vmindex = m->vtable_index();
   }
 
+  if (vmindex >= 0 && !is_itable_call) {
+    if (Klass::cast(m_klass)->is_interface()) {
+      // This is a vtable call to an interface method (abstract "miranda method").
+      // The vtable index is meaningless without a class (not interface) receiver type, so get one.
+      // (LinkResolver should help us figure this out.)
+      KlassHandle m_klass_non_interface = resolved_klass;
+      if (m_klass_non_interface->is_interface()) {
+        m_klass_non_interface = SystemDictionary::Object_klass();
+#ifdef ASSERT
+        { ResourceMark rm;
+          methodOop m2 = m_klass_non_interface->vtable()->method_at(vmindex);
+          assert(m->name() == m2->name() && m->signature() == m2->signature(),
+                 err_msg("at %d, %s != %s", vmindex,
+                         m->name_and_sig_as_C_string(), m2->name_and_sig_as_C_string()));
+        }
+#endif //ASSERT
+      }
+      if (!m->is_public()) {
+        assert(m->is_public(), "virtual call must be to public interface method");
+        return NULL;  // elicit an error later in product build
+      }
+      assert(Klass::cast(resolved_klass)->is_subtype_of(m_klass_non_interface()), "virtual call must be type-safe");
+      m_klass = m_klass_non_interface();
+    }
+  }
+
   java_lang_invoke_MemberName::set_flags(mname_oop,    flags);
   java_lang_invoke_MemberName::set_vmtarget(mname_oop, m);
   java_lang_invoke_MemberName::set_vmindex(mname_oop,  vmindex);   // vtable/itable index
-  java_lang_invoke_MemberName::set_clazz(mname_oop,    Klass::cast(receiver_limit)->java_mirror());
+  java_lang_invoke_MemberName::set_clazz(mname_oop,    Klass::cast(m_klass)->java_mirror());
   // Note:  name and type can be lazily computed by resolve_MemberName,
   // if Java code needs them as resolved String and MethodType objects.
   // The clazz must be eagerly stored, because it provides a GC
@@ -571,7 +599,7 @@
 // An unresolved member name is a mere symbolic reference.
 // Resolving it plants a vmtarget/vmindex in it,
 // which refers directly to JVM internals.
-Handle MethodHandles::resolve_MemberName(Handle mname, TRAPS) {
+Handle MethodHandles::resolve_MemberName(Handle mname, KlassHandle caller, TRAPS) {
   Handle empty;
   assert(java_lang_invoke_MemberName::is_instance(mname()), "");
 
@@ -650,21 +678,49 @@
         if (ref_kind == JVM_REF_invokeStatic) {
           //do_dispatch = false;  // no need, since statics are never dispatched
           LinkResolver::resolve_static_call(result,
-                        defc, name, type, KlassHandle(), false, false, THREAD);
+                        defc, name, type, caller, caller.not_null(), false, THREAD);
         } else if (ref_kind == JVM_REF_invokeInterface) {
           LinkResolver::resolve_interface_call(result, Handle(), defc,
-                        defc, name, type, KlassHandle(), false, false, THREAD);
+                        defc, name, type, caller, caller.not_null(), false, THREAD);
         } else if (mh_invoke_id != vmIntrinsics::_none) {
           assert(!is_signature_polymorphic_static(mh_invoke_id), "");
           LinkResolver::resolve_handle_call(result,
-                        defc, name, type, KlassHandle(), THREAD);
+                        defc, name, type, caller, THREAD);
         } else if (ref_kind == JVM_REF_invokeSpecial) {
           do_dispatch = false;  // force non-virtual linkage
           LinkResolver::resolve_special_call(result,
-                        defc, name, type, KlassHandle(), false, THREAD);
+                        defc, name, type, caller, caller.not_null(), THREAD);
+          // CR 8029533:
+          // As a corner case, invokespecial can return a method *below* its resolved_klass.
+          // Since method search *starts* at the resolved_klass, the eventual
+          // method is almost always in a supertype *above* the resolved_klass.
+          // This pattern breaks when an invokespecial "over-reaches" beyond an
+          // immediate super to a method overridden by a super class.
+          // In that case, the selected method will be below the resolved_klass.
+          // (This is the behavior enabled by the famous ACC_SUPER classfile flag.)
+          //
+          // Downstream of this code, we make assumptions about resolved_klass being below m.
+          // (See init_method_MemberName, the comment "bad receiver limit".)
+          // We basically want to patch result._resolved_klass to be m.method_holder().
+          // The simplest way to get this happier outcome is to re-resolve.
+          if (!HAS_PENDING_EXCEPTION &&
+              caller.not_null() &&
+              result.resolved_method().not_null()) {
+            // this is the m_klass value that will be checked later:
+            klassOop m_klass = result.resolved_method()->method_holder();
+            if (m_klass != result.resolved_klass()() &&
+                Klass::cast(m_klass)->is_subtype_of(result.resolved_klass()())) {
+              KlassHandle adjusted_defc(THREAD, m_klass);
+              LinkResolver::resolve_special_call(result,
+                            adjusted_defc, name, type, caller, caller.not_null(), THREAD);
+              assert(HAS_PENDING_EXCEPTION  // if there is something like an OOM, pass it up to caller
+                     || result.resolved_method()->method_holder() == adjusted_defc(),
+                     "same method, different resolved_klass");
+            }
+          }
         } else if (ref_kind == JVM_REF_invokeVirtual) {
           LinkResolver::resolve_virtual_call(result, Handle(), defc,
-                        defc, name, type, KlassHandle(), false, false, THREAD);
+                        defc, name, type, caller, caller.not_null(), false, THREAD);
         } else {
           assert(false, err_msg("ref_kind=%d", ref_kind));
         }
@@ -681,7 +737,7 @@
         assert(!HAS_PENDING_EXCEPTION, "");
         if (name == vmSymbols::object_initializer_name()) {
           LinkResolver::resolve_special_call(result,
-                        defc, name, type, KlassHandle(), false, THREAD);
+                        defc, name, type, caller, caller.not_null(), THREAD);
         } else {
           break;                // will throw after end of switch
         }
@@ -1025,7 +1081,12 @@
   if (VerifyMethodHandles && caller_jh != NULL &&
       java_lang_invoke_MemberName::clazz(mname()) != NULL) {
     klassOop reference_klass = java_lang_Class::as_klassOop(java_lang_invoke_MemberName::clazz(mname()));
-    if (reference_klass != NULL) {
+    if (reference_klass != NULL && Klass::cast(reference_klass)->oop_is_objArray()) {
+      reference_klass = objArrayKlass::cast(reference_klass)->bottom_klass();
+    }
+
+    // Reflection::verify_class_access can only handle instance classes.
+    if (reference_klass != NULL && Klass::cast(reference_klass)->oop_is_instance()) {
       // Emulate LinkResolver::check_klass_accessability.
       klassOop caller = java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(caller_jh));
       if (!Reflection::verify_class_access(caller,
@@ -1036,7 +1097,11 @@
     }
   }
 
-  Handle resolved = MethodHandles::resolve_MemberName(mname, CHECK_NULL);
+  KlassHandle caller(THREAD,
+                     caller_jh == NULL ? (klassOop) NULL :
+                     java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(caller_jh)));
+  Handle resolved = MethodHandles::resolve_MemberName(mname, caller, CHECK_NULL);
+
   if (resolved.is_null()) {
     int flags = java_lang_invoke_MemberName::flags(mname());
     int ref_kind = (flags >> REFERENCE_KIND_SHIFT) & REFERENCE_KIND_MASK;
--- a/src/share/vm/prims/methodHandles.hpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/prims/methodHandles.hpp	Wed Jan 15 10:53:50 2014 -0800
@@ -51,12 +51,12 @@
 
  public:
   // working with member names
-  static Handle resolve_MemberName(Handle mname, TRAPS); // compute vmtarget/vmindex from name/type
+  static Handle resolve_MemberName(Handle mname, KlassHandle caller, TRAPS); // compute vmtarget/vmindex from name/type
   static void expand_MemberName(Handle mname, int suppress, TRAPS);  // expand defc/name/type if missing
   static Handle new_MemberName(TRAPS);  // must be followed by init_MemberName
   static oop init_MemberName(oop mname_oop, oop target_oop); // compute vmtarget/vmindex from target
   static oop init_method_MemberName(oop mname_oop, methodOop m, bool do_dispatch,
-                                    klassOop receiver_limit);
+                                    klassOop resolved_klass);
   static oop init_field_MemberName(oop mname_oop, klassOop field_holder,
                                    AccessFlags mods, oop type, oop name,
                                    intptr_t offset, bool is_setter = false);
--- a/src/share/vm/runtime/arguments.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/runtime/arguments.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -2549,7 +2549,9 @@
          FLAG_SET_CMDLINE(uintx, MaxNewSize, NewSize);
       }
 
+#ifndef _ALLBSD_SOURCE  // UseLargePages is not yet supported on BSD.
       FLAG_SET_DEFAULT(UseLargePages, true);
+#endif
 
       // Increase some data structure sizes for efficiency
       FLAG_SET_CMDLINE(uintx, BaseFootPrintEstimate, MaxHeapSize);
@@ -3111,6 +3113,10 @@
   UNSUPPORTED_OPTION(UseG1GC, "G1 GC");
 #endif
 
+#ifdef _ALLBSD_SOURCE  // UseLargePages is not yet supported on BSD.
+  UNSUPPORTED_OPTION(UseLargePages, "-XX:+UseLargePages");
+#endif
+
 #ifndef PRODUCT
   if (TraceBytecodesAt != 0) {
     TraceBytecodes = true;
--- a/src/share/vm/runtime/frame.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/runtime/frame.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "compiler/abstractCompiler.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/oopMapCache.hpp"
@@ -559,7 +560,7 @@
 
   st->print("%s frame (sp=" INTPTR_FORMAT " unextended sp=" INTPTR_FORMAT, print_name(), sp(), unextended_sp());
   if (sp() != NULL)
-    st->print(", fp=" INTPTR_FORMAT ", pc=" INTPTR_FORMAT, fp(), pc());
+    st->print(", fp=" INTPTR_FORMAT ", real_fp=" INTPTR_FORMAT ", pc=" INTPTR_FORMAT, fp(), real_fp(), pc());
 
   if (StubRoutines::contains(pc())) {
     st->print_cr(")");
@@ -720,10 +721,14 @@
     } else if (_cb->is_buffer_blob()) {
       st->print("v  ~BufferBlob::%s", ((BufferBlob *)_cb)->name());
     } else if (_cb->is_nmethod()) {
-      methodOop m = ((nmethod *)_cb)->method();
+      nmethod* nm = (nmethod*)_cb;
+      methodOop m = nm->method();
       if (m != NULL) {
         m->name_and_sig_as_C_string(buf, buflen);
-        st->print("J  %s", buf);
+        st->print("J %d%s %s %s (%d bytes) @ " PTR_FORMAT " [" PTR_FORMAT "+0x%x]",
+                  nm->compile_id(), (nm->is_osr_method() ? "%" : ""),
+                  ((nm->compiler() != NULL) ? nm->compiler()->name() : ""),
+                  buf, m->code_size(), _pc, _cb->code_begin(), _pc - _cb->code_begin());
       } else {
         st->print("J  " PTR_FORMAT, pc());
       }
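
The frame.cpp change above enriches the "J" line in stack dumps with the compile id, an OSR marker, the compiler name, the bytecode size, and PC offsets into the nmethod. A printf mock-up of the resulting format, with every value hypothetical and plain %lx standing in for the PTR_FORMAT macros:

    #include <cstdio>

    int main() {
      // All values are hypothetical; HotSpot fills them from the nmethod.
      int           compile_id = 123;
      bool          is_osr     = false;
      const char*   compiler   = "C2";
      const char*   sig        = "java.lang.String.hashCode()I";
      int           code_size  = 55;
      unsigned long pc    = 0x7f0001234560UL;
      unsigned long begin = 0x7f0001234500UL;

      std::printf("J %d%s %s %s (%d bytes) @ 0x%lx [0x%lx+0x%lx]\n",
                  compile_id, is_osr ? "%" : "", compiler, sig, code_size,
                  pc, begin, pc - begin);
      return 0;
    }
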
--- a/src/share/vm/runtime/globals.hpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/runtime/globals.hpp	Wed Jan 15 10:53:50 2014 -0800
@@ -1917,6 +1917,9 @@
   notproduct(bool, ExecuteInternalVMTests, false,                           \
           "Enable execution of internal VM tests.")                         \
                                                                             \
+  notproduct(bool, VerboseInternalVMTests, false,                           \
+          "Turn on logging for internal VM tests.")                         \
+                                                                            \
   product_pd(bool, UseTLAB, "Use thread-local object allocation")           \
                                                                             \
   product_pd(bool, ResizeTLAB,                                              \
--- a/src/share/vm/runtime/os.hpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/runtime/os.hpp	Wed Jan 15 10:53:50 2014 -0800
@@ -330,8 +330,8 @@
 
   static char*  non_memory_address_word();
   // reserve, commit and pin the entire memory region
-  static char*  reserve_memory_special(size_t size, char* addr = NULL,
-                bool executable = false);
+  static char*  reserve_memory_special(size_t size, size_t alignment,
+                                       char* addr, bool executable);
   static bool   release_memory_special(char* addr, size_t bytes);
   static void   large_page_init();
   static size_t large_page_size();
--- a/src/share/vm/runtime/reflection.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/runtime/reflection.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -460,7 +460,7 @@
   // doesn't have a classloader.
   if ((current_class == NULL) ||
       (current_class == new_class) ||
-      (instanceKlass::cast(new_class)->is_public()) ||
+      (Klass::cast(new_class)->is_public()) ||
       is_same_class_package(current_class, new_class)) {
     return true;
   }
--- a/src/share/vm/runtime/sweeper.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/runtime/sweeper.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,6 +36,7 @@
 #include "runtime/vm_operations.hpp"
 #include "trace/tracing.hpp"
 #include "utilities/events.hpp"
+#include "utilities/ticks.inline.hpp"
 #include "utilities/xmlstream.hpp"
 
 #ifdef ASSERT
@@ -148,12 +149,12 @@
 
 int       NMethodSweeper::_number_of_flushes = 0; // Total of full traversals caused by full cache
 int       NMethodSweeper::_total_nof_methods_reclaimed = 0;
-jlong     NMethodSweeper::_total_time_sweeping = 0;
-jlong     NMethodSweeper::_total_time_this_sweep = 0;
-jlong     NMethodSweeper::_peak_sweep_time = 0;
-jlong     NMethodSweeper::_peak_sweep_fraction_time = 0;
-jlong     NMethodSweeper::_total_disconnect_time = 0;
-jlong     NMethodSweeper::_peak_disconnect_time = 0;
+Tickspan  NMethodSweeper::_total_time_sweeping;
+Tickspan  NMethodSweeper::_total_time_this_sweep;
+Tickspan  NMethodSweeper::_peak_sweep_time;
+Tickspan  NMethodSweeper::_peak_sweep_fraction_time;
+Tickspan  NMethodSweeper::_total_disconnect_time;
+Tickspan  NMethodSweeper::_peak_disconnect_time;
 
 class MarkActivationClosure: public CodeBlobClosure {
 public:
@@ -192,7 +193,7 @@
     _invocations = NmethodSweepFraction;
     _current     = CodeCache::first_nmethod();
     _traversals  += 1;
-    _total_time_this_sweep = 0;
+    _total_time_this_sweep = Tickspan();
 
     if (PrintMethodFlushing) {
       tty->print_cr("### Sweep: stack traversal %d", _traversals);
@@ -256,8 +257,7 @@
 }
 
 void NMethodSweeper::sweep_code_cache() {
-
-  jlong sweep_start_counter = os::elapsed_counter();
+  Ticks sweep_start_counter = Ticks::now();
 
   _flushed_count   = 0;
   _zombified_count = 0;
@@ -322,8 +322,8 @@
     }
   }
 
-  jlong sweep_end_counter = os::elapsed_counter();
-  jlong sweep_time = sweep_end_counter - sweep_start_counter;
+  const Ticks sweep_end_counter = Ticks::now();
+  const Tickspan sweep_time = sweep_end_counter - sweep_start_counter;
   _total_time_sweeping  += sweep_time;
   _total_time_this_sweep += sweep_time;
   _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
@@ -344,7 +344,7 @@
 
 #ifdef ASSERT
   if(PrintMethodFlushing) {
-    tty->print_cr("### sweeper:      sweep time(%d): " INT64_FORMAT, _invocations, (jlong)sweep_time);
+    tty->print_cr("### sweeper:      sweep time(%d): " INT64_FORMAT, _invocations, (jlong)sweep_time.value());
   }
 #endif
 
@@ -529,7 +529,7 @@
     }
   }
 
-  jlong disconnect_start_counter = os::elapsed_counter();
+  Ticks disconnect_start_counter = Ticks::now();
 
   // Traverse the code cache trying to dump the oldest nmethods
   uint curr_max_comp_id = CompileBroker::get_compilation_id();
@@ -577,8 +577,8 @@
     CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
   }
 
-  jlong disconnect_end_counter = os::elapsed_counter();
-  jlong disconnect_time = disconnect_end_counter - disconnect_start_counter;
+  const Ticks disconnect_end_counter = Ticks::now();
+  const Tickspan disconnect_time = disconnect_end_counter - disconnect_start_counter;
   _total_disconnect_time += disconnect_time;
   _peak_disconnect_time = MAX2(disconnect_time, _peak_disconnect_time);
 
@@ -597,7 +597,7 @@
 #ifdef ASSERT
 
   if(PrintMethodFlushing && Verbose) {
-    tty->print_cr("### sweeper: unload time: " INT64_FORMAT, (jlong)disconnect_time);
+    tty->print_cr("### sweeper: unload time: " INT64_FORMAT, (jlong)disconnect_time.value());
   }
 #endif
 }
--- a/src/share/vm/runtime/sweeper.hpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/runtime/sweeper.hpp	Wed Jan 15 10:53:50 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_RUNTIME_SWEEPER_HPP
 #define SHARE_VM_RUNTIME_SWEEPER_HPP
 
+#include "utilities/ticks.hpp"
 // An NmethodSweeper is an incremental cleaner for:
 //    - cleanup of inline caches
 //    - reclamation of unreferenced zombie nmethods
@@ -56,12 +57,12 @@
   // Stat counters
   static int       _number_of_flushes;            // Total of full traversals caused by full cache
   static int       _total_nof_methods_reclaimed;  // Accumulated nof methods flushed
-  static jlong     _total_time_sweeping;          // Accumulated time sweeping
-  static jlong     _total_time_this_sweep;        // Total time this sweep
-  static jlong     _peak_sweep_time;              // Peak time for a full sweep
-  static jlong     _peak_sweep_fraction_time;     // Peak time sweeping one fraction
-  static jlong     _total_disconnect_time;        // Total time cleaning code mem
-  static jlong     _peak_disconnect_time;         // Peak time cleaning code mem
+  static Tickspan     _total_time_sweeping;          // Accumulated time sweeping
+  static Tickspan     _total_time_this_sweep;        // Total time this sweep
+  static Tickspan     _peak_sweep_time;              // Peak time for a full sweep
+  static Tickspan     _peak_sweep_fraction_time;     // Peak time sweeping one fraction
+  static Tickspan     _total_disconnect_time;        // Total time cleaning code mem
+  static Tickspan     _peak_disconnect_time;         // Peak time cleaning code mem
 
   static void process_nmethod(nmethod *nm);
 
@@ -71,11 +72,11 @@
   static long traversal_count()              { return _traversals; }
   static int  number_of_flushes()            { return _number_of_flushes; }
   static int  total_nof_methods_reclaimed()  { return _total_nof_methods_reclaimed; }
-  static jlong total_time_sweeping()         { return _total_time_sweeping; }
-  static jlong peak_sweep_time()             { return _peak_sweep_time; }
-  static jlong peak_sweep_fraction_time()    { return _peak_sweep_fraction_time; }
-  static jlong total_disconnect_time()       { return _total_disconnect_time; }
-  static jlong peak_disconnect_time()        { return _peak_disconnect_time; }
+  static const Tickspan total_time_sweeping()         { return _total_time_sweeping; }
+  static const Tickspan peak_sweep_time()             { return _peak_sweep_time; }
+  static const Tickspan peak_sweep_fraction_time()    { return _peak_sweep_fraction_time; }
+  static const Tickspan total_disconnect_time()       { return _total_disconnect_time; }
+  static const Tickspan peak_disconnect_time()        { return _peak_disconnect_time; }
 
 #ifdef ASSERT
   // Keep track of sweeper activity in the ring buffer
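
The sweeper changes above swap raw os::elapsed_counter() jlongs for the Ticks/Tickspan value types from utilities/ticks.hpp, which distinguish instants from durations at the type level. A rough std::chrono analogue of that split (not HotSpot's actual Ticks API):

    #include <chrono>
    #include <cstdio>

    // Instants and durations as distinct types, like Ticks vs. Tickspan.
    using Ticks    = std::chrono::steady_clock::time_point;
    using Tickspan = std::chrono::steady_clock::duration;

    int main() {
      Ticks sweep_start = std::chrono::steady_clock::now();
      // ... sweeping work would happen here ...
      Ticks sweep_end = std::chrono::steady_clock::now();

      Tickspan sweep_time = sweep_end - sweep_start;  // only spans accumulate
      long long ns =
          std::chrono::duration_cast<std::chrono::nanoseconds>(sweep_time).count();
      std::printf("sweep took %lld ns\n", ns);
      return 0;
    }
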
--- a/src/share/vm/runtime/virtualspace.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/runtime/virtualspace.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -42,8 +42,19 @@
 
 
 // ReservedSpace
+
+// Dummy constructor
+ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
+    _alignment(0), _special(false), _executable(false) {
+}
+
 ReservedSpace::ReservedSpace(size_t size) {
-  initialize(size, 0, false, NULL, 0, false);
+  size_t page_size = os::page_size_for_region(size, size, 1);
+  bool large_pages = page_size != (size_t)os::vm_page_size();
+  // Don't force the alignment to be large page aligned,
+  // since that will waste memory.
+  size_t alignment = os::vm_allocation_granularity();
+  initialize(size, alignment, large_pages, NULL, 0, false);
 }
 
 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
@@ -323,16 +334,18 @@
 
   if (special) {
 
-    base = os::reserve_memory_special(size, requested_address, executable);
+    base = os::reserve_memory_special(size, alignment, requested_address, executable);
 
     if (base != NULL) {
       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
         // OS ignored requested address. Try different address.
         return;
       }
-      // Check alignment constraints
+      // Check alignment constraints.
       assert((uintptr_t) base % alignment == 0,
-             "Large pages returned a non-aligned address");
+             err_msg("Large pages returned a non-aligned address, base: "
+                 PTR_FORMAT " alignment: " PTR_FORMAT,
+                 base, (void*)(uintptr_t)alignment));
       _special = true;
     } else {
       // failed; try to reserve regular memory below
@@ -928,4 +941,188 @@
   tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
 }
 
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+#define test_log(...) \
+  do {\
+    if (VerboseInternalVMTests) { \
+      tty->print_cr(__VA_ARGS__); \
+      tty->flush(); \
+    }\
+  } while (false)
+
+class TestReservedSpace : AllStatic {
+ public:
+  static void small_page_write(void* addr, size_t size) {
+    size_t page_size = os::vm_page_size();
+
+    char* end = (char*)addr + size;
+    for (char* p = (char*)addr; p < end; p += page_size) {
+      *p = 1;
+    }
+  }
+
+  static void release_memory_for_test(ReservedSpace rs) {
+    if (rs.special()) {
+      guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
+    } else {
+      guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
+    }
+  }
+
+  static void test_reserved_space1(size_t size, size_t alignment) {
+    test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
+
+    assert(is_size_aligned(size, alignment), "Incorrect input parameters");
+
+    ReservedSpace rs(size,          // size
+                     alignment,     // alignment
+                     UseLargePages, // large
+                     NULL,          // requested_address
+                     0);            // noaccess_prefix
+
+    test_log(" rs.special() == %d", rs.special());
+
+    assert(rs.base() != NULL, "Must be");
+    assert(rs.size() == size, "Must be");
+
+    assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
+    assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned sizes");
+
+    if (rs.special()) {
+      small_page_write(rs.base(), size);
+    }
+
+    release_memory_for_test(rs);
+  }
+
+  static void test_reserved_space2(size_t size) {
+    test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
+
+    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
+
+    ReservedSpace rs(size);
+
+    test_log(" rs.special() == %d", rs.special());
+
+    assert(rs.base() != NULL, "Must be");
+    assert(rs.size() == size, "Must be");
+
+    if (rs.special()) {
+      small_page_write(rs.base(), size);
+    }
+
+    release_memory_for_test(rs);
+  }
+
+  static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
+    test_log("test_reserved_space3(%p, %p, %d)",
+        (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
+
+    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
+    assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");
+
+    bool large = maybe_large && UseLargePages && size >= os::large_page_size();
+
+    ReservedSpace rs(size, alignment, large, false);
+
+    test_log(" rs.special() == %d", rs.special());
+
+    assert(rs.base() != NULL, "Must be");
+    assert(rs.size() == size, "Must be");
+
+    if (rs.special()) {
+      small_page_write(rs.base(), size);
+    }
+
+    release_memory_for_test(rs);
+  }
+
+
+  static void test_reserved_space1() {
+    size_t size = 2 * 1024 * 1024;
+    size_t ag   = os::vm_allocation_granularity();
+
+    test_reserved_space1(size,      ag);
+    test_reserved_space1(size * 2,  ag);
+    test_reserved_space1(size * 10, ag);
+  }
+
+  static void test_reserved_space2() {
+    size_t size = 2 * 1024 * 1024;
+    size_t ag = os::vm_allocation_granularity();
+
+    test_reserved_space2(size * 1);
+    test_reserved_space2(size * 2);
+    test_reserved_space2(size * 10);
+    test_reserved_space2(ag);
+    test_reserved_space2(size - ag);
+    test_reserved_space2(size);
+    test_reserved_space2(size + ag);
+    test_reserved_space2(size * 2);
+    test_reserved_space2(size * 2 - ag);
+    test_reserved_space2(size * 2 + ag);
+    test_reserved_space2(size * 3);
+    test_reserved_space2(size * 3 - ag);
+    test_reserved_space2(size * 3 + ag);
+    test_reserved_space2(size * 10);
+    test_reserved_space2(size * 10 + size / 2);
+  }
+
+  static void test_reserved_space3() {
+    size_t ag = os::vm_allocation_granularity();
+
+    test_reserved_space3(ag,      ag    , false);
+    test_reserved_space3(ag * 2,  ag    , false);
+    test_reserved_space3(ag * 3,  ag    , false);
+    test_reserved_space3(ag * 2,  ag * 2, false);
+    test_reserved_space3(ag * 4,  ag * 2, false);
+    test_reserved_space3(ag * 8,  ag * 2, false);
+    test_reserved_space3(ag * 4,  ag * 4, false);
+    test_reserved_space3(ag * 8,  ag * 4, false);
+    test_reserved_space3(ag * 16, ag * 4, false);
+
+    if (UseLargePages) {
+      size_t lp = os::large_page_size();
+
+      // Without large pages
+      test_reserved_space3(lp,     ag * 4, false);
+      test_reserved_space3(lp * 2, ag * 4, false);
+      test_reserved_space3(lp * 4, ag * 4, false);
+      test_reserved_space3(lp,     lp    , false);
+      test_reserved_space3(lp * 2, lp    , false);
+      test_reserved_space3(lp * 3, lp    , false);
+      test_reserved_space3(lp * 2, lp * 2, false);
+      test_reserved_space3(lp * 4, lp * 2, false);
+      test_reserved_space3(lp * 8, lp * 2, false);
+
+      // With large pages
+      test_reserved_space3(lp,     ag * 4, true);
+      test_reserved_space3(lp * 2, ag * 4, true);
+      test_reserved_space3(lp * 4, ag * 4, true);
+      test_reserved_space3(lp,     lp    , true);
+      test_reserved_space3(lp * 2, lp    , true);
+      test_reserved_space3(lp * 3, lp    , true);
+      test_reserved_space3(lp * 2, lp * 2, true);
+      test_reserved_space3(lp * 4, lp * 2, true);
+      test_reserved_space3(lp * 8, lp * 2, true);
+    }
+  }
+
+  static void test_reserved_space() {
+    test_reserved_space1();
+    test_reserved_space2();
+    test_reserved_space3();
+  }
+};
+
+void TestReservedSpace_test() {
+  TestReservedSpace::test_reserved_space();
+}
+
+#endif // PRODUCT
+
 #endif
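The tests above are compiled only in non-product builds. The exported hook TestReservedSpace_test() is presumably invoked from HotSpot's internal VM test runner; a sketch of the expected wiring, shown here as an assumption rather than part of this changeset:

    // Somewhere in the runner (debug builds only):
    void TestReservedSpace_test();      // the hook defined above
    if (ExecuteInternalVMTests) {       // develop flag enabling internal tests
      TestReservedSpace_test();
    }
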
--- a/src/share/vm/runtime/virtualspace.hpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/runtime/virtualspace.hpp	Wed Jan 15 10:53:50 2014 -0800
@@ -90,6 +90,7 @@
 
  public:
   // Constructor
+  ReservedSpace();
   ReservedSpace(size_t size);
   ReservedSpace(size_t size, size_t alignment, bool large,
                 char* requested_address = NULL,
--- a/src/share/vm/services/attachListener.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/services/attachListener.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -470,7 +470,17 @@
                        vmSymbols::threadgroup_string_void_signature(),
                        thread_group,
                        string,
-                       CHECK);
+                       THREAD);
+
+  if (HAS_PENDING_EXCEPTION) {
+    tty->print_cr("Exception in VM (AttachListener::init) : ");
+    java_lang_Throwable::print(PENDING_EXCEPTION, tty);
+    tty->cr();
+
+    CLEAR_PENDING_EXCEPTION;
+
+    return;
+  }
 
   KlassHandle group(THREAD, SystemDictionary::ThreadGroup_klass());
   JavaCalls::call_special(&result,
@@ -479,7 +489,17 @@
                         vmSymbols::add_method_name(),
                         vmSymbols::thread_void_signature(),
                         thread_oop,             // ARG 1
-                        CHECK);
+                        THREAD);
+
+  if (HAS_PENDING_EXCEPTION) {
+    tty->print_cr("Exception in VM (AttachListener::init) : ");
+    java_lang_Throwable::print(PENDING_EXCEPTION, tty);
+    tty->cr();
+
+    CLEAR_PENDING_EXCEPTION;
+
+    return;
+  }
 
   { MutexLocker mu(Threads_lock);
     JavaThread* listener_thread = new JavaThread(&attach_listener_thread_entry);
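The switch from CHECK to THREAD is the standard HotSpot idiom for handling an exception at the call site instead of propagating it: CHECK expands to the THREAD argument followed by an immediate return when an exception is pending, so the caller never gets a chance to inspect it. A simplified sketch of the macro's effect (not its exact expansion):

    //   f(arg, CHECK);   behaves roughly like:
    //   f(arg, THREAD); if (HAS_PENDING_EXCEPTION) return;  // silent early return
    //
    // Passing THREAD directly, as done above, lets AttachListener::init()
    // print and clear the pending exception before returning:
    //   f(arg, THREAD);
    //   if (HAS_PENDING_EXCEPTION) {
    //     java_lang_Throwable::print(PENDING_EXCEPTION, tty);
    //     CLEAR_PENDING_EXCEPTION;
    //     return;
    //   }
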
--- a/src/share/vm/services/memTracker.hpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/services/memTracker.hpp	Wed Jan 15 10:53:50 2014 -0800
@@ -302,6 +302,13 @@
     }
   }
 
+  static inline void record_virtual_memory_release(address addr, size_t size,
+      Thread* thread = NULL) {
+    if (is_on()) {
+      Tracker tkr(Tracker::Release, thread);
+      tkr.record(addr, size);
+    }
+  }
 
   // record memory type on virtual memory base address
   static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
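A hypothetical call site for the new release hook, mirroring how the sibling record_virtual_memory_* helpers are used (illustrative only; addr and bytes are placeholders):

    if (os::release_memory(addr, bytes)) {
      MemTracker::record_virtual_memory_release((address)addr, bytes);
    }
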
--- a/src/share/vm/trace/noTraceBackend.hpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/trace/noTraceBackend.hpp	Wed Jan 15 10:53:50 2014 -0800
@@ -25,9 +25,7 @@
 #define SHARE_VM_TRACE_NOTRACEBACKEND_HPP
 
 #include "prims/jni.h"
-
-typedef jlong TracingTime;
-typedef jlong RelativeTracingTime;
+#include "trace/traceTime.hpp"
 
 class NoTraceBackend {
 public:
@@ -44,5 +42,3 @@
 typedef NoTraceBackend Tracing;
 
 #endif
-
-
--- a/src/share/vm/trace/trace.xml	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/trace/trace.xml	Wed Jan 15 10:53:50 2014 -0800
@@ -169,8 +169,8 @@
       <value type="UINT" field="gcId"  label="GC ID" relation="GC_ID" />
       <value type="GCNAME" field="name" label="Name" description="The name of the Garbage Collector" />
       <value type="GCCAUSE" field="cause" label="Cause" description="The reason for triggering this Garbage Collection" />
-      <value type="RELATIVE_TICKS" field="sumOfPauses" label="Sum of Pauses" description="Sum of all the times in which Java execution was paused during the garbage collection" />
-      <value type="RELATIVE_TICKS" field="longestPause" label="Longest Pause" description="Longest individual pause during the garbage collection" />
+      <value type="TICKSPAN" field="sumOfPauses" label="Sum of Pauses" description="Sum of all the times in which Java execution was paused during the garbage collection" />
+      <value type="TICKSPAN" field="longestPause" label="Longest Pause" description="Longest individual pause during the garbage collection" />
     </event>
 
     <event id="GCParallelOld" path="vm/gc/collector/parold_garbage_collection" label="Parallel Old Garbage Collection"
--- a/src/share/vm/trace/traceBackend.hpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/trace/traceBackend.hpp	Wed Jan 15 10:53:50 2014 -0800
@@ -26,10 +26,11 @@
 
 #if INCLUDE_TRACE
 
+#include "runtime/globals.hpp"
+#include "runtime/os.hpp"
 #include "trace/traceTime.hpp"
 #include "tracefiles/traceEventIds.hpp"
-#include "runtime/globals.hpp"
-#include "runtime/os.hpp"
+
 
 class TraceBackend {
 public:
@@ -44,10 +45,6 @@
     return os::elapsed_counter();
   }
 
-  static TracingTime time_adjustment(jlong time) {
-    return time;
-  }
-
   static void on_unloading_classes(BoolObjectClosure* is_alive, int no_of_classes_unloading) {
   }
 };
--- a/src/share/vm/trace/traceEvent.hpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/trace/traceEvent.hpp	Wed Jan 15 10:53:50 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,13 +36,10 @@
 #include "trace/tracing.hpp"
 #include "tracefiles/traceEventIds.hpp"
 #include "tracefiles/traceTypes.hpp"
+#include "utilities/ticks.hpp"
 
 template<typename T>
 class TraceEvent : public StackObj {
- protected:
-  jlong _startTime;
-  jlong _endTime;
-
  private:
   bool _started;
 #ifdef ASSERT
@@ -52,6 +49,18 @@
   bool _ignore_check;
 #endif
 
+ protected:
+  jlong _startTime;
+  jlong _endTime;
+
+  void set_starttime(const TracingTime& time) {
+    _startTime = time;
+  }
+
+  void set_endtime(const TracingTime& time) {
+    _endTime = time;
+  }
+
  public:
   TraceEvent(EventStartTime timing=TIMED) :
     _startTime(0),
@@ -90,7 +99,7 @@
         return;
     }
     if (_endTime == 0) {
-      static_cast<T *>(this)->set_endtime(Tracing::time());
+      static_cast<T*>(this)->set_endtime(Tracing::time());
     }
     if (static_cast<T*>(this)->should_write()) {
       static_cast<T*>(this)->writeEvent();
@@ -98,12 +107,12 @@
     set_commited();
   }
 
-  void set_starttime(jlong time) {
-    _startTime = time;
+  void set_starttime(const Ticks& time) {
+    _startTime = time.value();
   }
 
-  void set_endtime(jlong time) {
-    _endTime = time;
+  void set_endtime(const Ticks& time) {
+    _endTime = time.value();
   }
 
   TraceEventId id() const {
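The static_cast<T*>(this) calls implement the curiously recurring template pattern: each generated event type derives as class EventX : public TraceEvent<EventX>, so commit() statically binds to the most derived set_endtime()/should_write()/writeEvent(). A hypothetical event type, for illustration only:

    class EventSketch : public TraceEvent<EventSketch> {
     public:
      bool should_write() { return true; }  // gate serialization
      void writeEvent()   { /* serialize fields to the trace stream */ }
    };
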
--- a/src/share/vm/trace/traceEventClasses.xsl	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/trace/traceEventClasses.xsl	Wed Jan 15 10:53:50 2014 -0800
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="utf-8"?>
 <!--
- Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 
  This code is free software; you can redistribute it and/or modify it
@@ -23,8 +23,8 @@
 -->
 
 <xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+<xsl:import href="xsl_util.xsl"/>
 <xsl:output method="text" indent="no" omit-xml-declaration="yes"/>
-<xsl:import href="xsl_util.xsl"/>
 
 <xsl:template match="/">
   <xsl:call-template name="file-header"/>
@@ -40,6 +40,7 @@
 #include "runtime/handles.inline.hpp"
 #include "tracefiles/traceTypes.hpp"
 #include "trace/traceEvent.hpp"
+#include "utilities/ticks.hpp"
 
 #if INCLUDE_TRACE
 
@@ -54,8 +55,8 @@
 class TraceEvent {
 public:
   TraceEvent() {}
-  void set_starttime(jlong time) const {}
-  void set_endtime(jlong time) const {}
+  void set_starttime(const Ticks&amp; time) {}
+  void set_endtime(const Ticks&amp; time) {}
   bool should_commit() const { return false; }
   void commit() const {}
 };
@@ -174,20 +175,21 @@
 
 <xsl:template match="value[@type='TICKS']" mode="write-setters">
 #if INCLUDE_TRACE
-  <xsl:value-of select="concat('void set_', @field, '(jlong time) { _', @field, ' = time; }')"/>
+<xsl:value-of select="concat('  void set_', @field, '(const Ticks&amp; time) { _', @field, ' = time; }')"/>
 #else
-  <xsl:value-of select="concat('void set_', @field, '(jlong ignore) {}')"/>
+<xsl:value-of select="concat('  void set_', @field, '(const Ticks&amp; ignore) {}')"/>
 #endif
 </xsl:template>
 
-<xsl:template match="value[@type='RELATIVE_TICKS']" mode="write-setters">
+<xsl:template match="value[@type='TICKSPAN']" mode="write-setters">
 #if INCLUDE_TRACE
-  <xsl:value-of select="concat('void set_', @field, '(jlong time) { _', @field, ' = time; }')"/>
+  <xsl:value-of select="concat('  void set_', @field, '(const Tickspan&amp; time) { _', @field, ' = time; }')"/>
 #else
-  <xsl:value-of select="concat('void set_', @field, '(jlong ignore) {}')"/>
+  <xsl:value-of select="concat('  void set_', @field, '(const Tickspan&amp; ignore) {}')"/>
 #endif
 </xsl:template>
 
+
 <xsl:template match="value" mode="write-fields">
   <xsl:variable name="type" select="@type"/>
   <xsl:variable name="wt" select="//primary_type[@symbol=$type]/@type"/>
@@ -227,7 +229,17 @@
 <xsl:template match="value" mode="write-data">
   <xsl:variable name="type" select="@type"/>
   <xsl:variable name="wt" select="//primary_type[@symbol=$type]/@writetype"/>
-  <xsl:value-of select="concat('    ts.print_val(&quot;', @label, '&quot;, _', @field, ');')"/>
+  <xsl:choose>
+    <xsl:when test="@type='TICKSPAN'">
+      <xsl:value-of select="concat('    ts.print_val(&quot;', @label, '&quot;, _', @field, '.value());')"/>
+    </xsl:when>
+    <xsl:when test="@type='TICKS'">
+      <xsl:value-of select="concat('    ts.print_val(&quot;', @label, '&quot;, _', @field, '.value());')"/>
+    </xsl:when>
+    <xsl:otherwise>
+      <xsl:value-of select="concat('    ts.print_val(&quot;', @label, '&quot;, _', @field, ');')"/>
+    </xsl:otherwise>
+  </xsl:choose>
   <xsl:if test="position() != last()">
     <xsl:text>
     ts.print(", ");
--- a/src/share/vm/trace/traceEventIds.xsl	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/trace/traceEventIds.xsl	Wed Jan 15 10:53:50 2014 -0800
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="utf-8"?>
 <!--
- Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 
  This code is free software; you can redistribute it and/or modify it
@@ -23,8 +23,8 @@
 -->
 
 <xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+<xsl:import href="xsl_util.xsl"/>
 <xsl:output method="text" indent="no" omit-xml-declaration="yes"/>
-<xsl:import href="xsl_util.xsl"/>
 
 <xsl:template match="/">
   <xsl:call-template name="file-header"/>
--- a/src/share/vm/trace/traceMacros.hpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/trace/traceMacros.hpp	Wed Jan 15 10:53:50 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/trace/traceTime.hpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/trace/traceTime.hpp	Wed Jan 15 10:53:50 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,5 @@
 #include "prims/jni.h"
 
 typedef jlong TracingTime;
-typedef jlong RelativeTracingTime;
 
-#endif
+#endif // SHARE_VM_TRACE_TRACETIME_HPP
--- a/src/share/vm/trace/traceTypes.xsl	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/trace/traceTypes.xsl	Wed Jan 15 10:53:50 2014 -0800
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="utf-8"?>
 <!--
- Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 
  This code is free software; you can redistribute it and/or modify it
@@ -23,8 +23,8 @@
 -->
 
 <xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+<xsl:import href="xsl_util.xsl"/>
 <xsl:output method="text" indent="no" omit-xml-declaration="yes"/>
-<xsl:import href="xsl_util.xsl"/>
 
 <xsl:template match="/">
   <xsl:call-template name="file-header"/>
@@ -32,11 +32,13 @@
 #ifndef TRACEFILES_JFRTYPES_HPP
 #define TRACEFILES_JFRTYPES_HPP
 
+
+#include "oops/klassOop.hpp"
+#include "oops/methodOop.hpp"
+#include "oops/symbol.hpp"
 #include "trace/traceDataTypes.hpp"
 #include "utilities/globalDefinitions.hpp"
-#include "oops/symbol.hpp"
-#include "oops/klassOop.hpp"
-#include "oops/methodOop.hpp"
+#include "utilities/ticks.hpp"
 
 enum JVMContentType {
   _not_a_content_type = (JVM_CONTENT_TYPES_START - 1),
--- a/src/share/vm/trace/tracetypes.xml	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/trace/tracetypes.xml	Wed Jan 15 10:53:50 2014 -0800
@@ -249,13 +249,13 @@
     <primary_type symbol="NANOS" datatype="LONG" contenttype="NANOS"
                   type="s8" sizeop="sizeof(s8)"/>
 
-    <!-- 64-bit signed integer, SEMANTIC value ABSOLUTE TICKS -->
+    <!-- 64-bit signed integer, SEMANTIC value TICKS -->
     <primary_type symbol="TICKS" datatype="LONG" contenttype="TICKS"
-                  type="s8" sizeop="sizeof(s8)"/>
+                  type="Ticks" sizeop="sizeof(s8)"/>
 
-    <!-- 64-bit signed integer, SEMANTIC value RELATIVE TICKS -->
-    <primary_type symbol="RELATIVE_TICKS" datatype="LONG" contenttype="TICKS"
-                  type="s8" sizeop="sizeof(s8)"/>
+    <!-- 64-bit signed integer, SEMANTIC value TICKS duration -->
+    <primary_type symbol="TICKSPAN" datatype="LONG" contenttype="TICKS"
+                  type="Tickspan" sizeop="sizeof(s8)"/>
 
     <!-- 64-bit unsigned integer, SEMANTIC value ADDRESS (mem loc) -->
     <primary_type symbol="ADDRESS" datatype="U8" contenttype="ADDRESS"
--- a/src/share/vm/trace/tracing.hpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/trace/tracing.hpp	Wed Jan 15 10:53:50 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/globalDefinitions.hpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/utilities/globalDefinitions.hpp	Wed Jan 15 10:53:50 2014 -0800
@@ -373,6 +373,14 @@
 
 #define align_size_up_(size, alignment) (((size) + ((alignment) - 1)) & ~((alignment) - 1))
 
+inline bool is_size_aligned(size_t size, size_t alignment) {
+  return align_size_up_(size, alignment) == size;
+}
+
+inline bool is_ptr_aligned(void* ptr, size_t alignment) {
+  return align_size_up_((intptr_t)ptr, (intptr_t)alignment) == (intptr_t)ptr;
+}
+
 inline intptr_t align_size_up(intptr_t size, intptr_t alignment) {
   return align_size_up_(size, alignment);
 }
@@ -383,6 +391,14 @@
   return align_size_down_(size, alignment);
 }
 
+inline void* align_ptr_up(void* ptr, size_t alignment) {
+  return (void*)align_size_up((intptr_t)ptr, (intptr_t)alignment);
+}
+
+inline void* align_ptr_down(void* ptr, size_t alignment) {
+  return (void*)align_size_down((intptr_t)ptr, (intptr_t)alignment);
+}
+
 // Align objects by rounding up their size, in HeapWord units.
 
 #define align_object_size_(size) align_size_up_(size, MinObjAlignment)
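Quick illustrations of the new alignment helpers (sketch only; p is an arbitrary pointer):

    assert(is_size_aligned(8192, 4096), "8K is 4K-aligned");
    assert(!is_size_aligned(6144, 4096), "6K rounds up to 8K, so it is not 4K-aligned");
    void* up   = align_ptr_up(p, 64);    // round p up to a 64-byte boundary
    void* down = align_ptr_down(p, 64);  // round p down to a 64-byte boundary
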
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/utilities/ticks.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/os.hpp"
+#include "utilities/ticks.inline.hpp"
+
+#ifdef ASSERT
+ const jlong Ticks::invalid_time_stamp = -2; // 0xFFFF FFFF`FFFF FFFE
+#endif
+
+void Ticks::stamp() {
+  _stamp_ticks = os::elapsed_counter();
+}
+
+const Ticks Ticks::now() {
+  Ticks t;
+  t.stamp();
+  return t;
+}
+
+Tickspan::Tickspan(const Ticks& end, const Ticks& start) {
+  assert(end.value() != Ticks::invalid_time_stamp, "end is unstamped!");
+  assert(start.value() != Ticks::invalid_time_stamp, "start is unstamped!");
+
+  assert(end >= start, "negative time!");
+
+  _span_ticks = end.value() - start.value();
+}
+
+template <typename ReturnType>
+static ReturnType time_conversion(const Tickspan& span, TicksToTimeHelper::Unit unit) {
+  assert(TicksToTimeHelper::SECONDS == unit ||
+         TicksToTimeHelper::MILLISECONDS == unit, "invalid unit!");
+
+  ReturnType frequency_per_unit = (ReturnType)os::elapsed_frequency() / (ReturnType)unit;
+
+  return (ReturnType) ((ReturnType)span.value() / frequency_per_unit);
+}
+
+double TicksToTimeHelper::seconds(const Tickspan& span) {
+  return time_conversion<double>(span, SECONDS);
+}
+
+jlong TicksToTimeHelper::milliseconds(const Tickspan& span) {
+  return time_conversion<jlong>(span, MILLISECONDS);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/utilities/ticks.hpp	Wed Jan 15 10:53:50 2014 -0800
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_UTILITIES_TICKS_HPP
+#define SHARE_VM_UTILITIES_TICKS_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+class Ticks;
+
+class Tickspan VALUE_OBJ_CLASS_SPEC {
+  friend class Ticks;
+  friend Tickspan operator-(const Ticks& end, const Ticks& start);
+
+ private:
+  jlong _span_ticks;
+
+  Tickspan(const Ticks& end, const Ticks& start);
+
+ public:
+  Tickspan() : _span_ticks(0) {}
+
+  Tickspan& operator+=(const Tickspan& rhs) {
+    _span_ticks += rhs._span_ticks;
+    return *this;
+  }
+
+  jlong value() const {
+    return _span_ticks;
+  }
+
+};
+
+class Ticks VALUE_OBJ_CLASS_SPEC {
+ private:
+  jlong _stamp_ticks;
+
+ public:
+  Ticks() : _stamp_ticks(0) {
+    assert((_stamp_ticks = invalid_time_stamp) == invalid_time_stamp,
+      "initial unstamped time value assignment");
+  }
+
+  Ticks& operator+=(const Tickspan& span) {
+    _stamp_ticks += span.value();
+    return *this;
+  }
+
+  Ticks& operator-=(const Tickspan& span) {
+    _stamp_ticks -= span.value();
+    return *this;
+  }
+
+  void stamp();
+
+  jlong value() const {
+    return _stamp_ticks;
+  }
+
+  static const Ticks now();
+
+#ifdef ASSERT
+  static const jlong invalid_time_stamp;
+#endif
+
+#ifndef PRODUCT
+  // only for internal use by GC VM tests
+  friend class TimePartitionPhasesIteratorTest;
+  friend class GCTimerTest;
+
+ private:
+  // implicit type conversion
+  Ticks(int ticks) : _stamp_ticks(ticks) {}
+
+#endif // !PRODUCT
+
+};
+
+class TicksToTimeHelper : public AllStatic {
+ public:
+  enum Unit {
+    SECONDS = 1,
+    MILLISECONDS = 1000
+  };
+  static double seconds(const Tickspan& span);
+  static jlong milliseconds(const Tickspan& span);
+};
+
+#endif // SHARE_VM_UTILITIES_TICKS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/utilities/ticks.inline.hpp	Wed Jan 15 10:53:50 2014 -0800
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_UTILITIES_TICKS_INLINE_HPP
+#define SHARE_VM_UTILITIES_TICKS_INLINE_HPP
+
+#include "utilities/ticks.hpp"
+
+inline Tickspan operator+(Tickspan lhs, const Tickspan& rhs) {
+  lhs += rhs;
+  return lhs;
+}
+
+inline bool operator==(const Tickspan& lhs, const Tickspan& rhs) {
+  return lhs.value() == rhs.value();
+}
+
+inline bool operator!=(const Tickspan& lhs, const Tickspan& rhs) {
+  return !operator==(lhs,rhs);
+}
+
+inline bool operator<(const Tickspan& lhs, const Tickspan& rhs) {
+  return lhs.value() < rhs.value();
+}
+
+inline bool operator>(const Tickspan& lhs, const Tickspan& rhs) {
+  return operator<(rhs,lhs);
+}
+
+inline bool operator<=(const Tickspan& lhs, const Tickspan& rhs) {
+  return !operator>(lhs,rhs);
+}
+
+inline bool operator>=(const Tickspan& lhs, const Tickspan& rhs) {
+  return !operator<(lhs,rhs);
+}
+
+inline Ticks operator+(Ticks lhs, const Tickspan& span) {
+  lhs += span;
+  return lhs;
+}
+
+inline Ticks operator-(Ticks lhs, const Tickspan& span) {
+  lhs -= span;
+  return lhs;
+}
+
+inline Tickspan operator-(const Ticks& end, const Ticks& start) {
+  return Tickspan(end, start);
+}
+
+inline bool operator==(const Ticks& lhs, const Ticks& rhs) {
+  return lhs.value() == rhs.value();
+}
+
+inline bool operator!=(const Ticks& lhs, const Ticks& rhs) {
+  return !operator==(lhs,rhs);
+}
+
+inline bool operator<(const Ticks& lhs, const Ticks& rhs) {
+  return lhs.value() < rhs.value();
+}
+
+inline bool operator>(const Ticks& lhs, const Ticks& rhs) {
+  return operator<(rhs,lhs);
+}
+
+inline bool operator<=(const Ticks& lhs, const Ticks& rhs) {
+  return !operator>(lhs,rhs);
+}
+
+inline bool operator>=(const Ticks& lhs, const Ticks& rhs) {
+  return !operator<(lhs,rhs);
+}
+
+#endif // SHARE_VM_UTILITIES_TICKS_INLINE_HPP
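Typical use of the new time types, with the operators above supplying the Ticks arithmetic (sketch only):

    #include "utilities/ticks.inline.hpp"

    Ticks start = Ticks::now();
    // ... timed work ...
    const Tickspan duration = Ticks::now() - start;   // builds a Tickspan
    double secs   = TicksToTimeHelper::seconds(duration);
    jlong  millis = TicksToTimeHelper::milliseconds(duration);
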
--- a/src/share/vm/utilities/vmError.cpp	Wed Jan 15 10:45:35 2014 -0800
+++ b/src/share/vm/utilities/vmError.cpp	Wed Jan 15 10:53:50 2014 -0800
@@ -583,6 +583,13 @@
           while (count++ < StackPrintLimit) {
              fr.print_on_error(st, buf, sizeof(buf));
              st->cr();
+             // Compiled code may use the EBP register on x86, making the frame
+             // look like a non-walkable C frame. Use frame.sender() for Java frames.
+             if (_thread && _thread->is_Java_thread() && fr.is_java_frame()) {
+               RegisterMap map((JavaThread*)_thread, false); // No update
+               fr = fr.sender(&map);
+               continue;
+             }
              if (os::is_first_C_frame(&fr)) break;
              fr = os::get_sender_for_C_frame(&fr);
           }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/8004051/Test8004051.java	Wed Jan 15 10:53:50 2014 -0800
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 8004051
+ * @bug 8005722
+ * @summary assert(_oprs_len[mode] < maxNumberOfOperands) failed: array overflow
+ *
+ * @run main/othervm -Xcomp -client Test8004051
+ */
+
+public class Test8004051 {
+    public static void main(String[] argv) {
+        Object o = new Object();
+        fillPrimRect(1.1f, 1.2f, 1.3f, 1.4f,
+                     o, o,
+                     1.5f, 1.6f, 1.7f, 1.8f,
+                     2.0f, 2.1f, 2.2f, 2.3f,
+                     2.4f, 2.5f, 2.6f, 2.7f,
+                     100, 101);
+        System.out.println("Test passed, test did not assert");
+    }
+
+    static boolean fillPrimRect(float x, float y, float w, float h,
+                                Object rectTex, Object wrapTex,
+                                float bx, float by, float bw, float bh,
+                                float f1, float f2, float f3, float f4,
+                                float f5, float f6, float f7, float f8,
+                                int i1, int i2 ) {
+        System.out.println(x + " " + y + " " + w + " " + h + " " +
+                           bx + " " + by + " " + bw + " " + bh);
+        return true;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/gcbarriers/G1CrashTest.java	Wed Jan 15 10:53:50 2014 -0800
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 8023472
+ * @summary C2 optimization breaks with G1
+ *
+ * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:-TieredCompilation -Dcount=100000 G1CrashTest
+ *
+ * @author pbiswal@palantir.com
+ */
+
+public class G1CrashTest {
+    static Object[] set = new Object[11];
+
+    public static void main(String[] args) throws InterruptedException {
+        for (int j = 0; j < Integer.getInteger("count"); j++) {
+            Object key = new Object();
+            insertKey(key);
+            if (j > set.length / 2) {
+                Object[] oldKeys = set;
+                set = new Object[2 * set.length - 1];
+                for (Object o : oldKeys) {
+                    if (o != null)
+                        insertKey(o);
+                }
+            }
+        }
+    }
+
+    static void insertKey(Object key) {
+        int hash = key.hashCode() & 0x7fffffff;
+        int index = hash % set.length;
+        Object cur = set[index];
+        if (cur == null)
+            set[index] = key;
+        else
+            insertKeyRehash(key, index, hash, cur);
+    }
+
+    static void insertKeyRehash(Object key, int index, int hash, Object cur) {
+        int loopIndex = index;
+        int firstRemoved = -1;
+        do {
+            if (cur == "dead")
+                firstRemoved = 1;
+            index--;
+            if (index < 0)
+                index += set.length;
+            cur = set[index];
+            if (cur == null) {
+                if (firstRemoved != -1)
+                    set[firstRemoved] = "dead";
+                else
+                    set[index] = key;
+                return;
+            }
+        } while (index != loopIndex);
+        if (firstRemoved != -1)
+            set[firstRemoved] = null;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/print/PrintInlining.java	Wed Jan 15 10:53:50 2014 -0800
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8022585
+ * @summary VM crashes when ran with -XX:+PrintInlining
+ * @run main/othervm -Xcomp -XX:+PrintInlining PrintInlining
+ *
+ */
+
+public class PrintInlining {
+  public static void main(String[] args) {
+    System.out.println("Passed");
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/reflection/ArrayNewInstanceOfVoid.java	Wed Jan 15 10:53:50 2014 -0800
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8029366
+ * @summary ShouldNotReachHere error when creating an array with component type of void
+ */
+
+public class ArrayNewInstanceOfVoid {
+    public static void main(String[] args) {
+        for (int i = 0; i < 100_000; i++) {
+            test();
+        }
+    }
+
+    private static void test() {
+        try {
+            java.lang.reflect.Array.newInstance(void.class, 2);
+        } catch (IllegalArgumentException e) {
+            // expected
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/regalloc/C1ObjectSpillInLogicOp.java	Wed Jan 15 10:53:50 2014 -0800
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8027751
+ * @summary C1 crashes generating G1 post-barrier in Unsafe.getAndSetObject() intrinsic because of the new value spill
+ * @run main/othervm -XX:+UseG1GC C1ObjectSpillInLogicOp
+ *
+ * G1 barriers use logical operators (xor) on T_OBJECT mixed with T_LONG or T_INT.
+ * The current implementation of logical operations on x86 in C1 doesn't allow long operands on the stack.
+ * There is special code in the register allocator that forces long arguments into registers on x86. However, T_OBJECT
+ * can be spilled just fine, and in that case the xor emission will fail.
+ */
+
+import java.util.concurrent.atomic.*;
+public class C1ObjectSpillInLogicOp {
+  static public void main(String[] args) {
+    AtomicReferenceArray<Integer> x = new AtomicReferenceArray(128);
+    Integer y = new Integer(0);
+    for (int i = 0; i < 50000; i++) {
+      x.getAndSet(i % x.length(), y);
+    }
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/memory/LargePages/TestLargePagesFlags.java	Wed Jan 15 10:53:50 2014 -0800
@@ -0,0 +1,389 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/* @test TestLargePagesFlags
+ * @summary Tests how large pages are chosen depending on the given large-page flag combinations.
+ * @library /testlibrary
+ * @run main TestLargePagesFlags
+ */
+
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.Platform;
+import com.oracle.java.testlibrary.ProcessTools;
+import java.util.ArrayList;
+
+public class TestLargePagesFlags {
+
+  public static void main(String [] args) throws Exception {
+    if (!Platform.isLinux()) {
+      System.out.println("Skipping. TestLargePagesFlags has only been implemented for Linux.");
+      return;
+    }
+
+    testUseTransparentHugePages();
+    testUseHugeTLBFS();
+    testUseSHM();
+    testCombinations();
+  }
+
+  public static void testUseTransparentHugePages() throws Exception {
+    if (!canUse(UseTransparentHugePages(true))) {
+      System.out.println("Skipping testUseTransparentHugePages");
+      return;
+    }
+
+    // -XX:-UseLargePages overrides all other flags.
+    new FlagTester()
+      .use(UseLargePages(false),
+           UseTransparentHugePages(true))
+      .expect(
+           UseLargePages(false),
+           UseTransparentHugePages(false),
+           UseHugeTLBFS(false),
+           UseSHM(false));
+
+    // Explicitly turn on UseTransparentHugePages.
+    new FlagTester()
+      .use(UseTransparentHugePages(true))
+      .expect(
+           UseLargePages(true),
+           UseTransparentHugePages(true),
+           UseHugeTLBFS(false),
+           UseSHM(false));
+
+    new FlagTester()
+      .use(UseLargePages(true),
+           UseTransparentHugePages(true))
+      .expect(
+           UseLargePages(true),
+           UseTransparentHugePages(true),
+           UseHugeTLBFS(false),
+           UseSHM(false));
+
+    // Setting a specific large pages flag will turn
+    // off heuristics to choose large pages type.
+    new FlagTester()
+      .use(UseLargePages(true),
+           UseTransparentHugePages(false))
+      .expect(
+           UseLargePages(false),
+           UseTransparentHugePages(false),
+           UseHugeTLBFS(false),
+           UseSHM(false));
+
+    // Don't turn on UseTransparentHugePages
+    // unless the user explicitly asks for them.
+    new FlagTester()
+      .use(UseLargePages(true))
+      .expect(
+           UseTransparentHugePages(false));
+  }
+
+  public static void testUseHugeTLBFS() throws Exception {
+    if (!canUse(UseHugeTLBFS(true))) {
+      System.out.println("Skipping testUseHugeTLBFS");
+      return;
+    }
+
+    // -XX:-UseLargePages overrides all other flags.
+    new FlagTester()
+      .use(UseLargePages(false),
+           UseHugeTLBFS(true))
+      .expect(
+           UseLargePages(false),
+           UseTransparentHugePages(false),
+           UseHugeTLBFS(false),
+           UseSHM(false));
+
+    // Explicitly turn on UseHugeTLBFS.
+    new FlagTester()
+      .use(UseHugeTLBFS(true))
+      .expect(
+           UseLargePages(true),
+           UseTransparentHugePages(false),
+           UseHugeTLBFS(true),
+           UseSHM(false));
+
+    new FlagTester()
+      .use(UseLargePages(true),
+           UseHugeTLBFS(true))
+      .expect(
+           UseLargePages(true),
+           UseTransparentHugePages(false),
+           UseHugeTLBFS(true),
+           UseSHM(false));
+
+    // Setting a specific large pages flag will turn
+    // off heuristics to choose large pages type.
+    new FlagTester()
+      .use(UseLargePages(true),
+           UseHugeTLBFS(false))
+      .expect(
+           UseLargePages(false),
+           UseTransparentHugePages(false),
+           UseHugeTLBFS(false),
+           UseSHM(false));
+
+    // Using UseLargePages will default to UseHugeTLBFS large pages.
+    new FlagTester()
+      .use(UseLargePages(true))
+      .expect(
+           UseLargePages(true),
+           UseTransparentHugePages(false),
+           UseHugeTLBFS(true),
+           UseSHM(false));
+  }
+
+  public static void testUseSHM() throws Exception {
+    if (!canUse(UseSHM(true))) {
+      System.out.println("Skipping testUseSHM");
+      return;
+    }
+
+    // -XX:-UseLargePages overrides all other flags.
+    new FlagTester()
+      .use(UseLargePages(false),
+           UseSHM(true))
+      .expect(
+           UseLargePages(false),
+           UseTransparentHugePages(false),
+           UseHugeTLBFS(false),
+           UseSHM(false));
+
+    // Explicitly turn on UseSHM.
+    new FlagTester()
+      .use(UseSHM(true))
+      .expect(
+           UseLargePages(true),
+           UseTransparentHugePages(false),
+           UseHugeTLBFS(false),
+           UseSHM(true));
+
+    new FlagTester()
+      .use(UseLargePages(true),
+           UseSHM(true))
+      .expect(
+           UseLargePages(true),
+           UseTransparentHugePages(false),
+           UseHugeTLBFS(false),
+           UseSHM(true));
+
+    // Setting a specific large pages flag will turn
+    // off heuristics to choose large pages type.
+    new FlagTester()
+      .use(UseLargePages(true),
+           UseSHM(false))
+      .expect(
+           UseLargePages(false),
+           UseTransparentHugePages(false),
+           UseHugeTLBFS(false),
+           UseSHM(false));
+
+    // Setting UseLargePages can allow the system to choose
+    // UseHugeTLBFS instead of UseSHM, but never UseTransparentHugePages.
+    new FlagTester()
+      .use(UseLargePages(true))
+      .expect(
+           UseLargePages(true),
+           UseTransparentHugePages(false));
+  }
+
+  public static void testCombinations() throws Exception {
+    if (!canUse(UseSHM(true)) || !canUse(UseHugeTLBFS(true))) {
+      System.out.println("Skipping testUseHugeTLBFSAndUseSHMCombination");
+      return;
+    }
+
+    // UseHugeTLBFS takes precedence over SHM.
+
+    new FlagTester()
+      .use(UseLargePages(true),
+           UseHugeTLBFS(true),
+           UseSHM(true))
+      .expect(
+           UseLargePages(true),
+           UseTransparentHugePages(false),
+           UseHugeTLBFS(true),
+           UseSHM(false));
+
+    new FlagTester()
+      .use(UseLargePages(true),
+           UseHugeTLBFS(false),
+           UseSHM(true))
+      .expect(
+           UseLargePages(true),
+           UseTransparentHugePages(false),
+           UseHugeTLBFS(false),
+           UseSHM(true));
+
+    new FlagTester()
+      .use(UseLargePages(true),
+           UseHugeTLBFS(true),
+           UseSHM(false))
+      .expect(
+           UseLargePages(true),
+           UseTransparentHugePages(false),
+           UseHugeTLBFS(true),
+           UseSHM(false));
+
+    new FlagTester()
+      .use(UseLargePages(true),
+           UseHugeTLBFS(false),
+           UseSHM(false))
+      .expect(
+           UseLargePages(false),
+           UseTransparentHugePages(false),
+           UseHugeTLBFS(false),
+           UseSHM(false));
+
+
+    if (!canUse(UseTransparentHugePages(true))) {
+      return;
+    }
+
+    // UseTransparentHugePages takes precedence.
+
+    new FlagTester()
+      .use(UseLargePages(true),
+           UseTransparentHugePages(true),
+           UseHugeTLBFS(true),
+           UseSHM(true))
+      .expect(
+           UseLargePages(true),
+           UseTransparentHugePages(true),
+           UseHugeTLBFS(false),
+           UseSHM(false));
+
+    new FlagTester()
+      .use(UseTransparentHugePages(true),
+           UseHugeTLBFS(true),
+           UseSHM(true))
+      .expect(
+           UseLargePages(true),
+           UseTransparentHugePages(true),
+           UseHugeTLBFS(false),
+           UseSHM(false));
+  }
+
+  private static class FlagTester {
+    private Flag [] useFlags;
+
+    public FlagTester use(Flag... useFlags) {
+      this.useFlags = useFlags;
+      return this;
+    }
+
+    public void expect(Flag... expectedFlags) throws Exception {
+      if (useFlags == null) {
+        throw new IllegalStateException("Must run use() before expect()");
+      }
+
+      OutputAnalyzer output = executeNewJVM(useFlags);
+
+      for (Flag flag : expectedFlags) {
+        System.out.println("Looking for: " + flag.flagString());
+        String strValue = output.firstMatch(".* " + flag.name() +  " .* :?= (\\S+).*", 1);
+
+        if (strValue == null) {
+          throw new RuntimeException("Flag " + flag.name() + " couldn't be found");
+        }
+
+        if (!flag.value().equals(strValue)) {
+          throw new RuntimeException("Wrong value for: " + flag.name()
+                                     + " expected: " + flag.value()
+                                     + " got: " + strValue);
+        }
+      }
+
+      output.shouldHaveExitValue(0);
+    }
+  }
+
+  private static OutputAnalyzer executeNewJVM(Flag... flags) throws Exception {
+    ArrayList<String> args = new ArrayList<>();
+    for (Flag flag : flags) {
+      args.add(flag.flagString());
+    }
+    args.add("-XX:+PrintFlagsFinal");
+    args.add("-version");
+
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(args.toArray(new String[args.size()]));
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+    return output;
+  }
+
+  private static boolean canUse(Flag flag) {
+    try {
+      new FlagTester().use(flag).expect(flag);
+    } catch (Exception e) {
+      return false;
+    }
+
+    return true;
+  }
+
+  private static Flag UseLargePages(boolean value) {
+    return new BooleanFlag("UseLargePages", value);
+  }
+
+  private static Flag UseTransparentHugePages(boolean value) {
+    return new BooleanFlag("UseTransparentHugePages", value);
+  }
+
+  private static Flag UseHugeTLBFS(boolean value) {
+    return new BooleanFlag("UseHugeTLBFS", value);
+  }
+
+  private static Flag UseSHM(boolean value) {
+    return new BooleanFlag("UseSHM", value);
+  }
+
+  private static class BooleanFlag implements Flag {
+    private String name;
+    private boolean value;
+
+    BooleanFlag(String name, boolean value) {
+      this.name = name;
+      this.value = value;
+    }
+
+    public String flagString() {
+      return "-XX:" + (value ? "+" : "-") + name;
+    }
+
+    public String name() {
+      return name;
+    }
+
+    public String value() {
+      return Boolean.toString(value);
+    }
+  }
+
+  private static interface Flag {
+    public String flagString();
+    public String name();
+    public String value();
+  }
+}
--- a/test/testlibrary/OutputAnalyzerTest.java	Wed Jan 15 10:45:35 2014 -0800
+++ b/test/testlibrary/OutputAnalyzerTest.java	Wed Jan 15 10:53:50 2014 -0800
@@ -172,5 +172,22 @@
     } catch (RuntimeException e) {
         // expected
     }
+
+    {
+      String aaaa = "aaaa";
+      String result = output.firstMatch(aaaa);
+      if (!aaaa.equals(result)) {
+        throw new Exception("firstMatch(String) faild to match. Expected: " + aaaa + " got: " + result);
+      }
+    }
+
+    {
+      String aa = "aa";
+      String aa_grouped_aa = aa + "(" + aa + ")";
+      String result = output.firstMatch(aa_grouped_aa, 1);
+      if (!aa.equals(result)) {
+        throw new Exception("firstMatch(String, int) failed to match. Expected: " + aa + " got: " + result);
+      }
+    }
   }
 }
--- a/test/testlibrary/com/oracle/java/testlibrary/OutputAnalyzer.java	Wed Jan 15 10:45:35 2014 -0800
+++ b/test/testlibrary/com/oracle/java/testlibrary/OutputAnalyzer.java	Wed Jan 15 10:53:50 2014 -0800
@@ -241,7 +241,38 @@
   }
 
   /**
-   * Verifiy the exit value of the process
+   * Get the captured group of the first string matching the pattern.
+   * stderr is searched before stdout.
+   *
+   * @param pattern The multi-line pattern to match
+   * @param group The group to capture
+   * @return The matched string or null if no match was found
+   */
+  public String firstMatch(String pattern, int group) {
+    Matcher stderrMatcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stderr);
+    Matcher stdoutMatcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stdout);
+    if (stderrMatcher.find()) {
+      return stderrMatcher.group(group);
+    }
+    if (stdoutMatcher.find()) {
+      return stdoutMatcher.group(group);
+    }
+    return null;
+  }
+
+  /**
+   * Get the first string matching the pattern.
+   * stderr is searched before stdout.
+   *
+   * @param pattern The multi-line pattern to match
+   * @return The matched string or null if no match was found
+   */
+  public String firstMatch(String pattern) {
+    return firstMatch(pattern, 0);
+  }
+
+  /**
+   * Verify the exit value of the process
    *
    * @param expectedExitValue Expected exit value from process
    * @throws RuntimeException If the exit value from the process did not match the expected value