changeset 1431:8a6c97c3e8d1 jigsaw-b05

Merge
author mchung
date Thu, 13 May 2010 18:26:35 -0700
parents 915ac78cb3a1 (current diff) fd3de7134574 (diff)
children 10399ce71866
files .hgtags make/linux/makefiles/mapfile-vers-debug make/linux/makefiles/mapfile-vers-product src/os/linux/vm/os_linux.cpp src/os/solaris/vm/os_solaris.cpp src/share/vm/classfile/classFileParser.cpp src/share/vm/classfile/systemDictionary.cpp src/share/vm/classfile/vmSymbols.hpp src/share/vm/oops/instanceKlass.cpp src/share/vm/oops/objArrayKlass.cpp src/share/vm/prims/jni.cpp src/share/vm/prims/jvm.cpp src/share/vm/prims/unsafe.cpp src/share/vm/runtime/arguments.cpp src/share/vm/runtime/arguments.hpp src/share/vm/runtime/globals.hpp
diffstat 260 files changed, 8078 insertions(+), 3526 deletions(-)
--- a/.hgtags	Mon May 10 19:45:24 2010 -0700
+++ b/.hgtags	Thu May 13 18:26:35 2010 -0700
@@ -84,5 +84,17 @@
 3f370a32906eb5ba993fabd7b4279be7f31052b9 jdk7-b83
 ffc8d176b84bcfb5ac21302b4feb3b0c0d69b97c jdk7-b84
 4ac54e858f41e18ed0a669ff0da71185af43150d jigsaw-b02
+6c9796468b91dcbb39e09dfa1baf9779ac45eb66 jdk7-b85
 f59c81b1afe92d624a32743c1a6d04469e3be5f2 jigsaw-b03
 c4854f8d4afee1a32cc5ac393163c06a61eaaf0c jigsaw-b04
+418bc80ce13995149eadc9eecbba21d7a9fa02ae hs17-b10
+bf823ef06b4f211e66988d76a2e2669be5c0820e jdk7-b86
+07226e9eab8f74b37346b32715f829a2ef2c3188 hs18-b01
+e7e7e36ccdb5d56edd47e5744351202d38f3b7ad jdk7-b87
+4b60f23c42231f7ecd62ad1fcb6a9ca26fa57d1b jdk7-b88
+15836273ac2494f36ef62088bc1cb6f3f011f565 jdk7-b89
+4b60f23c42231f7ecd62ad1fcb6a9ca26fa57d1b hs18-b02
+605c9707a766ff518cd841fc04f9bb4b36a3a30b jdk7-b90
+e0a1a502e402dbe7bf2d9102b4084a7e79a99a9b jdk7-b91
+25f53b53aaa3eb8b2d5391a1e8de9a76ae1dd8a2 hs18-b03
+3221d1887d30341bedfdac1dbf365ea41beff20f jdk7-b92
--- a/agent/src/os/linux/libproc_impl.c	Mon May 10 19:45:24 2010 -0700
+++ b/agent/src/os/linux/libproc_impl.c	Thu May 13 18:26:35 2010 -0700
@@ -174,7 +174,7 @@
       return NULL;
    }
 
-   newlib->symtab = build_symtab(newlib->fd);
+   newlib->symtab = build_symtab(newlib->fd, libname);
    if (newlib->symtab == NULL) {
       print_debug("symbol table build failed for %s\n", newlib->name);
    }
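
build_symtab() now receives the library's pathname so the new separate-debuginfo lookup in symtab.c (below) can find a debug file when the object itself is stripped. A minimal standalone sketch of the updated call; the path handling here is hypothetical, and unlike the agent (which keeps newlib->fd open for later use), the sketch closes the descriptor itself:

    #include <fcntl.h>
    #include <unistd.h>
    #include "symtab.h"   /* declares the two-argument build_symtab() */

    static struct symtab *load_symbols(const char *libpath) {
        int fd = open(libpath, O_RDONLY);  /* the agent uses pathmap_open() */
        if (fd < 0)
            return NULL;
        /* passing the pathname enables the build-id/.gnu_debuglink search */
        struct symtab *st = build_symtab(fd, libpath);
        close(fd);
        return st;
    }
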
--- a/agent/src/os/linux/ps_core.c	Mon May 10 19:45:24 2010 -0700
+++ b/agent/src/os/linux/ps_core.c	Thu May 13 18:26:35 2010 -0700
@@ -884,9 +884,12 @@
       }
 
       // read name of the shared object
-      if (read_string(ph, (uintptr_t) lib_name_addr, lib_name, sizeof(lib_name)) != true) {
+      lib_name[0] = '\0';
+      if (lib_name_addr != 0 &&
+          read_string(ph, (uintptr_t) lib_name_addr, lib_name, sizeof(lib_name)) != true) {
          print_debug("can't read shared object name\n");
-         return false;
+         // don't let failure to read the name stop opening the file.  If something is really wrong
+         // it will fail later.
       }
 
       if (lib_name[0] != '\0') {
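
Pre-clearing lib_name and tolerating a failed read_string() means a core file with an unreadable l_name no longer aborts parsing: the lib_name[0] != '\0' guard simply skips registering that object, and a genuinely corrupt core will still fail at a later stage.
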
--- a/agent/src/os/linux/symtab.c	Mon May 10 19:45:24 2010 -0700
+++ b/agent/src/os/linux/symtab.c	Thu May 13 18:26:35 2010 -0700
@@ -53,8 +53,274 @@
   struct hsearch_data *hash_table;
 } symtab_t;
 
-// read symbol table from given fd.
-struct symtab* build_symtab(int fd) {
+
+// Directory that contains global debuginfo files.  In theory it
+// should be possible to change this, but in a Java environment there
+// is no obvious place to put a user interface to do it.  Maybe this
+// could be set with an environment variable.
+static const char debug_file_directory[] = "/usr/lib/debug";
+
+/* The CRC used in gnu_debuglink, retrieved from
+   http://sourceware.org/gdb/current/onlinedocs/gdb/Separate-Debug-Files.html#Separate-Debug-Files. */
+unsigned int gnu_debuglink_crc32 (unsigned int crc,
+                                  unsigned char *buf, size_t len)
+{
+  static const unsigned int crc32_table[256] =
+    {
+      0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419,
+      0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4,
+      0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07,
+      0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
+      0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856,
+      0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
+      0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4,
+      0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
+      0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3,
+      0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a,
+      0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599,
+      0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
+      0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190,
+      0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f,
+      0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e,
+      0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
+      0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed,
+      0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
+      0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3,
+      0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
+      0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a,
+      0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5,
+      0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010,
+      0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
+      0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17,
+      0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6,
+      0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615,
+      0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
+      0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344,
+      0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
+      0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a,
+      0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
+      0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1,
+      0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c,
+      0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef,
+      0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
+      0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe,
+      0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31,
+      0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c,
+      0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
+      0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b,
+      0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
+      0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1,
+      0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
+      0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278,
+      0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7,
+      0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66,
+      0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
+      0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605,
+      0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8,
+      0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b,
+      0x2d02ef8d
+    };
+  unsigned char *end;
+
+  crc = ~crc & 0xffffffff;
+  for (end = buf + len; buf < end; ++buf)
+    crc = crc32_table[(crc ^ *buf) & 0xff] ^ (crc >> 8);
+  return ~crc & 0xffffffff;
+}
+
+/* Open a debuginfo file and check its CRC.  If it exists and the CRC
+   matches return its fd.  */
+static int
+open_debug_file (const char *pathname, unsigned int crc)
+{
+  unsigned int file_crc = 0;
+  unsigned char buffer[8 * 1024];
+
+  int fd = pathmap_open(pathname);
+
+  if (fd < 0)
+    return -1;
+
+  lseek(fd, 0, SEEK_SET);
+
+  for (;;) {
+    int len = read(fd, buffer, sizeof buffer);
+    if (len <= 0)
+      break;
+    file_crc = gnu_debuglink_crc32(file_crc, buffer, len);
+  }
+
+  if (crc == file_crc)
+    return fd;
+  else {
+    close(fd);
+    return -1;
+  }
+}
+
+/* Find an ELF section.  */
+static struct elf_section *find_section_by_name(char *name,
+                                                int fd,
+                                                ELF_EHDR *ehdr,
+                                                ELF_SHDR *shbuf,
+                                                struct elf_section *scn_cache)
+{
+  ELF_SHDR* cursct = NULL;
+  char *strtab;
+  int cnt;
+
+  if (scn_cache[ehdr->e_shstrndx].c_data == NULL) {
+    if ((scn_cache[ehdr->e_shstrndx].c_data
+         = read_section_data(fd, ehdr, &shbuf[ehdr->e_shstrndx])) == NULL) {
+      return NULL;
+    }
+  }
+
+  strtab = scn_cache[ehdr->e_shstrndx].c_data;
+
+  for (cursct = shbuf, cnt = 0;
+       cnt < ehdr->e_shnum;
+       cnt++, cursct++) {
+    if (strcmp(cursct->sh_name + strtab, name) == 0) {
+      scn_cache[cnt].c_data = read_section_data(fd, ehdr, cursct);
+      return &scn_cache[cnt];
+    }
+  }
+
+  return NULL;
+}
+
+/* Look for a ".gnu_debuglink" section.  If one exists, try to open a
+   suitable debuginfo file.  */
+static int open_file_from_debug_link(const char *name,
+                                     int fd,
+                                     ELF_EHDR *ehdr,
+                                     ELF_SHDR *shbuf,
+                                     struct elf_section *scn_cache)
+{
+  int debug_fd;
+  struct elf_section *debug_link = find_section_by_name(".gnu_debuglink", fd, ehdr,
+                                                        shbuf, scn_cache);
+  if (debug_link == NULL)
+    return -1;
+  char *debug_filename = debug_link->c_data;
+  int offset = (strlen(debug_filename) + 4) >> 2;
+  static unsigned int crc;
+  crc = ((unsigned int*)debug_link->c_data)[offset];
+  char *debug_pathname = malloc(strlen(debug_filename)
+                                + strlen(name)
+                                + strlen(".debug/")
+                                + strlen(debug_file_directory)
+                                + 2);
+  strcpy(debug_pathname, name);
+  char *last_slash = strrchr(debug_pathname, '/');
+  if (last_slash == NULL) {
+    free(debug_pathname);
+    return -1;
+  }
+
+  /* Look in the same directory as the object.  */
+  strcpy(last_slash+1, debug_filename);
+
+  debug_fd = open_debug_file(debug_pathname, crc);
+  if (debug_fd >= 0) {
+    free(debug_pathname);
+    return debug_fd;
+  }
+
+  /* Look in a subdirectory named ".debug".  */
+  strcpy(last_slash+1, ".debug/");
+  strcat(last_slash, debug_filename);
+
+  debug_fd = open_debug_file(debug_pathname, crc);
+  if (debug_fd >= 0) {
+    free(debug_pathname);
+    return debug_fd;
+  }
+
+  /* Look in /usr/lib/debug + the full pathname.  */
+  strcpy(debug_pathname, debug_file_directory);
+  strcat(debug_pathname, name);
+  last_slash = strrchr(debug_pathname, '/');
+  strcpy(last_slash+1, debug_filename);
+
+  debug_fd = open_debug_file(debug_pathname, crc);
+  if (debug_fd >= 0) {
+    free(debug_pathname);
+    return debug_fd;
+  }
+
+  free(debug_pathname);
+  return -1;
+}
+
+static struct symtab* build_symtab_internal(int fd, const char *filename, bool try_debuginfo);
+
+/* Look for a ".gnu_debuglink" section.  If one exists, try to open a
+   suitable debuginfo file and read a symbol table from it.  */
+static struct symtab *build_symtab_from_debug_link(const char *name,
+                                     int fd,
+                                     ELF_EHDR *ehdr,
+                                     ELF_SHDR *shbuf,
+                                     struct elf_section *scn_cache)
+{
+  fd = open_file_from_debug_link(name, fd, ehdr, shbuf, scn_cache);
+
+  if (fd >= 0) {
+    struct symtab *symtab = build_symtab_internal(fd, NULL, /* try_debuginfo */ false);
+    close(fd);
+    return symtab;
+  }
+
+  return NULL;
+}
+
+// Given a build_id, find the associated debuginfo file
+static char *
+build_id_to_debug_filename (size_t size, unsigned char *data)
+{
+  char *filename, *s;
+
+  filename = malloc(strlen (debug_file_directory) + (sizeof "/.build-id/" - 1) + 1
+                    + 2 * size + (sizeof ".debug" - 1) + 1);
+  s = filename + sprintf (filename, "%s/.build-id/", debug_file_directory);
+  if (size > 0)
+    {
+      size--;
+      s += sprintf (s, "%02x", *data++);
+    }
+  if (size > 0)
+    *s++ = '/';
+  while (size-- > 0)
+    s += sprintf (s, "%02x", *data++);
+  strcpy (s, ".debug");
+
+  return filename;
+}
+
+// Read a build ID note.  Try to open any associated debuginfo file
+// and return its symtab
+static struct symtab* build_symtab_from_build_id(Elf64_Nhdr *note)
+{
+  int fd;
+  struct symtab *symtab = NULL;
+
+  unsigned char *bytes
+    = (unsigned char*)(note+1) + note->n_namesz;
+  char *filename
+    = build_id_to_debug_filename (note->n_descsz, bytes);
+
+  fd = pathmap_open(filename);
+  if (fd >= 0) {
+    symtab = build_symtab_internal(fd, NULL, /* try_debuginfo */ false);
+    close(fd);
+  }
+  free(filename);
+
+  return symtab;
+}
+
+// read symbol table from given fd.  If try_debuginfo is true, also
+// try to open an associated debuginfo file
+static struct symtab* build_symtab_internal(int fd, const char *filename, bool try_debuginfo) {
   ELF_EHDR ehdr;
   char *names = NULL;
   struct symtab* symtab = NULL;
@@ -66,6 +332,7 @@
   ELF_SHDR* cursct = NULL;
   ELF_PHDR* phbuf = NULL;
   ELF_PHDR* phdr = NULL;
+  int sym_section = SHT_DYNSYM;
 
   uintptr_t baseaddr = (uintptr_t)-1;
 
@@ -90,18 +357,23 @@
 
   for (cursct = shbuf, cnt = 0; cnt < ehdr.e_shnum; cnt++) {
     scn_cache[cnt].c_shdr = cursct;
-    if (cursct->sh_type == SHT_SYMTAB || cursct->sh_type == SHT_STRTAB) {
+    if (cursct->sh_type == SHT_SYMTAB || cursct->sh_type == SHT_STRTAB
+        || cursct->sh_type == SHT_NOTE || cursct->sh_type == SHT_DYNSYM) {
       if ( (scn_cache[cnt].c_data = read_section_data(fd, &ehdr, cursct)) == NULL) {
          goto quit;
       }
     }
+    if (cursct->sh_type == SHT_SYMTAB) {
+      // Full symbol table available so use that
+      sym_section = cursct->sh_type;
+    }
     cursct++;
   }
 
   for (cnt = 1; cnt < ehdr.e_shnum; cnt++) {
     ELF_SHDR *shdr = scn_cache[cnt].c_shdr;
 
-    if (shdr->sh_type == SHT_SYMTAB) {
+    if (shdr->sh_type == sym_section) {
       ELF_SYM  *syms;
       int j, n, rslt;
       size_t size;
@@ -163,6 +435,45 @@
     }
   }
 
+  // Look for a separate debuginfo file.
+  if (try_debuginfo) {
+
+    // We prefer a debug symtab to an object's own symtab, so look in
+    // the debuginfo file.  We stash a copy of the old symtab in case
+    // there is no debuginfo.
+    struct symtab* prev_symtab = symtab;
+    symtab = NULL;
+
+#ifdef NT_GNU_BUILD_ID
+    // First we look for a Build ID
+    for (cursct = shbuf, cnt = 0;
+         symtab == NULL && cnt < ehdr.e_shnum;
+         cnt++) {
+      if (cursct->sh_type == SHT_NOTE) {
+        Elf64_Nhdr *note = (Elf64_Nhdr *)scn_cache[cnt].c_data;
+        if (note->n_type == NT_GNU_BUILD_ID) {
+          symtab = build_symtab_from_build_id(note);
+        }
+      }
+      cursct++;
+    }
+#endif
+
+    // Then, if that doesn't work, the debug link
+    if (symtab == NULL) {
+      symtab = build_symtab_from_debug_link(filename, fd, &ehdr, shbuf,
+                                            scn_cache);
+    }
+
+    // If we still haven't found a symtab, use the object's own symtab.
+    if (symtab != NULL) {
+      if (prev_symtab != NULL)
+        destroy_symtab(prev_symtab);
+    } else {
+      symtab = prev_symtab;
+    }
+  }
+
 quit:
   if (shbuf) free(shbuf);
   if (phbuf) free(phbuf);
@@ -177,6 +488,11 @@
   return symtab;
 }
 
+struct symtab* build_symtab(int fd, const char *filename) {
+  return build_symtab_internal(fd, filename, /* try_debuginfo */ true);
+}
+
+
 void destroy_symtab(struct symtab* symtab) {
   if (!symtab) return;
   if (symtab->strs) free(symtab->strs);
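
Taken together, the new code makes build_symtab() prefer symbols from a separate debuginfo file. With try_debuginfo set, it first looks for an NT_GNU_BUILD_ID note and maps it to /usr/lib/debug/.build-id/xx/yyyy...yyyy.debug (first ID byte as a directory, then the remaining bytes in hex); failing that, it takes the name stored in .gnu_debuglink and tries the object's own directory, its .debug/ subdirectory, and /usr/lib/debug prepended to the object's full path, accepting a candidate only if its whole-file CRC matches the 32-bit value stored (4-byte aligned) after the name in the .gnu_debuglink section. If nothing matches, the object's own symtab is kept, which may now come from SHT_DYNSYM when no full SHT_SYMTAB is present. A small sketch of the CRC check, runnable against the gnu_debuglink_crc32() above (the input bytes are hypothetical; a real caller feeds the entire candidate file, as open_debug_file() does):

    #include <stdio.h>
    #include <string.h>

    /* provided by symtab.c above */
    unsigned int gnu_debuglink_crc32(unsigned int crc,
                                     unsigned char *buf, size_t len);

    int main(void) {
        unsigned char data[] = "hypothetical debuginfo bytes";
        unsigned int crc = gnu_debuglink_crc32(0, data, sizeof data - 1);
        /* open_debug_file() accepts the file only if this matches the
           CRC recorded in the .gnu_debuglink section */
        printf("crc = %08x\n", crc);
        return 0;
    }
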
--- a/agent/src/os/linux/symtab.h	Mon May 10 19:45:24 2010 -0700
+++ b/agent/src/os/linux/symtab.h	Thu May 13 18:26:35 2010 -0700
@@ -32,7 +32,7 @@
 struct symtab;
 
 // build symbol table for a given ELF file descriptor
-struct symtab* build_symtab(int fd);
+struct symtab* build_symtab(int fd, const char *filename);
 
 // destroy the symbol table
 void destroy_symtab(struct symtab* symtab);
--- a/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Mon May 10 19:45:24 2010 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2005-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2005-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -494,6 +494,68 @@
                 }
             }
         },
+        new Command("revptrs", "revptrs address", false) {
+            public void doit(Tokens t) {
+                int tokens = t.countTokens();
+                if (tokens != 1 && (tokens != 2 || !t.nextToken().equals("-c"))) {
+                    usage();
+                    return;
+                }
+                boolean chase = tokens == 2;
+                ReversePtrs revptrs = VM.getVM().getRevPtrs();
+                if (revptrs == null) {
+                    out.println("Computing reverse pointers...");
+                    ReversePtrsAnalysis analysis = new ReversePtrsAnalysis();
+                    final boolean[] complete = new boolean[1];
+                    HeapProgressThunk thunk = new HeapProgressThunk() {
+                            public void heapIterationFractionUpdate(double d) {}
+                            public synchronized void heapIterationComplete() {
+                                complete[0] = true;
+                                notify();
+                            }
+                        };
+                    analysis.setHeapProgressThunk(thunk);
+                    analysis.run();
+                    while (!complete[0]) {
+                        synchronized (thunk) {
+                            try {
+                                thunk.wait();
+                            } catch (Exception e) {
+                            }
+                        }
+                    }
+                    revptrs = VM.getVM().getRevPtrs();
+                    out.println("Done.");
+                }
+                Address a = VM.getVM().getDebugger().parseAddress(t.nextToken());
+                if (VM.getVM().getUniverse().heap().isInReserved(a)) {
+                    OopHandle handle = a.addOffsetToAsOopHandle(0);
+                    Oop oop = VM.getVM().getObjectHeap().newOop(handle);
+                    ArrayList ptrs = revptrs.get(oop);
+                    if (ptrs == null) {
+                        out.println("no live references to " + a);
+                    } else {
+                        if (chase) {
+                            while (ptrs.size() == 1) {
+                                LivenessPathElement e = (LivenessPathElement)ptrs.get(0);
+                                ByteArrayOutputStream bos = new ByteArrayOutputStream();
+                                Oop.printOopValueOn(e.getObj(), new PrintStream(bos));
+                                out.println(bos.toString());
+                                ptrs = revptrs.get(e.getObj());
+                            }
+                        } else {
+                            for (int i = 0; i < ptrs.size(); i++) {
+                                LivenessPathElement e = (LivenessPathElement)ptrs.get(i);
+                                ByteArrayOutputStream bos = new ByteArrayOutputStream();
+                                Oop.printOopValueOn(e.getObj(), new PrintStream(bos));
+                                out.println(bos.toString());
+                                oop = e.getObj();
+                            }
+                        }
+                    }
+                }
+            }
+        },
         new Command("inspect", "inspect expression", false) {
             public void doit(Tokens t) {
                 if (t.countTokens() != 1) {
@@ -816,8 +878,24 @@
                     dumpType(type);
                 } else {
                     Iterator i = agent.getTypeDataBase().getTypes();
+                    // Make sure the types are emitted in an order that can be read back in
+                    HashSet emitted = new HashSet();
+                    Stack pending = new Stack();
                     while (i.hasNext()) {
-                        dumpType((Type)i.next());
+                        Type n = (Type)i.next();
+                        if (emitted.contains(n.getName())) {
+                            continue;
+                        }
+
+                        while (n != null && !emitted.contains(n.getName())) {
+                            pending.push(n);
+                            n = n.getSuperclass();
+                        }
+                        while (!pending.empty()) {
+                            n = (Type)pending.pop();
+                            dumpType(n);
+                            emitted.add(n.getName());
+                        }
                     }
                 }
             }
@@ -846,83 +924,105 @@
 
             }
         },
-        new Command("search", "search [ heap | codecache | threads ] value", false) {
+        new Command("search", "search [ heap | perm | rawheap | codecache | threads ] value", false) {
             public void doit(Tokens t) {
                 if (t.countTokens() != 2) {
                     usage();
-                } else {
-                    String type = t.nextToken();
-                    final Address value = VM.getVM().getDebugger().parseAddress(t.nextToken());
-                    final long stride = VM.getVM().getAddressSize();
-                    if (type.equals("threads")) {
-                        Threads threads = VM.getVM().getThreads();
-                        for (JavaThread thread = threads.first(); thread != null; thread = thread.next()) {
-                            Address base = thread.getBaseOfStackPointer();
-                            Address end = thread.getLastJavaSP();
-                            if (end == null) continue;
-                            if (end.lessThan(base)) {
-                                Address tmp = base;
-                                base = end;
-                                end = tmp;
+                    return;
+                }
+                String type = t.nextToken();
+                final Address value = VM.getVM().getDebugger().parseAddress(t.nextToken());
+                final long stride = VM.getVM().getAddressSize();
+                if (type.equals("threads")) {
+                    Threads threads = VM.getVM().getThreads();
+                    for (JavaThread thread = threads.first(); thread != null; thread = thread.next()) {
+                        Address base = thread.getBaseOfStackPointer();
+                        Address end = thread.getLastJavaSP();
+                        if (end == null) continue;
+                        if (end.lessThan(base)) {
+                            Address tmp = base;
+                            base = end;
+                            end = tmp;
+                        }
+                        out.println("Searching " + base + " " + end);
+                        while (base != null && base.lessThan(end)) {
+                            Address val = base.getAddressAt(0);
+                            if (AddressOps.equal(val, value)) {
+                                out.println(base);
+                            }
+                            base = base.addOffsetTo(stride);
+                        }
+                    }
+                } else if (type.equals("rawheap")) {
+                    RawHeapVisitor iterator = new RawHeapVisitor() {
+                            public void prologue(long used) {
                             }
-                            out.println("Searching " + base + " " + end);
-                            while (base != null && base.lessThan(end)) {
-                                Address val = base.getAddressAt(0);
+
+                            public void visitAddress(Address addr) {
+                                Address val = addr.getAddressAt(0);
                                 if (AddressOps.equal(val, value)) {
-                                    out.println(base);
+                                    out.println("found at " + addr);
                                 }
-                                base = base.addOffsetTo(stride);
+                            }
+                            public void visitCompOopAddress(Address addr) {
+                                Address val = addr.getCompOopAddressAt(0);
+                                if (AddressOps.equal(val, value)) {
+                                    out.println("found at " + addr);
+                                }
                             }
-                        }
-                    } else if (type.equals("heap")) {
-                        RawHeapVisitor iterator = new RawHeapVisitor() {
-                                public void prologue(long used) {
-                                }
-
-                                public void visitAddress(Address addr) {
-                                    Address val = addr.getAddressAt(0);
+                            public void epilogue() {
+                            }
+                        };
+                    VM.getVM().getObjectHeap().iterateRaw(iterator);
+                } else if (type.equals("heap") || type.equals("perm")) {
+                    HeapVisitor iterator = new DefaultHeapVisitor() {
+                            public boolean doObj(Oop obj) {
+                                int index = 0;
+                                Address start = obj.getHandle();
+                                long end = obj.getObjectSize();
+                                while (index < end) {
+                                    Address val = start.getAddressAt(index);
                                     if (AddressOps.equal(val, value)) {
-                                        out.println("found at " + addr);
+                                        out.println("found in " + obj.getHandle());
+                                        break;
                                     }
-                                }
-                                public void visitCompOopAddress(Address addr) {
-                                    Address val = addr.getCompOopAddressAt(0);
-                                    if (AddressOps.equal(val, value)) {
-                                        out.println("found at " + addr);
-                                    }
-                                }
-                                public void epilogue() {
-                                }
-                            };
-                        VM.getVM().getObjectHeap().iterateRaw(iterator);
-                    } else if (type.equals("codecache")) {
-                        CodeCacheVisitor v = new CodeCacheVisitor() {
-                                public void prologue(Address start, Address end) {
+                                    index += 4;
                                 }
-                                public void visit(CodeBlob blob) {
-                                    boolean printed = false;
-                                    Address base = blob.getAddress();
-                                    Address end = base.addOffsetTo(blob.getSize());
-                                    while (base != null && base.lessThan(end)) {
-                                        Address val = base.getAddressAt(0);
-                                        if (AddressOps.equal(val, value)) {
-                                            if (!printed) {
-                                                printed = true;
-                                                blob.printOn(out);
-                                            }
-                                            out.println("found at " + base + "\n");
+                                return false;
+                            }
+                        };
+                    if (type.equals("heap")) {
+                        VM.getVM().getObjectHeap().iterate(iterator);
+                    } else {
+                        VM.getVM().getObjectHeap().iteratePerm(iterator);
+                    }
+                } else if (type.equals("codecache")) {
+                    CodeCacheVisitor v = new CodeCacheVisitor() {
+                            public void prologue(Address start, Address end) {
+                            }
+                            public void visit(CodeBlob blob) {
+                                boolean printed = false;
+                                Address base = blob.getAddress();
+                                Address end = base.addOffsetTo(blob.getSize());
+                                while (base != null && base.lessThan(end)) {
+                                    Address val = base.getAddressAt(0);
+                                    if (AddressOps.equal(val, value)) {
+                                        if (!printed) {
+                                            printed = true;
+                                            blob.printOn(out);
                                         }
-                                        base = base.addOffsetTo(stride);
+                                        out.println("found at " + base + "\n");
                                     }
+                                    base = base.addOffsetTo(stride);
                                 }
-                                public void epilogue() {
-                                }
+                            }
+                            public void epilogue() {
+                            }
 
 
-                            };
-                        VM.getVM().getCodeCache().iterate(v);
+                        };
+                    VM.getVM().getCodeCache().iterate(v);
 
-                    }
                 }
             }
         },
@@ -957,12 +1057,19 @@
                     Threads threads = VM.getVM().getThreads();
                     boolean all = name.equals("-a");
                     for (JavaThread thread = threads.first(); thread != null; thread = thread.next()) {
-                        StringWriter sw = new StringWriter();
                         ByteArrayOutputStream bos = new ByteArrayOutputStream();
                         thread.printThreadIDOn(new PrintStream(bos));
                         if (all || bos.toString().equals(name)) {
+                            out.println(bos.toString() + " = " + thread.getAddress());
                             HTMLGenerator gen = new HTMLGenerator(false);
-                            out.println(gen.genHTMLForJavaStackTrace(thread));
+                            try {
+                                out.println(gen.genHTMLForJavaStackTrace(thread));
+                            } catch (Exception e) {
+                                err.println("Error: " + e);
+                                if (verboseExceptions) {
+                                    e.printStackTrace(err);
+                                }
+                            }
                             if (!all) return;
                         }
                     }
@@ -970,6 +1077,26 @@
                 }
             }
         },
+        new Command("thread", "thread { -a | id }", false) {
+            public void doit(Tokens t) {
+                if (t.countTokens() != 1) {
+                    usage();
+                } else {
+                    String name = t.nextToken();
+                    Threads threads = VM.getVM().getThreads();
+                    boolean all = name.equals("-a");
+                    for (JavaThread thread = threads.first(); thread != null; thread = thread.next()) {
+                        ByteArrayOutputStream bos = new ByteArrayOutputStream();
+                        thread.printThreadIDOn(new PrintStream(bos));
+                        if (all || bos.toString().equals(name)) {
+                            out.println(bos.toString() + " = " + thread.getAddress());
+                            if (!all) return;
+                        }
+                    }
+                    out.println("Couldn't find thread " + name);
+                }
+            }
+        },
 
         new Command("threads", false) {
             public void doit(Tokens t) {
@@ -1161,7 +1288,7 @@
         }
     }
 
-    static Pattern historyPattern = Pattern.compile("((!\\*)|(!\\$)|(!!-?)|(!-?[0-9][0-9]*))");
+    static Pattern historyPattern = Pattern.compile("((!\\*)|(!\\$)|(!!-?)|(!-?[0-9][0-9]*)|(![a-zA-Z][^ ]*))");
 
     public void executeCommand(String ln) {
         if (ln.indexOf('!') != -1) {
@@ -1195,14 +1322,37 @@
                         result.append(item.at(item.countTokens() - 1));
                     } else {
                         String tail = cmd.substring(1);
-                        int index = Integer.parseInt(tail);
-                        if (index < 0) {
-                            index = history.size() + index;
+                        switch (tail.charAt(0)) {
+                        case '0':
+                        case '1':
+                        case '2':
+                        case '3':
+                        case '4':
+                        case '5':
+                        case '6':
+                        case '7':
+                        case '8':
+                        case '9':
+                        case '-': {
+                            int index = Integer.parseInt(tail);
+                            if (index < 0) {
+                                index = history.size() + index;
+                            }
+                            if (index > size) {
+                                err.println("No such history item");
+                            } else {
+                                result.append((String)history.get(index));
+                            }
+                            break;
                         }
-                        if (index > size) {
-                            err.println("No such history item");
-                        } else {
-                            result.append((String)history.get(index));
+                        default: {
+                            for (int i = history.size() - 1; i >= 0; i--) {
+                                String s = (String)history.get(i);
+                                if (s.startsWith(tail)) {
+                                    result.append(s);
+                                }
+                            }
+                        }
                         }
                     }
                 }
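
The CommandProcessor changes add a revptrs command (computing reverse pointers on demand, with -c to chase single-referent chains), a thread command that maps a thread ID to its address, ordered type dumps so that superclasses are emitted before their subclasses, a search command that now distinguishes heap, perm, and rawheap scans, and a hardened per-thread stack trace that reports exceptions instead of aborting. History expansion also grows a prefix form: !text now appends prior commands whose text starts with "text" (scanned newest to oldest), alongside the existing numeric !n and !-n forms.
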
--- a/agent/src/share/classes/sun/jvm/hotspot/HSDB.java	Mon May 10 19:45:24 2010 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/HSDB.java	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -985,6 +985,12 @@
               annoPanel.addAnnotation(new Annotation(curFrame.addressOfInterpreterFrameExpressionStack(),
                                                      curFrame.addressOfInterpreterFrameTOS(),
                                                      "Interpreter expression stack"));
+              Address monBegin = curFrame.interpreterFrameMonitorBegin().address();
+              Address monEnd = curFrame.interpreterFrameMonitorEnd().address();
+              if (!monBegin.equals(monEnd)) {
+                  annoPanel.addAnnotation(new Annotation(monBegin, monEnd,
+                                                         "BasicObjectLocks"));
+              }
               if (interpreterFrameMethod != null) {
                 // The offset is just to get the right stack slots highlighted in the output
                 int offset = 1;
--- a/agent/src/share/classes/sun/jvm/hotspot/bugspot/BugSpot.java	Mon May 10 19:45:24 2010 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/bugspot/BugSpot.java	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2003 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -294,7 +294,7 @@
     attachDialog.setSize(400, 300);
     GraphicsUtilities.centerInContainer(attachDialog.getComponent(),
                                         getParentDimension(attachDialog.getComponent()));
-    attachDialog.show();
+    attachDialog.setVisible(true);
   }
 
   public void showThreadsDialog() {
@@ -321,7 +321,7 @@
                                            getParentDimension(threadsDialog.getComponent()));
     GraphicsUtilities.centerInContainer(threadsDialog.getComponent(),
                                         getParentDimension(threadsDialog.getComponent()));
-    threadsDialog.show();
+    threadsDialog.setVisible(true);
   }
 
   public void showMemoryDialog() {
@@ -341,7 +341,7 @@
                                            getParentDimension(memoryDialog.getComponent()));
     GraphicsUtilities.centerInContainer(memoryDialog.getComponent(),
                                         getParentDimension(memoryDialog.getComponent()));
-    memoryDialog.show();
+    memoryDialog.setVisible(true);
   }
 
   /** Changes the editor factory this debugger uses to display source
@@ -530,7 +530,7 @@
       addFrame(stackFrame);
       stackFrame.setSize(400, 200);
       GraphicsUtilities.moveToInContainer(stackFrame.getComponent(), 0.0f, 1.0f, 0, 20);
-      stackFrame.show();
+      stackFrame.setVisible(true);
 
       // Create register panel
       registerPanel = new RegisterPanel();
@@ -544,7 +544,7 @@
       registerFrame.setSize(225, 200);
       GraphicsUtilities.moveToInContainer(registerFrame.getComponent(),
                                           1.0f, 0.0f, 0, 0);
-      registerFrame.show();
+      registerFrame.setVisible(true);
 
       resetCurrentThread();
     } catch (DebuggerException e) {
@@ -979,7 +979,7 @@
                                                1.0f,
                                                0.85f,
                                                getParentDimension(editorFrame.getComponent()));
-        editorFrame.show();
+        editorFrame.setVisible(true);
         shown = true;
       }
       code.showLineNumber(lineNo);
--- a/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeDisassembler.java	Mon May 10 19:45:24 2010 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeDisassembler.java	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2002 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2002-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -96,10 +96,6 @@
       addBytecodeClass(Bytecodes._dstore, BytecodeStore.class);
       addBytecodeClass(Bytecodes._astore, BytecodeStore.class);
       addBytecodeClass(Bytecodes._tableswitch, BytecodeTableswitch.class);
-
-      // only special fast_xxx cases. others are handled differently.
-      addBytecodeClass(Bytecodes._fast_iaccess_0, BytecodeFastAAccess0.class);
-      addBytecodeClass(Bytecodes._fast_aaccess_0, BytecodeFastIAccess0.class);
    }
 
    public BytecodeDisassembler(Method method) {
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java	Mon May 10 19:45:24 2010 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -263,11 +263,12 @@
     case JVM_CONSTANT_NameAndType:        return "JVM_CONSTANT_NameAndType";
     case JVM_CONSTANT_Invalid:            return "JVM_CONSTANT_Invalid";
     case JVM_CONSTANT_UnresolvedClass:    return "JVM_CONSTANT_UnresolvedClass";
+    case JVM_CONSTANT_UnresolvedClassInError:    return "JVM_CONSTANT_UnresolvedClassInError";
     case JVM_CONSTANT_ClassIndex:         return "JVM_CONSTANT_ClassIndex";
     case JVM_CONSTANT_UnresolvedString:   return "JVM_CONSTANT_UnresolvedString";
     case JVM_CONSTANT_StringIndex:        return "JVM_CONSTANT_StringIndex";
     }
-    throw new InternalError("unknown tag");
+    throw new InternalError("Unknown tag: " + tag);
   }
 
   public void iterateFields(OopVisitor visitor, boolean doVMFields) {
@@ -304,6 +305,7 @@
           index++;
           break;
 
+        case JVM_CONSTANT_UnresolvedClassInError:
         case JVM_CONSTANT_UnresolvedClass:
         case JVM_CONSTANT_Class:
         case JVM_CONSTANT_UnresolvedString:
@@ -409,6 +411,7 @@
               }
 
               // case JVM_CONSTANT_ClassIndex:
+              case JVM_CONSTANT_UnresolvedClassInError:
               case JVM_CONSTANT_UnresolvedClass: {
                   dos.writeByte(JVM_CONSTANT_Class);
                   String klassName = getSymbolAt(ci).asString();
@@ -464,6 +467,8 @@
                                           + ", type = " + signatureIndex);
                   break;
               }
+              default:
+                  throw new InternalError("unknown tag: " + cpConstType);
           } // switch
       }
       dos.flush();
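
JVM_CONSTANT_UnresolvedClassInError is the VM-internal tag (104, added to ClassConstants.java below) for an unresolved class entry whose earlier resolution attempt failed. Because that error state has no class-file representation, both this writer and ClassWriter.java emit such entries as a plain JVM_CONSTANT_Class with the class name taken from the symbol.
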
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/ClassConstants.java	Mon May 10 19:45:24 2010 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/ClassConstants.java	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2002-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2002-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -58,6 +58,9 @@
     // Temporary tag while constructing constant pool
     public static final int JVM_CONSTANT_StringIndex        = 103;
 
+    // Temporary tag while constructing constant pool
+    public static final int JVM_CONSTANT_UnresolvedClassInError = 104;
+
     // 1.5 major/minor version numbers from JVM spec. 3rd edition
     public static final short MAJOR_VERSION = 49;
     public static final short MINOR_VERSION = 0;
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/SignatureIterator.java	Mon May 10 19:45:24 2010 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/SignatureIterator.java	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -108,7 +108,7 @@
         return BasicTypeSize.getTArraySize();
       }
     }
-    throw new RuntimeException("Should not reach here");
+    throw new RuntimeException("Should not reach here: char " + (char)_signature.getByteAt(_index) + " @ " + _index + " in " + _signature.asString());
   }
   protected void checkSignatureEnd() {
     if (_index < _signature.getLength()) {
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassWriter.java	Mon May 10 19:45:24 2010 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassWriter.java	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2002-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2002-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -238,6 +238,7 @@
                 }
 
                 // case JVM_CONSTANT_ClassIndex:
+                case JVM_CONSTANT_UnresolvedClassInError:
                 case JVM_CONSTANT_UnresolvedClass: {
                      dos.writeByte(JVM_CONSTANT_Class);
                      String klassName = cpool.getSymbolAt(ci).asString();
@@ -296,6 +297,8 @@
                                         + ", type = " + signatureIndex);
                      break;
                 }
+                default:
+                  throw new InternalError("Unknown tag: " + cpConstType);
             } // switch
         }
     }
--- a/agent/src/share/classes/sun/jvm/hotspot/ui/FrameWrapper.java	Mon May 10 19:45:24 2010 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/ui/FrameWrapper.java	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,7 +39,6 @@
   public void       setVisible(boolean visible);
   public void       setSize(int x, int y);
   public void       pack();
-  public void       show();
   public void       dispose();
   public void       setBackground(Color color);
   public void       setResizable(boolean resizable);
--- a/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java	Mon May 10 19:45:24 2010 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2002-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2002-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -95,8 +95,10 @@
 
         // list tags
         void beginList()  { beginTag("ul"); nl(); }
+        void endList()    { endTag("ul"); nl();   }
+        void beginListItem() { beginTag("li"); }
+        void endListItem()   { endTag("li"); nl();   }
         void li(String s) { wrap("li", s); nl();  }
-        void endList()    { endTag("ul"); nl();   }
 
         // table tags
         void beginTable(int border) {
@@ -505,6 +507,11 @@
                buf.cell(cpool.getSymbolAt(index).asString());
                break;
 
+            case JVM_CONSTANT_UnresolvedClassInError:
+               buf.cell("JVM_CONSTANT_UnresolvedClassInError");
+               buf.cell(cpool.getSymbolAt(index).asString());
+               break;
+
             case JVM_CONSTANT_Class:
                buf.cell("JVM_CONSTANT_Class");
                Klass klass = (Klass) cpool.getObjAt(index);
@@ -564,6 +571,9 @@
                buf.cell("JVM_CONSTANT_StringIndex");
                buf.cell(Integer.toString(cpool.getIntAt(index)));
                break;
+
+            default:
+               throw new InternalError("unknown tag: " + ctag);
          }
 
          buf.endTag("tr");
@@ -671,7 +681,16 @@
                              buf.cell(Integer.toString(curBci) + spaces);
 
                              buf.beginTag("td");
-                             String instrStr = escapeHTMLSpecialChars(instr.toString());
+                             String instrStr = null;
+                             try {
+                                 instrStr = escapeHTMLSpecialChars(instr.toString());
+                             } catch (RuntimeException re) {
+                                 buf.append("exception during bytecode processing");
+                                 buf.endTag("td");
+                                 buf.endTag("tr");
+                                 re.printStackTrace();
+                                 return;
+                             }
 
                              if (instr instanceof BytecodeNew) {
                                 BytecodeNew newBytecode = (BytecodeNew) instr;
@@ -1396,9 +1415,7 @@
          final SymbolFinder symFinder = createSymbolFinder();
          final Disassembler disasm = createDisassembler(startPc, code);
          class NMethodVisitor implements InstructionVisitor {
-            boolean prevWasCall;
             public void prologue() {
-               prevWasCall = false;
             }
 
             public void visit(long currentPc, Instruction instr) {
@@ -1418,8 +1435,7 @@
 
                PCDesc pcDesc = (PCDesc) safepoints.get(longToAddress(currentPc));
 
-               boolean isSafepoint = (pcDesc != null);
-               if (isSafepoint && prevWasCall) {
+               if (pcDesc != null) {
                   buf.append(genSafepointInfo(nmethod, pcDesc));
                }
 
@@ -1435,11 +1451,6 @@
                }
 
                buf.br();
-               if (isSafepoint && !prevWasCall) {
-                 buf.append(genSafepointInfo(nmethod, pcDesc));
-               }
-
-               prevWasCall = instr.isCall();
             }
 
             public void epilogue() {
@@ -1783,22 +1794,20 @@
          buf.h3("Fields");
          buf.beginList();
          for (int f = 0; f < numFields; f += InstanceKlass.NEXT_OFFSET) {
-           int nameIndex = fields.getShortAt(f + InstanceKlass.NAME_INDEX_OFFSET);
-           int sigIndex  = fields.getShortAt(f + InstanceKlass.SIGNATURE_INDEX_OFFSET);
-           int genSigIndex = fields.getShortAt(f + InstanceKlass.GENERIC_SIGNATURE_INDEX_OFFSET);
-           Symbol f_name = cp.getSymbolAt(nameIndex);
-           Symbol f_sig  = cp.getSymbolAt(sigIndex);
-           Symbol f_genSig = (genSigIndex != 0)? cp.getSymbolAt(genSigIndex) : null;
-           AccessFlags acc = new AccessFlags(fields.getShortAt(f + InstanceKlass.ACCESS_FLAGS_OFFSET));
+           sun.jvm.hotspot.oops.Field field = klass.getFieldByIndex(f);
+           String f_name = ((NamedFieldIdentifier)field.getID()).getName();
+           Symbol f_sig  = field.getSignature();
+           Symbol f_genSig = field.getGenericSignature();
+           AccessFlags acc = field.getAccessFlagsObj();
 
-           buf.beginTag("li");
+           buf.beginListItem();
            buf.append(genFieldModifierString(acc));
            buf.append(' ');
            Formatter sigBuf = new Formatter(genHTML);
            new SignatureConverter(f_sig, sigBuf.getBuffer()).dispatchField();
            buf.append(sigBuf.toString().replace('/', '.'));
            buf.append(' ');
-           buf.append(f_name.asString());
+           buf.append(f_name);
            buf.append(';');
            // is it generic?
            if (f_genSig != null) {
@@ -1806,7 +1815,8 @@
               buf.append(escapeHTMLSpecialChars(f_genSig.asString()));
               buf.append("] ");
            }
-           buf.endTag("li");
+           buf.append(" (offset = " + field.getOffset() + ")");
+           buf.endListItem();
          }
          buf.endList();
       }
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/Assert.java	Mon May 10 19:45:24 2010 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/Assert.java	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2005 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
   public static boolean ASSERTS_ENABLED = true;
 
   public static void that(boolean test, String message) {
-    if (!test) {
+    if (ASSERTS_ENABLED && !test) {
       throw new AssertionFailure(message);
     }
   }
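
Note the semantics change in Assert.that(): previously the check threw even when ASSERTS_ENABLED was false, so the flag had no effect; it now actually gates the assertion.
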
--- a/make/hotspot_version	Mon May 10 19:45:24 2010 -0700
+++ b/make/hotspot_version	Thu May 13 18:26:35 2010 -0700
@@ -31,11 +31,11 @@
 #
 
 # Don't put quotes (fail windows build).
-HOTSPOT_VM_COPYRIGHT=Copyright 2009
+HOTSPOT_VM_COPYRIGHT=Copyright 2010
 
-HS_MAJOR_VER=17
+HS_MAJOR_VER=18
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=10
+HS_BUILD_NUMBER=04
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7
--- a/make/linux/makefiles/adlc.make	Mon May 10 19:45:24 2010 -0700
+++ b/make/linux/makefiles/adlc.make	Thu May 13 18:26:35 2010 -0700
@@ -127,6 +127,9 @@
 # Note that product files are updated via "mv", which is atomic.
 TEMPDIR := $(OUTDIR)/mktmp$(shell echo $$$$)
 
+# Debuggable by default
+CFLAGS += -g
+
 # Pass -D flags into ADLC.
 ADLCFLAGS += $(SYSDEFS)
 
@@ -135,7 +138,7 @@
 
 # Normally, debugging is done directly on the ad_<arch>*.cpp files.
 # But -g will put #line directives in those files pointing back to <arch>.ad.
-#ADLCFLAGS += -g
+ADLCFLAGS += -g
 
 ifdef LP64
 ADLCFLAGS += -D_LP64
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/linux/makefiles/build_vm_def.sh	Thu May 13 18:26:35 2010 -0700
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+nm --defined-only $* | awk '
+   { if ($3 ~ /^_ZTV/ || $3 ~ /^gHotSpotVM/) print "\t" $3 ";" }
+   '
--- a/make/linux/makefiles/mapfile-vers-debug	Mon May 10 19:45:24 2010 -0700
+++ b/make/linux/makefiles/mapfile-vers-debug	Thu May 13 18:26:35 2010 -0700
@@ -291,6 +291,9 @@
 
                 # This is for Forte Analyzer profiling support.
                 AsyncGetCallTrace;
+
+		# INSERT VTABLE SYMBOLS HERE
+
         local:
                 *;
 };
--- a/make/linux/makefiles/mapfile-vers-product	Mon May 10 19:45:24 2010 -0700
+++ b/make/linux/makefiles/mapfile-vers-product	Thu May 13 18:26:35 2010 -0700
@@ -286,6 +286,9 @@
 
                 # This is for Forte Analyzer profiling support.
                 AsyncGetCallTrace;
+
+		# INSERT VTABLE SYMBOLS HERE
+
         local:
                 *;
 };
--- a/make/linux/makefiles/vm.make	Mon May 10 19:45:24 2010 -0700
+++ b/make/linux/makefiles/vm.make	Thu May 13 18:26:35 2010 -0700
@@ -121,14 +121,21 @@
 
 vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES))
 
-mapfile : $(MAPFILE)
+mapfile : $(MAPFILE) vm.def
 	rm -f $@
-	cat $^ > $@
+	awk '{ if ($$0 ~ "INSERT VTABLE SYMBOLS HERE")	\
+                 { system ("cat vm.def"); }		\
+               else					\
+                 { print $$0 }				\
+             }' > $@ < $(MAPFILE)
 
 mapfile_reorder : mapfile $(REORDERFILE)
 	rm -f $@
 	cat $^ > $@
 
+vm.def: $(Res_Files) $(Obj_Files)
+	sh $(GAMMADIR)/make/linux/makefiles/build_vm_def.sh *.o > $@
+
 ifeq ($(ZERO_LIBARCH), ppc64)
   STATIC_CXX = false
 else
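
The vm.def machinery works as follows: build_vm_def.sh runs nm --defined-only over the VM's object files and keeps only C++ vtable symbols (mangled names beginning _ZTV) and gHotSpotVM* globals, printing each as a linker map entry; the mapfile rule then splices vm.def into the version script in place of the "# INSERT VTABLE SYMBOLS HERE" marker, so those symbols stay global while everything else remains local. As a worked illustration with a hypothetical symbol, an nm line such as

    00000000004a1b20 V _ZTV10JavaThread

(the vtable for JavaThread) becomes the tab-indented vm.def entry

    	_ZTV10JavaThread;

which lands in the generated mapfile's global list.
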
--- a/make/solaris/makefiles/adlc.make	Mon May 10 19:45:24 2010 -0700
+++ b/make/solaris/makefiles/adlc.make	Thu May 13 18:26:35 2010 -0700
@@ -147,6 +147,9 @@
 # Note that product files are updated via "mv", which is atomic.
 TEMPDIR := $(OUTDIR)/mktmp$(shell echo $$$$)
 
+# Debuggable by default
+CFLAGS += -g
+
 # Pass -D flags into ADLC.
 ADLCFLAGS += $(SYSDEFS)
 
@@ -155,7 +158,7 @@
 
 # Normally, debugging is done directly on the ad_<arch>*.cpp files.
 # But -g will put #line directives in those files pointing back to <arch>.ad.
-#ADLCFLAGS += -g
+ADLCFLAGS += -g
 
 ifdef LP64
 ADLCFLAGS += -D_LP64
--- a/make/windows/build.bat	Mon May 10 19:45:24 2010 -0700
+++ b/make/windows/build.bat	Thu May 13 18:26:35 2010 -0700
@@ -28,6 +28,9 @@
 REM Since we don't have uname and we could be cross-compiling,
 REM Use the compiler to determine which ARCH we are building
 REM 
+REM Note: Running this batch file from the Windows command shell requires
+REM that "grep" be accessible on the PATH. An MKS install does this.
+REM 
 cl 2>&1 | grep "IA-64" >NUL
 if %errorlevel% == 0 goto isia64
 cl 2>&1 | grep "AMD64" >NUL
@@ -57,11 +60,12 @@
 if "%1" == "product"   goto test1
 if "%1" == "debug"     goto test1
 if "%1" == "fastdebug" goto test1
+if "%1" == "tree"      goto test1
 goto usage
 
 :test1
 if "%2" == "core"      goto test2
-if "%2" == "kernel"   goto test2
+if "%2" == "kernel"    goto test2
 if "%2" == "compiler1" goto test2
 if "%2" == "compiler2" goto test2
 if "%2" == "tiered"    goto test2
@@ -70,6 +74,7 @@
 goto usage
 
 :test2
+if "%1" == "tree"      goto build_tree
 REM check_j2se_version
 REM jvmti.make requires J2SE 1.4.x or newer.
 REM If not found then fail fast.
@@ -93,6 +98,10 @@
 nmake -f %3/make/windows/build.make Variant=compiler2 WorkSpace=%3 BootStrapDir=%4 BuildUser="%USERNAME%" HOTSPOT_BUILD_VERSION=%5 ADLC_ONLY=1 %1
 goto end
 
+:build_tree
+nmake -f %3/make/windows/build.make Variant=%2 WorkSpace=%3 BootStrapDir=%4 BuildUser="%USERNAME%" HOTSPOT_BUILD_VERSION="%5" %1
+goto end
+
 :usage
 echo Usage: build flavor version workspace bootstrap_dir [build_id] [windbg_home]
 echo.
@@ -100,8 +109,10 @@
 echo flavor is "product", "debug" or "fastdebug",
 echo version is "core", "kernel", "compiler1", "compiler2", or "tiered",
 echo workspace is source directory without trailing slash, 
-echo bootstrap_dir is a full path to echo a JDK in which bin/java 
-echo   and bin/javac are present and working, and echo build_id is an 
+echo bootstrap_dir is a full path to a JDK in which bin/java 
+echo   and bin/javac are present and working, and build_id is an 
 echo   optional build identifier displayed by java -version
+exit /b 1
 
 :end
+exit /b %errorlevel%
--- a/make/windows/build.make	Mon May 10 19:45:24 2010 -0700
+++ b/make/windows/build.make	Thu May 13 18:26:35 2010 -0700
@@ -27,6 +27,9 @@
 # environment variables (Variant, WorkSpace, BootStrapDir, BuildUser, HOTSPOT_BUILD_VERSION)
 # are passed in as command line arguments.
 
+# Note: Running nmake or build.bat from the Windows command shell requires
+# that "sh" be accessible on the PATH. An MKS install does this.
+
 # SA components are built if BUILD_WIN_SA=1 is specified.
 # See notes in README. This produces files:
 #  1. sa-jdi.jar       - This is built before building jvm.dll
@@ -233,6 +236,12 @@
 	cd $(variantDir)
 	nmake -nologo -f $(WorkSpace)\make\windows\makefiles\top.make BUILD_FLAVOR=product DEVELOP=1 ARCH=$(ARCH)
 
+# target to create just the directory structure
+tree: checks $(variantDir) $(variantDir)\local.make sanity
+	mkdir $(variantDir)\product
+	mkdir $(variantDir)\debug
+	mkdir $(variantDir)\fastdebug
+
 sanity:
 	@ echo;
 	@ cd $(variantDir)
--- a/make/windows/create.bat	Mon May 10 19:45:24 2010 -0700
+++ b/make/windows/create.bat	Thu May 13 18:26:35 2010 -0700
@@ -36,6 +36,9 @@
 REM Since we don't have uname and we could be cross-compiling,
 REM Use the compiler to determine which ARCH we are building
 REM 
+REM Note: Running this batch file from the Windows command shell requires
+REM that "grep" be accessible on the PATH. An MKS install does this.
+REM 
 cl 2>&1 | grep "IA-64" >NUL
 if %errorlevel% == 0 goto isia64
 cl 2>&1 | grep "AMD64" >NUL
--- a/make/windows/get_msc_ver.sh	Mon May 10 19:45:24 2010 -0700
+++ b/make/windows/get_msc_ver.sh	Thu May 13 18:26:35 2010 -0700
@@ -22,6 +22,8 @@
 #  
 #
 
+set -e
+
 # This shell script echoes "MSC_VER=<munged version of cl>"
 # It ignores the micro version component.
 # Examples:
@@ -38,17 +40,20 @@
 # sh, and it has been found that sometimes `which sh` fails.
 
 if [ "x$HotSpotMksHome" != "x" ]; then
- MKS_HOME="$HotSpotMksHome"
+  TOOL_DIR="$HotSpotMksHome"
 else
- SH=`which sh`
- MKS_HOME=`dirname "$SH"`
+  # HotSpotMksHome is not set so use the directory that contains "sh".
+  # This works with both MKS and Cygwin.
+  SH=`which sh`
+  TOOL_DIR=`dirname "$SH"`
 fi
 
-HEAD="$MKS_HOME/head"
-ECHO="$MKS_HOME/echo"
-EXPR="$MKS_HOME/expr"
-CUT="$MKS_HOME/cut"
-SED="$MKS_HOME/sed"
+DIRNAME="$TOOL_DIR/dirname"
+HEAD="$TOOL_DIR/head"
+ECHO="$TOOL_DIR/echo"
+EXPR="$TOOL_DIR/expr"
+CUT="$TOOL_DIR/cut"
+SED="$TOOL_DIR/sed"
 
 if [ "x$FORCE_MSC_VER" != "x" ]; then
   echo "MSC_VER=$FORCE_MSC_VER"
@@ -70,7 +75,15 @@
 if [ "x$FORCE_LINK_VER" != "x" ]; then
   echo "LINK_VER=$FORCE_LINK_VER"
 else
-  LINK_VER_RAW=`link 2>&1 | "$HEAD" -n 1 | "$SED" 's/.*Version[\ ]*\([0-9][0-9.]*\).*/\1/'`
+  # use the "link" command that is co-located with the "cl" command
+  cl_cmd=`which cl`
+  if [ "x$cl_cmd" != "x" ]; then
+    link_cmd=`$DIRNAME "$cl_cmd"`/link
+  else
+    # "which" can't find "cl", so just use whichever "link" we find
+    link_cmd="link"
+  fi
+  LINK_VER_RAW=`"$link_cmd" 2>&1 | "$HEAD" -n 1 | "$SED" 's/.*Version[\ ]*\([0-9][0-9.]*\).*/\1/'`
   LINK_VER_MAJOR=`"$ECHO" $LINK_VER_RAW | "$CUT" -d'.' -f1`
   LINK_VER_MINOR=`"$ECHO" $LINK_VER_RAW | "$CUT" -d'.' -f2`
   LINK_VER_MICRO=`"$ECHO" $LINK_VER_RAW | "$CUT" -d'.' -f3`
--- a/src/cpu/sparc/vm/assembler_sparc.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/sparc/vm/assembler_sparc.hpp	Thu May 13 18:26:35 2010 -0700
@@ -661,9 +661,6 @@
     stx_op3      = 0x0e,
     swap_op3     = 0x0f,
 
-    lduwa_op3    = 0x10,
-    ldxa_op3     = 0x1b,
-
     stwa_op3     = 0x14,
     stxa_op3     = 0x1e,
 
--- a/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -377,6 +377,16 @@
 
 }
 
+
+void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
+  __ bind(_entry);
+  __ call(SharedRuntime::deopt_blob()->unpack_with_reexecution());
+  __ delayed()->nop();
+  ce->add_call_info_here(_info);
+  debug_only(__ should_not_reach_here());
+}
+
+
 void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
   //---------------slow case: call to native-----------------
   __ bind(_entry);
--- a/src/cpu/sparc/vm/c1_FrameMap_sparc.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/sparc/vm/c1_FrameMap_sparc.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -143,3 +143,6 @@
 
   static bool is_caller_save_register (LIR_Opr  reg);
   static bool is_caller_save_register (Register r);
+
+  // JSR 292
+  static LIR_Opr& method_handle_invoke_SP_save_opr() { return L7_opr; }
--- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Thu May 13 18:26:35 2010 -0700
@@ -378,12 +378,7 @@
 
   int offset = code_offset();
 
-  if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_on_exceptions()) {
-    __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
-    __ delayed()->nop();
-  }
-
-  __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
+  __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
   __ delayed()->nop();
   debug_only(__ stop("should have gone to the caller");)
   assert(code_offset() - offset <= exception_handler_size, "overflow");
@@ -393,6 +388,60 @@
 }
 
 
+// Emit the code to remove the frame from the stack in the exception
+// unwind path.
+int LIR_Assembler::emit_unwind_handler() {
+#ifndef PRODUCT
+  if (CommentedAssembly) {
+    _masm->block_comment("Unwind handler");
+  }
+#endif
+
+  int offset = code_offset();
+
+  // Fetch the exception from TLS and clear out exception related thread state
+  __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), O0);
+  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
+  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
+
+  __ bind(_unwind_handler_entry);
+  __ verify_not_null_oop(O0);
+  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
+    __ mov(O0, I0);  // Preserve the exception
+  }
+
+  // Perform needed unlocking
+  MonitorExitStub* stub = NULL;
+  if (method()->is_synchronized()) {
+    monitor_address(0, FrameMap::I1_opr);
+    stub = new MonitorExitStub(FrameMap::I1_opr, true, 0);
+    __ unlock_object(I3, I2, I1, *stub->entry());
+    __ bind(*stub->continuation());
+  }
+
+  if (compilation()->env()->dtrace_method_probes()) {
+    jobject2reg(method()->constant_encoding(), O0);
+    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), relocInfo::runtime_call_type);
+    __ delayed()->nop();
+  }
+
+  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
+    __ mov(I0, O0);  // Restore the exception
+  }
+
+  // dispatch to the unwind logic
+  __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
+  __ delayed()->nop();
+
+  // Emit the slow path assembly
+  if (stub != NULL) {
+    stub->emit_code(this);
+  }
+
+  return offset;
+}
+
+
 int LIR_Assembler::emit_deopt_handler() {
   // if the last instruction is a call (typically to do a throw which
   // is coming at the end after block reordering) the return address
@@ -685,29 +734,29 @@
 }
 
 
-void LIR_Assembler::call(address entry, relocInfo::relocType rtype, CodeEmitInfo* info) {
-  __ call(entry, rtype);
+void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
+  __ call(op->addr(), rtype);
   // the peephole pass fills the delay slot
 }
 
 
-void LIR_Assembler::ic_call(address entry, CodeEmitInfo* info) {
+void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
   RelocationHolder rspec = virtual_call_Relocation::spec(pc());
   __ set_oop((jobject)Universe::non_oop_word(), G5_inline_cache_reg);
   __ relocate(rspec);
-  __ call(entry, relocInfo::none);
+  __ call(op->addr(), relocInfo::none);
   // the peephole pass fills the delay slot
 }
 
 
-void LIR_Assembler::vtable_call(int vtable_offset, CodeEmitInfo* info) {
-  add_debug_info_for_null_check_here(info);
+void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
+  add_debug_info_for_null_check_here(op->info());
   __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), G3_scratch);
-  if (__ is_simm13(vtable_offset) ) {
-    __ ld_ptr(G3_scratch, vtable_offset, G5_method);
+  if (__ is_simm13(op->vtable_offset())) {
+    __ ld_ptr(G3_scratch, op->vtable_offset(), G5_method);
   } else {
     // This will generate 2 instructions
-    __ set(vtable_offset, G5_method);
+    __ set(op->vtable_offset(), G5_method);
     // ld_ptr, set_hi, set
     __ ld_ptr(G3_scratch, G5_method, G5_method);
   }
@@ -717,6 +766,16 @@
 }
 
 
+void LIR_Assembler::preserve_SP(LIR_OpJavaCall* op) {
+  Unimplemented();
+}
+
+
+void LIR_Assembler::restore_SP(LIR_OpJavaCall* op) {
+  Unimplemented();
+}
+
+
 // load with 32-bit displacement
 int LIR_Assembler::load(Register s, int disp, Register d, BasicType ld_type, CodeEmitInfo *info) {
   int load_offset = code_offset();
@@ -1067,7 +1126,8 @@
   LIR_Const* c = src->as_constant_ptr();
   switch (c->type()) {
     case T_INT:
-    case T_FLOAT: {
+    case T_FLOAT:
+    case T_ADDRESS: {
       Register src_reg = O7;
       int value = c->as_jint_bits();
       if (value == 0) {
@@ -1123,7 +1183,8 @@
   }
   switch (c->type()) {
     case T_INT:
-    case T_FLOAT: {
+    case T_FLOAT:
+    case T_ADDRESS: {
       LIR_Opr tmp = FrameMap::O7_opr;
       int value = c->as_jint_bits();
       if (value == 0) {
@@ -1195,6 +1256,7 @@
 
   switch (c->type()) {
     case T_INT:
+    case T_ADDRESS:
       {
         jint con = c->as_jint();
         if (to_reg->is_single_cpu()) {
@@ -1720,9 +1782,13 @@
       ShouldNotReachHere();
     }
   } else if (code == lir_cmp_l2i) {
+#ifdef _LP64
+    __ lcmp(left->as_register_lo(), right->as_register_lo(), dst->as_register());
+#else
     __ lcmp(left->as_register_hi(),  left->as_register_lo(),
             right->as_register_hi(), right->as_register_lo(),
             dst->as_register());
+#endif
   } else {
     ShouldNotReachHere();
   }
@@ -2038,26 +2104,29 @@
 }
 
 
-void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info, bool unwind) {
+void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
   assert(exceptionOop->as_register() == Oexception, "should match");
-  assert(unwind || exceptionPC->as_register() == Oissuing_pc, "should match");
+  assert(exceptionPC->as_register() == Oissuing_pc, "should match");
 
   info->add_register_oop(exceptionOop);
 
-  if (unwind) {
-    __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
-    __ delayed()->nop();
-  } else {
-    // reuse the debug info from the safepoint poll for the throw op itself
-    address pc_for_athrow  = __ pc();
-    int pc_for_athrow_offset = __ offset();
-    RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
-    __ set(pc_for_athrow, Oissuing_pc, rspec);
-    add_call_info(pc_for_athrow_offset, info); // for exception handler
-
-    __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
-    __ delayed()->nop();
-  }
+  // reuse the debug info from the safepoint poll for the throw op itself
+  address pc_for_athrow  = __ pc();
+  int pc_for_athrow_offset = __ offset();
+  RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
+  __ set(pc_for_athrow, Oissuing_pc, rspec);
+  add_call_info(pc_for_athrow_offset, info); // for exception handler
+
+  __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
+  __ delayed()->nop();
+}
+
+
+void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
+  assert(exceptionOop->as_register() == Oexception, "should match");
+
+  __ br(Assembler::always, false, Assembler::pt, _unwind_handler_entry);
+  __ delayed()->nop();
 }
 
 
@@ -2346,7 +2415,7 @@
   if (UseSlowPath ||
       (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
       (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
-    __ br(Assembler::always, false, Assembler::pn, *op->stub()->entry());
+    __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
     __ delayed()->nop();
   } else {
     __ allocate_array(op->obj()->as_register(),
@@ -2841,7 +2910,7 @@
 
 
 void LIR_Assembler::align_backward_branch_target() {
-  __ align(16);
+  __ align(OptoLoopAlignment);
 }
 
 
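The new emit_unwind_handler() above gives C1 a single per-method exit
path for exceptions that propagate out of the method: fetch the pending
exception from thread-local state and clear it, release the receiver's
monitor for synchronized methods, fire the optional dtrace exit probe,
then dispatch to the shared unwind stub. The mov(O0, I0)/mov(I0, O0)
pair parks the exception in a callee-saved register across those calls.
A minimal C++ sketch of the same shape (helper names hypothetical):

    #include <cstddef>

    struct Thread {
      void* exception_oop;
      void* exception_pc;
    };

    void unlock_receiver();      // hypothetical: releases the method's monitor
    void dtrace_method_exit();   // hypothetical: fires the DTrace exit probe

    // The sequence the generated unwind handler performs.
    void* unwind_handler(Thread* t, bool is_synchronized, bool dtrace_probes) {
      void* exception = t->exception_oop;  // fetch the exception from TLS...
      t->exception_oop = NULL;             // ...and clear the thread state
      t->exception_pc  = NULL;
      if (is_synchronized) unlock_receiver();
      if (dtrace_probes)   dtrace_method_exit();
      return exception;                    // handed to unwind_exception_id
    }
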
--- a/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,17 +42,6 @@
 }
 
 
-void C1_MacroAssembler::method_exit(bool restore_frame) {
-  // this code must be structured this way so that the return
-  // instruction can be a safepoint.
-  if (restore_frame) {
-    restore();
-  }
-  retl();
-  delayed()->nop();
-}
-
-
 void C1_MacroAssembler::explicit_null_check(Register base) {
   Unimplemented();
 }
--- a/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -677,7 +677,7 @@
         __ add(I7, frame::pc_return_offset, Oissuing_pc->after_save());
 
         __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
-                        Oissuing_pc->after_save());
+                        G2_thread, Oissuing_pc->after_save());
         __ verify_not_null_oop(Oexception->after_save());
         __ jmp(O0, 0);
         __ delayed()->restore();
@@ -985,7 +985,6 @@
 
 void Runtime1::generate_handle_exception(StubAssembler* sasm, OopMapSet* oop_maps, OopMap* oop_map, bool) {
   Label no_deopt;
-  Label no_handler;
 
   __ verify_not_null_oop(Oexception);
 
@@ -1003,9 +1002,14 @@
   // whether it had a handler or not we will deoptimize
   // by entering the deopt blob with a pending exception.
 
+#ifdef ASSERT
+  Label done;
   __ tst(O0);
-  __ br(Assembler::zero, false, Assembler::pn, no_handler);
+  __ br(Assembler::notZero, false, Assembler::pn, done);
   __ delayed()->nop();
+  __ stop("should have found address");
+  __ bind(done);
+#endif
 
   // restore the registers that were saved at the beginning and jump to the exception handler.
   restore_live_registers(sasm);
@@ -1013,20 +1017,6 @@
   __ jmp(O0, 0);
   __ delayed()->restore();
 
-  __ bind(no_handler);
-  __ mov(L0, I7); // restore return address
-
-  // restore exception oop
-  __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception->after_save());
-  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
-
-  __ restore();
-
-  AddressLiteral exc(Runtime1::entry_for(Runtime1::unwind_exception_id));
-  __ jump_to(exc, G4);
-  __ delayed()->nop();
-
-
   oop_maps->add_gc_map(call_offset, oop_map);
 }
 
--- a/src/cpu/sparc/vm/c2_globals_sparc.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/sparc/vm/c2_globals_sparc.hpp	Thu May 13 18:26:35 2010 -0700
@@ -60,9 +60,6 @@
 define_pd_global(intx, INTPRESSURE,                  48);  // large register set
 define_pd_global(intx, InteriorEntryAlignment,       16);  // = CodeEntryAlignment
 define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K));
-// The default setting 16/16 seems to work best.
-// (For _228_jack 16/16 is 2% better than 4/4, 16/4, 32/32, 32/16, or 16/32.)
-define_pd_global(intx, OptoLoopAlignment,            16);  // = 4*wordSize
 define_pd_global(intx, RegisterCostAreaRatio,        12000);
 define_pd_global(bool, UseTLAB,                      true);
 define_pd_global(bool, ResizeTLAB,                   true);
--- a/src/cpu/sparc/vm/globals_sparc.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/sparc/vm/globals_sparc.hpp	Thu May 13 18:26:35 2010 -0700
@@ -40,6 +40,9 @@
 define_pd_global(bool, UncommonNullCast,            true);  // Uncommon-trap NULLs passed to check cast
 
 define_pd_global(intx, CodeEntryAlignment,    32);
+// The default setting 16/16 seems to work best.
+// (For _228_jack 16/16 is 2% better than 4/4, 16/4, 32/32, 32/16, or 16/32.)
+define_pd_global(intx, OptoLoopAlignment,     16);  // = 4*wordSize
 define_pd_global(intx, InlineFrequencyCount,  50);  // we can use more inlining on the SPARC
 define_pd_global(intx, InlineSmallCode,       1500);
 #ifdef _LP64
--- a/src/cpu/sparc/vm/interp_masm_sparc.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/sparc/vm/interp_masm_sparc.cpp	Thu May 13 18:26:35 2010 -0700
@@ -244,9 +244,10 @@
 }
 
 
-void InterpreterMacroAssembler::super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1) {
+void InterpreterMacroAssembler::super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
   mov(arg_1, O0);
-  MacroAssembler::call_VM_leaf_base(thread_cache, entry_point, 1);
+  mov(arg_2, O1);
+  MacroAssembler::call_VM_leaf_base(thread_cache, entry_point, 2);
 }
 #endif /* CC_INTERP */
 
--- a/src/cpu/sparc/vm/interp_masm_sparc.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/sparc/vm/interp_masm_sparc.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -121,7 +121,7 @@
                      bool check_exception = true);
 
 #ifndef CC_INTERP
-  void super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1);
+  void super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2);
 
   // Generate a subtype check: branch to ok_is_subtype if sub_klass is
   // a subtype of super_klass.  Blows registers tmp1, tmp2 and tmp3.
--- a/src/cpu/sparc/vm/sparc.ad	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/sparc/vm/sparc.ad	Thu May 13 18:26:35 2010 -0700
@@ -471,6 +471,9 @@
 source %{
 #define __ _masm.
 
+// Block initializing store
+#define ASI_BLK_INIT_QUAD_LDD_P    0xE2
+
 // tertiary op of a LoadP or StoreP encoding
 #define REGP_OP true
 
@@ -920,38 +923,6 @@
 #endif
 }
 
-void emit_form3_mem_reg_asi(CodeBuffer &cbuf, const MachNode* n, int primary, int tertiary,
-                        int src1_enc, int disp32, int src2_enc, int dst_enc, int asi) {
-
-  uint instr;
-  instr = (Assembler::ldst_op << 30)
-        | (dst_enc        << 25)
-        | (primary        << 19)
-        | (src1_enc       << 14);
-
-  int disp = disp32;
-  int index    = src2_enc;
-
-  if (src1_enc == R_SP_enc || src1_enc == R_FP_enc)
-    disp += STACK_BIAS;
-
-  // We should have a compiler bailout here rather than a guarantee.
-  // Better yet would be some mechanism to handle variable-size matches correctly.
-  guarantee(Assembler::is_simm13(disp), "Do not match large constant offsets" );
-
-  if( disp != 0 ) {
-    // use reg-reg form
-    // set src2=R_O7 contains offset
-    index = R_O7_enc;
-    emit3_simm13( cbuf, Assembler::arith_op, index, Assembler::or_op3, 0, disp);
-  }
-  instr |= (asi << 5);
-  instr |= index;
-  uint *code = (uint*)cbuf.code_end();
-  *code = instr;
-  cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord);
-}
-
 void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, relocInfo::relocType rtype, bool preserve_g2 = false, bool force_far_call = false) {
   // The method which records debug information at every safepoint
   // expects the call to be the first instruction in the snippet as
@@ -1803,8 +1774,9 @@
 // to implement the UseStrictFP mode.
 const bool Matcher::strict_fp_requires_explicit_rounding = false;
 
-// Do floats take an entire double register or just half?
-const bool Matcher::float_in_double = false;
+// Are floats converted to double when stored to the stack during deoptimization?
+// Sparc does not handle callee-save floats.
+bool Matcher::float_in_double() { return false; }
 
 // Do ints take an entire long register or just half?
 // Note that we if-def off of _LP64.
@@ -1950,11 +1922,6 @@
                        $mem$$base, $mem$$disp, $mem$$index, $dst$$reg);
   %}
 
-  enc_class form3_mem_reg_little( memory mem, iRegI dst) %{
-    emit_form3_mem_reg_asi(cbuf, this, $primary, -1,
-                     $mem$$base, $mem$$disp, $mem$$index, $dst$$reg, Assembler::ASI_PRIMARY_LITTLE);
-  %}
-
   enc_class form3_mem_prefetch_read( memory mem ) %{
     emit_form3_mem_reg(cbuf, this, $primary, -1,
                        $mem$$base, $mem$$disp, $mem$$index, 0/*prefetch function many-reads*/);
@@ -4307,8 +4274,8 @@
 // instructions for every form of operand when the instruction accepts
 // multiple operand types with the same basic encoding and format.  The classic
 // case of this is memory operands.
-// Indirect is not included since its use is limited to Compare & Swap
 opclass memory( indirect, indOffset13, indIndex );
+opclass indIndexMemory( indIndex );
 
 //----------PIPELINE-----------------------------------------------------------
 pipeline %{
@@ -6146,6 +6113,7 @@
 %}
 
 instruct prefetchw( memory mem ) %{
+  predicate(AllocatePrefetchStyle != 3 );
   match( PrefetchWrite mem );
   ins_cost(MEMORY_REF_COST);
 
@@ -6155,6 +6123,23 @@
   ins_pipe(iload_mem);
 %}
 
+// Use BIS instruction to prefetch.
+instruct prefetchw_bis( memory mem ) %{
+  predicate(AllocatePrefetchStyle == 3);
+  match( PrefetchWrite mem );
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "STXA   G0,$mem\t! // Block initializing store" %}
+  ins_encode %{
+     Register base = as_Register($mem$$base);
+     int disp = $mem$$disp;
+     if (disp != 0) {
+       __ add(base, AllocatePrefetchStepSize, base);
+     }
+     __ stxa(G0, base, G0, ASI_BLK_INIT_QUAD_LDD_P);
+  %}
+  ins_pipe(istore_mem_reg);
+%}
 
 //----------Store Instructions-------------------------------------------------
 // Store Byte
@@ -9644,84 +9629,179 @@
 
 instruct bytes_reverse_int(iRegI dst, stackSlotI src) %{
   match(Set dst (ReverseBytesI src));
-  effect(DEF dst, USE src);
+
+  // Op cost is artificially doubled to make sure that load or store
+  // instructions are preferred over this one which requires a spill
+  // onto a stack slot.
+  ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
+  format %{ "LDUWA  $src, $dst\t!asi=primary_little" %}
+
+  ins_encode %{
+    __ set($src$$disp + STACK_BIAS, O7);
+    __ lduwa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
+  %}
+  ins_pipe( iload_mem );
+%}
+
+instruct bytes_reverse_long(iRegL dst, stackSlotL src) %{
+  match(Set dst (ReverseBytesL src));
 
   // Op cost is artificially doubled to make sure that load or store
   // instructions are preferred over this one which requires a spill
   // onto a stack slot.
   ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
-  size(8);
-  format %{ "LDUWA  $src, $dst\t!asi=primary_little" %}
-  opcode(Assembler::lduwa_op3);
-  ins_encode( form3_mem_reg_little(src, dst) );
+  format %{ "LDXA   $src, $dst\t!asi=primary_little" %}
+
+  ins_encode %{
+    __ set($src$$disp + STACK_BIAS, O7);
+    __ ldxa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
+  %}
   ins_pipe( iload_mem );
 %}
 
-instruct bytes_reverse_long(iRegL dst, stackSlotL src) %{
-  match(Set dst (ReverseBytesL src));
-  effect(DEF dst, USE src);
+instruct bytes_reverse_unsigned_short(iRegI dst, stackSlotI src) %{
+  match(Set dst (ReverseBytesUS src));
+
+  // Op cost is artificially doubled to make sure that load or store
+  // instructions are preferred over this one which requires a spill
+  // onto a stack slot.
+  ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
+  format %{ "LDUHA  $src, $dst\t!asi=primary_little\n\t" %}
+
+  ins_encode %{
+    // the value was spilled as an int so bias the load
+    __ set($src$$disp + STACK_BIAS + 2, O7);
+    __ lduha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
+  %}
+  ins_pipe( iload_mem );
+%}
+
+instruct bytes_reverse_short(iRegI dst, stackSlotI src) %{
+  match(Set dst (ReverseBytesS src));
 
   // Op cost is artificially doubled to make sure that load or store
   // instructions are preferred over this one which requires a spill
   // onto a stack slot.
   ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
-  size(8);
-  format %{ "LDXA   $src, $dst\t!asi=primary_little" %}
-
-  opcode(Assembler::ldxa_op3);
-  ins_encode( form3_mem_reg_little(src, dst) );
+  format %{ "LDSHA  $src, $dst\t!asi=primary_little\n\t" %}
+
+  ins_encode %{
+    // the value was spilled as an int so bias the load
+    __ set($src$$disp + STACK_BIAS + 2, O7);
+    __ ldsha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
+  %}
   ins_pipe( iload_mem );
 %}
 
 // Load Integer reversed byte order
-instruct loadI_reversed(iRegI dst, memory src) %{
+instruct loadI_reversed(iRegI dst, indIndexMemory src) %{
   match(Set dst (ReverseBytesI (LoadI src)));
 
   ins_cost(DEFAULT_COST + MEMORY_REF_COST);
-  size(8);
+  size(4);
   format %{ "LDUWA  $src, $dst\t!asi=primary_little" %}
 
-  opcode(Assembler::lduwa_op3);
-  ins_encode( form3_mem_reg_little( src, dst) );
+  ins_encode %{
+    __ lduwa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
+  %}
   ins_pipe(iload_mem);
 %}
 
 // Load Long - aligned and reversed
-instruct loadL_reversed(iRegL dst, memory src) %{
+instruct loadL_reversed(iRegL dst, indIndexMemory src) %{
   match(Set dst (ReverseBytesL (LoadL src)));
 
-  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
-  size(8);
+  ins_cost(MEMORY_REF_COST);
+  size(4);
   format %{ "LDXA   $src, $dst\t!asi=primary_little" %}
 
-  opcode(Assembler::ldxa_op3);
-  ins_encode( form3_mem_reg_little( src, dst ) );
+  ins_encode %{
+    __ ldxa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
+  %}
+  ins_pipe(iload_mem);
+%}
+
+// Load unsigned short / char reversed byte order
+instruct loadUS_reversed(iRegI dst, indIndexMemory src) %{
+  match(Set dst (ReverseBytesUS (LoadUS src)));
+
+  ins_cost(MEMORY_REF_COST);
+  size(4);
+  format %{ "LDUHA  $src, $dst\t!asi=primary_little" %}
+
+  ins_encode %{
+    __ lduha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
+  %}
+  ins_pipe(iload_mem);
+%}
+
+// Load short reversed byte order
+instruct loadS_reversed(iRegI dst, indIndexMemory src) %{
+  match(Set dst (ReverseBytesS (LoadS src)));
+
+  ins_cost(MEMORY_REF_COST);
+  size(4);
+  format %{ "LDSHA  $src, $dst\t!asi=primary_little" %}
+
+  ins_encode %{
+    __ ldsha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
+  %}
   ins_pipe(iload_mem);
 %}
 
 // Store Integer reversed byte order
-instruct storeI_reversed(memory dst, iRegI src) %{
+instruct storeI_reversed(indIndexMemory dst, iRegI src) %{
   match(Set dst (StoreI dst (ReverseBytesI src)));
 
   ins_cost(MEMORY_REF_COST);
-  size(8);
+  size(4);
   format %{ "STWA   $src, $dst\t!asi=primary_little" %}
 
-  opcode(Assembler::stwa_op3);
-  ins_encode( form3_mem_reg_little( dst, src) );
+  ins_encode %{
+    __ stwa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
+  %}
   ins_pipe(istore_mem_reg);
 %}
 
 // Store Long reversed byte order
-instruct storeL_reversed(memory dst, iRegL src) %{
+instruct storeL_reversed(indIndexMemory dst, iRegL src) %{
   match(Set dst (StoreL dst (ReverseBytesL src)));
 
   ins_cost(MEMORY_REF_COST);
-  size(8);
+  size(4);
   format %{ "STXA   $src, $dst\t!asi=primary_little" %}
 
-  opcode(Assembler::stxa_op3);
-  ins_encode( form3_mem_reg_little( dst, src) );
+  ins_encode %{
+    __ stxa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
+  %}
+  ins_pipe(istore_mem_reg);
+%}
+
+// Store unsigned short/char reversed byte order
+instruct storeUS_reversed(indIndexMemory dst, iRegI src) %{
+  match(Set dst (StoreC dst (ReverseBytesUS src)));
+
+  ins_cost(MEMORY_REF_COST);
+  size(4);
+  format %{ "STHA   $src, $dst\t!asi=primary_little" %}
+
+  ins_encode %{
+    __ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
+  %}
+  ins_pipe(istore_mem_reg);
+%}
+
+// Store short reversed byte order
+instruct storeS_reversed(indIndexMemory dst, iRegI src) %{
+  match(Set dst (StoreC dst (ReverseBytesS src)));
+
+  ins_cost(MEMORY_REF_COST);
+  size(4);
+  format %{ "STHA   $src, $dst\t!asi=primary_little" %}
+
+  ins_encode %{
+    __ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
+  %}
   ins_pipe(istore_mem_reg);
 %}
 
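SPARC is big-endian, so the rewritten instructs above get byte reversal
for free: a single load or store issued through the ASI_PRIMARY_LITTLE
alternate address space swaps the bytes as part of the memory access,
replacing the removed form3_mem_reg_asi encoder. A portable C++ sketch
of what the 32-bit variant (LDUWA) computes:

    #include <stdint.h>

    // A little-endian-ASI load of a big-endian word is equivalent to a
    // plain 32-bit byte swap, fused into the memory access itself.
    static inline uint32_t reverse_bytes_i(uint32_t v) {
      return ((v & 0x000000FFu) << 24) |
             ((v & 0x0000FF00u) <<  8) |
             ((v & 0x00FF0000u) >>  8) |
             ((v & 0xFF000000u) >> 24);
    }

The stack-slot forms bias the address by STACK_BIAS (plus 2 for the
short variants, since the value was spilled as an int) and carry a
doubled ins_cost so the matcher prefers the memory-operand forms.
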
--- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Thu May 13 18:26:35 2010 -0700
@@ -379,7 +379,7 @@
     __ save_frame(0);             // compensates for compiler weakness
     __ add(O7->after_save(), frame::pc_return_offset, Lscratch); // save the issuing PC
     BLOCK_COMMENT("call exception_handler_for_return_address");
-    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), Lscratch);
+    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), G2_thread, Lscratch);
     __ mov(O0, handler_reg);
     __ restore();                 // compensates for compiler weakness
 
@@ -1148,7 +1148,7 @@
       __ andn(from, 7, from);     // Align address
       __ ldx(from, 0, O3);
       __ inc(from, 8);
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_loop);
       __ ldx(from, 0, O4);
       __ deccc(count, count_dec); // Can we do next iteration after this one?
@@ -1220,7 +1220,7 @@
     //
       __ andn(end_from, 7, end_from);     // Align address
       __ ldx(end_from, 0, O3);
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_loop);
       __ ldx(end_from, -8, O4);
       __ deccc(count, count_dec); // Can we do next iteration after this one?
@@ -1349,7 +1349,7 @@
     __ BIND(L_copy_byte);
       __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
       __ delayed()->nop();
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_copy_byte_loop);
       __ ldub(from, offset, O3);
       __ deccc(count);
@@ -1445,7 +1445,7 @@
                                         L_aligned_copy, L_copy_byte);
     }
     // copy 4 elements (16 bytes) at a time
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_aligned_copy);
       __ dec(end_from, 16);
       __ ldx(end_from, 8, O3);
@@ -1461,7 +1461,7 @@
     __ BIND(L_copy_byte);
       __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
       __ delayed()->nop();
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_copy_byte_loop);
       __ dec(end_from);
       __ dec(end_to);
@@ -1577,7 +1577,7 @@
     __ BIND(L_copy_2_bytes);
       __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
       __ delayed()->nop();
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_copy_2_bytes_loop);
       __ lduh(from, offset, O3);
       __ deccc(count);
@@ -1684,7 +1684,7 @@
                                         L_aligned_copy, L_copy_2_bytes);
     }
     // copy 4 elements (16 bytes) at a time
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_aligned_copy);
       __ dec(end_from, 16);
       __ ldx(end_from, 8, O3);
@@ -1781,7 +1781,7 @@
     // copy with shift 4 elements (16 bytes) at a time
       __ dec(count, 4);   // The cmp at the beginning guarantees count >= 4
 
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_copy_16_bytes);
       __ ldx(from, 4, O4);
       __ deccc(count, 4); // Can we do next iteration after this one?
@@ -1907,7 +1907,7 @@
     // to form 2 aligned 8-byte chunks to store.
     //
       __ ldx(end_from, -4, O3);
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_copy_16_bytes);
       __ ldx(end_from, -12, O4);
       __ deccc(count, 4);
@@ -1929,7 +1929,7 @@
       __ delayed()->inc(count, 4);
 
     // copy 4 elements (16 bytes) at a time
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_aligned_copy);
       __ dec(end_from, 16);
       __ ldx(end_from, 8, O3);
@@ -2000,6 +2000,27 @@
   //      to:    O1
   //      count: O2 treated as signed
   //
+  // count -= 2;
+  // if ( count >= 0 ) { // >= 2 elements
+  //   if ( count > 6) { // >= 8 elements
+  //     count -= 6; // original count - 8
+  //     do {
+  //       copy_8_elements;
+  //       count -= 8;
+  //     } while ( count >= 0 );
+  //     count += 6;
+  //   }
+  //   if ( count >= 0 ) { // >= 2 elements
+  //     do {
+  //       copy_2_elements;
+  //     } while ( (count=count-2) >= 0 );
+  //   }
+  // }
+  // count += 2;
+  // if ( count != 0 ) { // 1 element left
+  //   copy_1_element;
+  // }
+  //
   void generate_disjoint_long_copy_core(bool aligned) {
     Label L_copy_8_bytes, L_copy_16_bytes, L_exit;
     const Register from    = O0;  // source array address
@@ -2012,7 +2033,39 @@
       __ mov(G0, offset0);   // offset from start of arrays (0)
       __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
       __ delayed()->add(offset0, 8, offset8);
-      __ align(16);
+
+    // Copy in 64-byte chunks
+    Label L_copy_64_bytes;
+    const Register from64 = O3;  // source address
+    const Register to64   = G3;  // destination address
+      __ subcc(count, 6, O3);
+      __ brx(Assembler::negative, false, Assembler::pt, L_copy_16_bytes );
+      __ delayed()->mov(to,   to64);
+      // Now we can use O4(offset0), O5(offset8) as temps
+      __ mov(O3, count);
+      __ mov(from, from64);
+
+      __ align(OptoLoopAlignment);
+    __ BIND(L_copy_64_bytes);
+      for( int off = 0; off < 64; off += 16 ) {
+        __ ldx(from64,  off+0, O4);
+        __ ldx(from64,  off+8, O5);
+        __ stx(O4, to64,  off+0);
+        __ stx(O5, to64,  off+8);
+      }
+      __ deccc(count, 8);
+      __ inc(from64, 64);
+      __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_64_bytes);
+      __ delayed()->inc(to64, 64);
+
+      // Restore O4(offset0), O5(offset8)
+      __ sub(from64, from, offset0);
+      __ inccc(count, 6);
+      __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
+      __ delayed()->add(offset0, 8, offset8);
+
+      // Copy in 16-byte chunks
+      __ align(OptoLoopAlignment);
     __ BIND(L_copy_16_bytes);
       __ ldx(from, offset0, O3);
       __ ldx(from, offset8, G3);
@@ -2023,6 +2076,7 @@
       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
       __ delayed()->inc(offset8, 16);
 
+      // Copy last 8 bytes
     __ BIND(L_copy_8_bytes);
       __ inccc(count, 2);
       __ brx(Assembler::zero, true, Assembler::pn, L_exit );
@@ -2085,7 +2139,7 @@
       __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_8_bytes );
       __ delayed()->sllx(count, LogBytesPerLong, offset8);
       __ sub(offset8, 8, offset0);
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_copy_16_bytes);
       __ ldx(from, offset8, O2);
       __ ldx(from, offset0, O3);
@@ -2351,7 +2405,7 @@
     //   (O5 = 0; ; O5 += wordSize) --- offset from src, dest arrays
     //   (O2 = len; O2 != 0; O2--) --- number of oops *remaining*
     //   G3, G4, G5 --- current oop, oop.klass, oop.klass.super
-    __ align(16);
+    __ align(OptoLoopAlignment);
 
     __ BIND(store_element);
     __ deccc(G1_remain);                // decrement the count
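
The comment block added ahead of generate_disjoint_long_copy_core spells
out the new loop structure: a 64-byte (8-element) main loop, a 16-byte
pair loop, and a single-element tail. A direct C++ rendering of that
pseudocode, as a minimal sketch:

    #include <stdint.h>

    static void disjoint_long_copy(const int64_t* from, int64_t* to,
                                   long count) {
      count -= 2;
      if (count >= 0) {              // >= 2 elements
        if (count > 6) {             // >= 8 elements
          count -= 6;                // original count - 8
          do {
            for (int i = 0; i < 8; i++) *to++ = *from++;
            count -= 8;
          } while (count >= 0);
          count += 6;
        }
        while (count >= 0) {         // remaining pairs
          *to++ = *from++;
          *to++ = *from++;
          count -= 2;
        }
      }
      count += 2;
      if (count != 0) {              // one element left
        *to = *from;
      }
    }

Pre-decrementing count by 2 and testing against zero is what lets the
generated code drive both loops straight off the condition codes of
deccc/inccc with brx(negative) and brx(greaterEqual).
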
--- a/src/cpu/sparc/vm/stubRoutines_sparc.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/sparc/vm/stubRoutines_sparc.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,8 +37,13 @@
 
 enum /* platform_dependent_constants */ {
   // %%%%%%%% May be able to shrink this a lot
-  code_size1 = 20000,                                        // simply increase if too small (assembler will crash if too small)
-  code_size2 = 20000                                         // simply increase if too small (assembler will crash if too small)
+  code_size1 = 20000,           // simply increase if too small (assembler will crash if too small)
+  code_size2 = 20000            // simply increase if too small (assembler will crash if too small)
+};
+
+// MethodHandles adapters
+enum method_handles_platform_dependent_constants {
+  method_handles_adapters_code_size = 5000
 };
 
 class Sparc {
--- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1822,7 +1822,7 @@
   __ add(issuing_pc_addr, Oissuing_pc->after_save());  // likewise set I1 to a value local to the caller
   __ super_call_VM_leaf(L7_thread_cache,
                         CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
-                        Oissuing_pc->after_save());
+                        G2_thread, Oissuing_pc->after_save());
 
   // The caller's SP was adjusted upon method entry to accommodate
   // the callee's non-argument locals. Undo that adjustment.
--- a/src/cpu/sparc/vm/vm_version_sparc.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/sparc/vm/vm_version_sparc.cpp	Thu May 13 18:26:35 2010 -0700
@@ -86,14 +86,24 @@
     if (FLAG_IS_DEFAULT(InteriorEntryAlignment)) {
       FLAG_SET_DEFAULT(InteriorEntryAlignment, 4);
     }
+    if (is_niagara1_plus()) {
+      if (AllocatePrefetchStyle > 0 && FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
+        // Use BIS instruction for allocation prefetch.
+        FLAG_SET_DEFAULT(AllocatePrefetchStyle, 3);
+        if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
+          // Use smaller prefetch distance on N2 with BIS
+          FLAG_SET_DEFAULT(AllocatePrefetchDistance, 64);
+        }
+      }
+      if (AllocatePrefetchStyle != 3 && FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
+        // Use different prefetch distance without BIS
+        FLAG_SET_DEFAULT(AllocatePrefetchDistance, 256);
+      }
+    }
+#endif
     if (FLAG_IS_DEFAULT(OptoLoopAlignment)) {
       FLAG_SET_DEFAULT(OptoLoopAlignment, 4);
     }
-    if (is_niagara1_plus() && FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
-      // Use smaller prefetch distance on N2
-      FLAG_SET_DEFAULT(AllocatePrefetchDistance, 256);
-    }
-#endif
   }
 
   // Use hardware population count instruction if available.
--- a/src/cpu/x86/vm/assembler_x86.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/x86/vm/assembler_x86.cpp	Thu May 13 18:26:35 2010 -0700
@@ -3365,6 +3365,13 @@
 
 #else // LP64
 
+void Assembler::set_byte_if_not_zero(Register dst) {
+  int enc = prefix_and_encode(dst->encoding(), true);
+  emit_byte(0x0F);
+  emit_byte(0x95);
+  emit_byte(0xE0 | enc);
+}
+
 // 64bit only pieces of the assembler
 // This should only be used by 64bit instructions that can use rip-relative
 // it cannot be used by instructions that want an immediate value.
@@ -8460,6 +8467,7 @@
   subptr(str1, result); // Restore counter
   shrl(str1, 1);
   addl(cnt1, str1);
+  decrementl(cnt1);
   lea(str1, Address(result, 2)); // Reload string
 
   // Load substr
--- a/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -373,6 +373,14 @@
 }
 
 
+void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
+  __ bind(_entry);
+  __ call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack_with_reexecution()));
+  ce->add_call_info_here(_info);
+  debug_only(__ should_not_reach_here());
+}
+
+
 void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
   ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
   __ bind(_entry);
--- a/src/cpu/x86/vm/c1_FrameMap_x86.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/x86/vm/c1_FrameMap_x86.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -126,3 +126,6 @@
     assert(i >= 0 && i < nof_caller_save_xmm_regs, "out of bounds");
     return _caller_save_xmm_regs[i];
   }
+
+  // JSR 292
+  static LIR_Opr& method_handle_invoke_SP_save_opr() { return rbp_opr; }
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Thu May 13 18:26:35 2010 -0700
@@ -436,40 +436,18 @@
 
   int offset = code_offset();
 
-  // if the method does not have an exception handler, then there is
-  // no reason to search for one
-  if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_on_exceptions()) {
-    // the exception oop and pc are in rax, and rdx
-    // no other registers need to be preserved, so invalidate them
-    __ invalidate_registers(false, true, true, false, true, true);
-
-    // check that there is really an exception
-    __ verify_not_null_oop(rax);
-
-    // search an exception handler (rax: exception oop, rdx: throwing pc)
-    __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id)));
-
-    // if the call returns here, then the exception handler for particular
-    // exception doesn't exist -> unwind activation and forward exception to caller
-  }
-
-  // the exception oop is in rax,
+  // the exception oop and pc are in rax, and rdx
   // no other registers need to be preserved, so invalidate them
-  __ invalidate_registers(false, true, true, true, true, true);
+  __ invalidate_registers(false, true, true, false, true, true);
 
   // check that there is really an exception
   __ verify_not_null_oop(rax);
 
-  // unlock the receiver/klass if necessary
-  // rax,: exception
-  ciMethod* method = compilation()->method();
-  if (method->is_synchronized() && GenerateSynchronizationCode) {
-    monitorexit(FrameMap::rbx_oop_opr, FrameMap::rcx_opr, SYNC_header, 0, rax);
-  }
-
-  // unwind activation and forward exception to caller
-  // rax,: exception
-  __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
+  // search an exception handler (rax: exception oop, rdx: throwing pc)
+  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id)));
+
+  __ stop("should not reach here");
+
   assert(code_offset() - offset <= exception_handler_size, "overflow");
   __ end_a_stub();
 
@@ -477,6 +455,60 @@
 }
 
 
+// Emit the code to remove the frame from the stack in the exception
+// unwind path.
+int LIR_Assembler::emit_unwind_handler() {
+#ifndef PRODUCT
+  if (CommentedAssembly) {
+    _masm->block_comment("Unwind handler");
+  }
+#endif
+
+  int offset = code_offset();
+
+  // Fetch the exception from TLS and clear out exception related thread state
+  __ get_thread(rsi);
+  __ movptr(rax, Address(rsi, JavaThread::exception_oop_offset()));
+  __ movptr(Address(rsi, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
+  __ movptr(Address(rsi, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
+
+  __ bind(_unwind_handler_entry);
+  __ verify_not_null_oop(rax);
+  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
+    __ mov(rsi, rax);  // Preserve the exception
+  }
+
+  // Perform needed unlocking
+  MonitorExitStub* stub = NULL;
+  if (method()->is_synchronized()) {
+    monitor_address(0, FrameMap::rax_opr);
+    stub = new MonitorExitStub(FrameMap::rax_opr, true, 0);
+    __ unlock_object(rdi, rbx, rax, *stub->entry());
+    __ bind(*stub->continuation());
+  }
+
+  if (compilation()->env()->dtrace_method_probes()) {
+    __ movoop(Address(rsp, 0), method()->constant_encoding());
+    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
+  }
+
+  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
+    __ mov(rax, rsi);  // Restore the exception
+  }
+
+  // remove the activation and dispatch to the unwind handler
+  __ remove_frame(initial_frame_size_in_bytes());
+  __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
+
+  // Emit the slow path assembly
+  if (stub != NULL) {
+    stub->emit_code(this);
+  }
+
+  return offset;
+}
+
+
 int LIR_Assembler::emit_deopt_handler() {
   // if the last instruction is a call (typically to do a throw which
   // is coming at the end after block reordering) the return address
@@ -495,8 +527,10 @@
 
   int offset = code_offset();
   InternalAddress here(__ pc());
+
   __ pushptr(here.addr());
   __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
+
   assert(code_offset() - offset <= deopt_handler_size, "overflow");
   __ end_a_stub();
 
@@ -593,7 +627,7 @@
   }
 
   // Pop the stack before the safepoint code
-  __ leave();
+  __ remove_frame(initial_frame_size_in_bytes());
 
   bool result_is_oop = result->is_valid() ? result->is_oop() : false;
 
@@ -648,7 +682,8 @@
   LIR_Const* c = src->as_constant_ptr();
 
   switch (c->type()) {
-    case T_INT: {
+    case T_INT:
+    case T_ADDRESS: {
       assert(patch_code == lir_patch_none, "no patching handled here");
       __ movl(dest->as_register(), c->as_jint());
       break;
@@ -731,6 +766,7 @@
   switch (c->type()) {
     case T_INT:  // fall through
     case T_FLOAT:
+    case T_ADDRESS:
       __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
       break;
 
@@ -766,6 +802,7 @@
   switch (type) {
     case T_INT:    // fall through
     case T_FLOAT:
+    case T_ADDRESS:
       __ movl(as_Address(addr), c->as_jint_bits());
       break;
 
@@ -1207,8 +1244,7 @@
       break;
 #endif // _LP64
     case T_INT:
-      // %%% could this be a movl? this is safer but longer instruction
-      __ movl2ptr(dest->as_register(), from_addr);
+      __ movl(dest->as_register(), from_addr);
       break;
 
     case T_LONG: {
@@ -1266,7 +1302,6 @@
         __ shll(dest_reg, 24);
         __ sarl(dest_reg, 24);
       }
-      // These are unsigned so the zero extension on 64bit is just what we need
       break;
     }
 
@@ -1278,8 +1313,6 @@
       } else {
         __ movw(dest_reg, from_addr);
       }
-      // This is unsigned so the zero extension on 64bit is just what we need
-      // __ movl2ptr(dest_reg, dest_reg);
       break;
     }
 
@@ -1292,8 +1325,6 @@
         __ shll(dest_reg, 16);
         __ sarl(dest_reg, 16);
       }
-      // Might not be needed in 64bit but certainly doesn't hurt (except for code size)
-      __ movl2ptr(dest_reg, dest_reg);
       break;
     }
 
@@ -2707,19 +2738,14 @@
   } else {
     assert(code == lir_cmp_l2i, "check");
 #ifdef _LP64
-      Register dest = dst->as_register();
-      __ xorptr(dest, dest);
-      Label high, done;
-      __ cmpptr(left->as_register_lo(), right->as_register_lo());
-      __ jcc(Assembler::equal, done);
-      __ jcc(Assembler::greater, high);
-      __ decrement(dest);
-      __ jmp(done);
-      __ bind(high);
-      __ increment(dest);
-
-      __ bind(done);
-
+    Label done;
+    Register dest = dst->as_register();
+    __ cmpptr(left->as_register_lo(), right->as_register_lo());
+    __ movl(dest, -1);
+    __ jccb(Assembler::less, done);
+    __ set_byte_if_not_zero(dest);
+    __ movzbl(dest, dest);
+    __ bind(done);
 #else
     __ lcmp2int(left->as_register_hi(),
                 left->as_register_lo(),
@@ -2738,6 +2764,7 @@
     switch (code) {
       case lir_static_call:
       case lir_optvirtual_call:
+      case lir_dynamic_call:
         offset += NativeCall::displacement_offset;
         break;
       case lir_icvirtual_call:
@@ -2753,30 +2780,41 @@
 }
 
 
-void LIR_Assembler::call(address entry, relocInfo::relocType rtype, CodeEmitInfo* info) {
+void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
   assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
          "must be aligned");
-  __ call(AddressLiteral(entry, rtype));
-  add_call_info(code_offset(), info);
+  __ call(AddressLiteral(op->addr(), rtype));
+  add_call_info(code_offset(), op->info(), op->is_method_handle_invoke());
 }
 
 
-void LIR_Assembler::ic_call(address entry, CodeEmitInfo* info) {
+void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
   RelocationHolder rh = virtual_call_Relocation::spec(pc());
   __ movoop(IC_Klass, (jobject)Universe::non_oop_word());
   assert(!os::is_MP() ||
          (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
          "must be aligned");
-  __ call(AddressLiteral(entry, rh));
-  add_call_info(code_offset(), info);
+  __ call(AddressLiteral(op->addr(), rh));
+  add_call_info(code_offset(), op->info(), op->is_method_handle_invoke());
 }
 
 
 /* Currently, vtable-dispatch is only enabled for sparc platforms */
-void LIR_Assembler::vtable_call(int vtable_offset, CodeEmitInfo* info) {
+void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
   ShouldNotReachHere();
 }
 
+
+void LIR_Assembler::preserve_SP(LIR_OpJavaCall* op) {
+  __ movptr(FrameMap::method_handle_invoke_SP_save_opr()->as_register(), rsp);
+}
+
+
+void LIR_Assembler::restore_SP(LIR_OpJavaCall* op) {
+  __ movptr(rsp, FrameMap::method_handle_invoke_SP_save_opr()->as_register());
+}
+
+
 void LIR_Assembler::emit_static_call_stub() {
   address call_pc = __ pc();
   address stub = __ start_a_stub(call_stub_size);
@@ -2805,32 +2843,28 @@
 }
 
 
-void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info, bool unwind) {
+void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
   assert(exceptionOop->as_register() == rax, "must match");
-  assert(unwind || exceptionPC->as_register() == rdx, "must match");
+  assert(exceptionPC->as_register() == rdx, "must match");
 
   // exception object is not added to oop map by LinearScan
   // (LinearScan assumes that no oops are in fixed registers)
   info->add_register_oop(exceptionOop);
   Runtime1::StubID unwind_id;
 
-  if (!unwind) {
-    // get current pc information
-    // pc is only needed if the method has an exception handler, the unwind code does not need it.
-    int pc_for_athrow_offset = __ offset();
-    InternalAddress pc_for_athrow(__ pc());
-    __ lea(exceptionPC->as_register(), pc_for_athrow);
-    add_call_info(pc_for_athrow_offset, info); // for exception handler
-
-    __ verify_not_null_oop(rax);
-    // search an exception handler (rax: exception oop, rdx: throwing pc)
-    if (compilation()->has_fpu_code()) {
-      unwind_id = Runtime1::handle_exception_id;
-    } else {
-      unwind_id = Runtime1::handle_exception_nofpu_id;
-    }
+  // get current pc information
+  // pc is only needed if the method has an exception handler; the unwind code does not need it.
+  int pc_for_athrow_offset = __ offset();
+  InternalAddress pc_for_athrow(__ pc());
+  __ lea(exceptionPC->as_register(), pc_for_athrow);
+  add_call_info(pc_for_athrow_offset, info); // for exception handler
+
+  __ verify_not_null_oop(rax);
+  // search an exception handler (rax: exception oop, rdx: throwing pc)
+  if (compilation()->has_fpu_code()) {
+    unwind_id = Runtime1::handle_exception_id;
   } else {
-    unwind_id = Runtime1::unwind_exception_id;
+    unwind_id = Runtime1::handle_exception_nofpu_id;
   }
   __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
 
@@ -2839,6 +2873,13 @@
 }
 
 
+void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
+  assert(exceptionOop->as_register() == rax, "must match");
+
+  __ jmp(_unwind_handler_entry);
+}
+
+
 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
 
   // optimized version for linear scan:
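The rewritten 64-bit lir_cmp_l2i above drops the three-branch sequence
in favor of one short branch plus the set_byte_if_not_zero() (SETNZ)
primitive added to assembler_x86.cpp: after the cmpptr, -1 is
materialized unconditionally, the "less" case branches straight to
done, and SETNZ/movzbl collapse the equal and greater cases into 0 and
1. A minimal C++ sketch of the function being computed:

    #include <stdint.h>

    // Three-way compare of two longs into -1 / 0 / +1, shaped like the
    // generated code: one conditional branch, then a flags-based select.
    static inline int cmp_l2i(int64_t left, int64_t right) {
      if (left < right) return -1;     // movl dest,-1; jccb(less, done)
      return (left != right) ? 1 : 0;  // set_byte_if_not_zero; movzbl
    }
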
--- a/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -317,14 +317,6 @@
 }
 
 
-void C1_MacroAssembler::method_exit(bool restore_frame) {
-  if (restore_frame) {
-    leave();
-  }
-  ret(0);
-}
-
-
 void C1_MacroAssembler::build_frame(int frame_size_in_bytes) {
   // Make sure there is enough stack space for this method's activation.
   // Note that we do this before doing an enter(). This matches the
@@ -333,7 +325,7 @@
   // between the two compilers.
   generate_stack_overflow_check(frame_size_in_bytes);
 
-  enter();
+  push(rbp);
 #ifdef TIERED
   // c2 leaves fpu stack dirty. Clean it on entry
   if (UseSSE < 2 ) {
@@ -344,6 +336,12 @@
 }
 
 
+void C1_MacroAssembler::remove_frame(int frame_size_in_bytes) {
+  increment(rsp, frame_size_in_bytes);  // Does not emit code for frame_size == 0
+  pop(rbp);
+}
+
+
 void C1_MacroAssembler::unverified_entry(Register receiver, Register ic_klass) {
   if (C1Breakpoint) int3();
   inline_cache_check(receiver, ic_klass);
--- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -688,18 +688,21 @@
   int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
   oop_maps->add_gc_map(call_offset, oop_map);
 
-  // rax,: handler address or NULL if no handler exists
+  // rax: handler address
   //      will be the deopt blob if the nmethod was deoptimized while we looked up
   //      the handler, regardless of whether a handler existed in the nmethod.
 
   // only rax, is valid at this time, all other registers have been destroyed by the runtime call
   __ invalidate_registers(false, true, true, true, true, true);
 
+#ifdef ASSERT
   // Do we have an exception handler in the nmethod?
-  Label no_handler;
   Label done;
   __ testptr(rax, rax);
-  __ jcc(Assembler::zero, no_handler);
+  __ jcc(Assembler::notZero, done);
+  __ stop("no handler found");
+  __ bind(done);
+#endif
 
   // exception handler found
   // patch the return address -> the stub will directly return to the exception handler
@@ -712,36 +715,14 @@
   __ leave();
   __ ret(0);
 
-  __ bind(no_handler);
-  // no exception handler found in this method, so the exception is
-  // forwarded to the caller (using the unwind code of the nmethod)
-  // there is no need to restore the registers
-
-  // restore the real return address that was saved before the RT-call
-  __ movptr(real_return_addr, Address(rsp, temp_1_off * VMRegImpl::stack_slot_size));
-  __ movptr(Address(rbp, 1*BytesPerWord), real_return_addr);
-
-  // load address of JavaThread object for thread-local data
-  NOT_LP64(__ get_thread(thread);)
-  // restore exception oop into rax, (convention for unwind code)
-  __ movptr(exception_oop, Address(thread, JavaThread::exception_oop_offset()));
-
-  // clear exception fields in JavaThread because they are no longer needed
-  // (fields must be cleared because they are processed by GC otherwise)
-  __ movptr(Address(thread, JavaThread::exception_oop_offset()), NULL_WORD);
-  __ movptr(Address(thread, JavaThread::exception_pc_offset()), NULL_WORD);
-
-  // pop the stub frame off
-  __ leave();
-
-  generate_unwind_exception(sasm);
-  __ stop("should not reach here");
 }
 
 
 void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
   // incoming parameters
   const Register exception_oop = rax;
+  // callee-saved copy of exception_oop during runtime call
+  const Register exception_oop_callee_saved = NOT_LP64(rsi) LP64_ONLY(r14);
   // other registers used in this stub
   const Register exception_pc = rdx;
   const Register handler_addr = rbx;
@@ -769,38 +750,39 @@
   // clear the FPU stack in case any FPU results are left behind
   __ empty_FPU_stack();
 
-  // leave activation of nmethod
-  __ leave();
-  // store return address (is on top of stack after leave)
+  // save exception_oop in callee-saved register to preserve it during runtime calls
+  __ verify_not_null_oop(exception_oop);
+  __ movptr(exception_oop_callee_saved, exception_oop);
+
+  NOT_LP64(__ get_thread(thread);)
+  // Get return address (is on top of stack after leave).
   __ movptr(exception_pc, Address(rsp, 0));
 
-  __ verify_oop(exception_oop);
-
-  // save exception oop from rax, to stack before call
-  __ push(exception_oop);
+  // search the exception handler address of the caller (using the return address)
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
+  // rax: exception handler address of the caller
 
-  // search the exception handler address of the caller (using the return address)
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), exception_pc);
-  // rax,: exception handler address of the caller
-
-  // only rax, is valid at this time, all other registers have been destroyed by the call
-  __ invalidate_registers(false, true, true, true, true, true);
+  // Only RAX and the callee-saved RSI (R14 on 64-bit) are valid at this time; all other registers have been destroyed by the call.
+  __ invalidate_registers(false, true, true, true, false, true);
 
   // move result of call into correct register
   __ movptr(handler_addr, rax);
 
-  // restore exception oop in rax, (required convention of exception handler)
-  __ pop(exception_oop);
+  // Restore exception oop to RAX (required convention of exception handler).
+  __ movptr(exception_oop, exception_oop_callee_saved);
 
-  __ verify_oop(exception_oop);
+  // verify that there is really a valid exception in rax
+  __ verify_not_null_oop(exception_oop);
 
   // get throwing pc (= return address).
   // rdx has been destroyed by the call, so it must be set again
   // the pop is also necessary to simulate the effect of a ret(0)
   __ pop(exception_pc);
 
-  // verify that that there is really a valid exception in rax,
-  __ verify_not_null_oop(exception_oop);
+  // Restore SP from BP if the exception PC is a MethodHandle call site.
+  NOT_LP64(__ get_thread(thread);)
+  __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
+  __ cmovptr(Assembler::notEqual, rsp, rbp);
 
   // continue at exception handler (return address removed)
   // note: do *not* remove arguments when unwinding the
@@ -808,9 +790,9 @@
   //       all arguments on the stack when entering the
   //       runtime to determine the exception handler
   //       (GC happens at call site with arguments!)
-  // rax,: exception oop
+  // rax: exception oop
   // rdx: throwing pc
-  // rbx,: exception handler
+  // rbx: exception handler
   __ jmp(handler_addr);
 }
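
The heart of the generate_unwind_exception rewrite is keeping the exception oop alive across the runtime call in a callee-saved register (rsi on 32-bit, r14 on 64-bit) instead of pushing and popping it around the call. Reduced to its essentials (MacroAssembler style, register names as in the stub above):

  const Register exception_oop = rax;
  const Register saved_oop     = NOT_LP64(rsi) LP64_ONLY(r14);  // callee-saved in both ABIs

  __ movptr(saved_oop, exception_oop);   // survives the leaf call below
  __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                  SharedRuntime::exception_handler_for_return_address),
                  thread, exception_pc); // clobbers all caller-saved registers
  __ movptr(handler_addr, rax);          // call result: handler entry point
  __ movptr(exception_oop, saved_oop);   // rax once again holds the exception oop
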
 
--- a/src/cpu/x86/vm/c2_globals_x86.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/x86/vm/c2_globals_x86.hpp	Thu May 13 18:26:35 2010 -0700
@@ -80,7 +80,6 @@
 // Ergonomics related flags
 define_pd_global(uint64_t,MaxRAM,                    4ULL*G);
 #endif // AMD64
-define_pd_global(intx, OptoLoopAlignment,            16);
 define_pd_global(intx, RegisterCostAreaRatio,        16000);
 
 // Peephole and CISC spilling both break the graph, and so make the
--- a/src/cpu/x86/vm/globals_x86.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/x86/vm/globals_x86.hpp	Thu May 13 18:26:35 2010 -0700
@@ -45,6 +45,7 @@
 #else
 define_pd_global(intx, CodeEntryAlignment,       16);
 #endif // COMPILER2
+define_pd_global(intx, OptoLoopAlignment,        16);
 define_pd_global(intx, InlineFrequencyCount,     100);
 define_pd_global(intx, InlineSmallCode,          1000);
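
Moving OptoLoopAlignment out of c2_globals_x86.hpp and into globals_x86.hpp makes it visible outside C2, which is what lets the stub generators below replace the hard-coded __ align(16) at copy-loop heads with __ align(OptoLoopAlignment). For context, a sketch of what align() does on x86, assuming the usual HotSpot nop-padding behavior:

  void MacroAssembler::align(int modulus) {
    // Emit nops until the current code offset is a multiple of
    // 'modulus' (e.g. OptoLoopAlignment == 16), so the loop head
    // starts on an aligned boundary for better fetch behavior.
    while (offset() % modulus != 0) {
      nop();
    }
  }
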
 
--- a/src/cpu/x86/vm/methodHandles_x86.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/x86/vm/methodHandles_x86.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -60,13 +60,13 @@
 }
 
 #ifdef ASSERT
-static void verify_argslot(MacroAssembler* _masm, Register rax_argslot,
+static void verify_argslot(MacroAssembler* _masm, Register argslot_reg,
                            const char* error_message) {
   // Verify that argslot lies within (rsp, rbp].
   Label L_ok, L_bad;
-  __ cmpptr(rax_argslot, rbp);
+  __ cmpptr(argslot_reg, rbp);
   __ jccb(Assembler::above, L_bad);
-  __ cmpptr(rsp, rax_argslot);
+  __ cmpptr(rsp, argslot_reg);
   __ jccb(Assembler::below, L_ok);
   __ bind(L_bad);
   __ stop(error_message);
@@ -178,22 +178,6 @@
 
   // Now move the argslot down, to point to the opened-up space.
   __ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr));
-
-  if (TaggedStackInterpreter && arg_mask != _INSERT_NO_MASK) {
-    // The caller has specified a bitmask of tags to put into the opened space.
-    // This only works when the arg_slots value is an assembly-time constant.
-    int constant_arg_slots = arg_slots.as_constant() / stack_move_unit();
-    int tag_offset = Interpreter::tag_offset_in_bytes() - Interpreter::value_offset_in_bytes();
-    for (int slot = 0; slot < constant_arg_slots; slot++) {
-      BasicType slot_type   = ((arg_mask & (1 << slot)) == 0 ? T_OBJECT : T_INT);
-      int       slot_offset = Interpreter::stackElementSize() * slot;
-      Address   tag_addr(rax_argslot, slot_offset + tag_offset);
-      __ movptr(tag_addr, frame::tag_for_basic_type(slot_type));
-    }
-    // Note that the new argument slots are tagged properly but contain
-    // garbage at this point.  The value portions must be initialized
-    // by the caller.  (Especially references!)
-  }
 }
 
 // Helper to remove argument slots from the stack.
@@ -206,18 +190,9 @@
                              (!arg_slots.is_register() ? rsp : arg_slots.as_register()));
 
 #ifdef ASSERT
-  {
-    // Verify that [argslot..argslot+size) lies within (rsp, rbp).
-    Label L_ok, L_bad;
-    __ lea(rbx_temp, Address(rax_argslot, arg_slots, Address::times_ptr));
-    __ cmpptr(rbx_temp, rbp);
-    __ jccb(Assembler::above, L_bad);
-    __ cmpptr(rsp, rax_argslot);
-    __ jccb(Assembler::below, L_ok);
-    __ bind(L_bad);
-    __ stop("deleted argument(s) must fall within current frame");
-    __ bind(L_ok);
-  }
+  // Verify that [argslot..argslot+size) lies within (rsp, rbp).
+  __ lea(rbx_temp, Address(rax_argslot, arg_slots, Address::times_ptr));
+  verify_argslot(_masm, rbx_temp, "deleted argument(s) must fall within current frame");
   if (arg_slots.is_register()) {
     Label L_ok, L_bad;
     __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD);
@@ -321,12 +296,6 @@
   Address rcx_amh_conversion( rcx_recv, sun_dyn_AdapterMethodHandle::conversion_offset_in_bytes() );
   Address vmarg;                // __ argument_address(vmargslot)
 
-  int tag_offset = -1;
-  if (TaggedStackInterpreter) {
-    tag_offset = Interpreter::tag_offset_in_bytes() - Interpreter::value_offset_in_bytes();
-    assert(tag_offset = wordSize, "stack grows as expected");
-  }
-
   const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
 
   if (have_entry(ek)) {
@@ -372,11 +341,8 @@
       __ mov(rsp, rsi);   // cut the stack back to where the caller started
 
       // Repush the arguments as if coming from the interpreter.
-      if (TaggedStackInterpreter)  __ push(frame::tag_for_basic_type(T_INT));
       __ push(rdx_code);
-      if (TaggedStackInterpreter)  __ push(frame::tag_for_basic_type(T_OBJECT));
       __ push(rcx_fail);
-      if (TaggedStackInterpreter)  __ push(frame::tag_for_basic_type(T_OBJECT));
       __ push(rax_want);
 
       Register rbx_method = rbx_temp;
@@ -397,7 +363,6 @@
       // Do something that at least causes a valid throw from the interpreter.
       __ bind(no_method);
       __ pop(rax_want);
-      if (TaggedStackInterpreter)  __ pop(rcx_fail);
       __ pop(rcx_fail);
       __ push(rax_want);
       __ push(rcx_fail);
@@ -510,18 +475,10 @@
   case _bound_long_direct_mh:
     {
       bool direct_to_method = (ek >= _bound_ref_direct_mh);
-      BasicType arg_type = T_ILLEGAL;
-      if (ek == _bound_long_mh || ek == _bound_long_direct_mh) {
-        arg_type = T_LONG;
-      } else if (ek == _bound_int_mh || ek == _bound_int_direct_mh) {
-        arg_type = T_INT;
-      } else {
-        assert(ek == _bound_ref_mh || ek == _bound_ref_direct_mh, "must be ref");
-        arg_type = T_OBJECT;
-      }
-      int arg_slots = type2size[arg_type];
-      int arg_mask  = (arg_type == T_OBJECT ? _INSERT_REF_MASK :
-                       arg_slots == 1       ? _INSERT_INT_MASK :  _INSERT_LONG_MASK);
+      BasicType arg_type  = T_ILLEGAL;
+      int       arg_mask  = _INSERT_NO_MASK;
+      int       arg_slots = -1;
+      get_ek_bound_mh_info(ek, arg_type, arg_mask, arg_slots);
 
       // make room for the new argument:
       __ movl(rax_argslot, rcx_bmh_vmargslot);
@@ -584,7 +541,7 @@
 
       Label done;
       __ movptr(rdx_temp, vmarg);
-      __ testl(rdx_temp, rdx_temp);
+      __ testptr(rdx_temp, rdx_temp);
       __ jccb(Assembler::zero, done);         // no cast if null
       __ load_klass(rdx_temp, rdx_temp);
 
@@ -660,13 +617,10 @@
         }
         break;
       default:
-        assert(false, "");
+        ShouldNotReachHere();
       }
-      goto finish_int_conversion;
-    }
 
-  finish_int_conversion:
-    {
+      // Do the requested conversion and store the value.
       Register rbx_vminfo = rbx_temp;
       __ movl(rbx_vminfo, rcx_amh_conversion);
       assert(CONV_VMINFO_SHIFT == 0, "preshifted");
@@ -692,7 +646,7 @@
       __ shrl(rdx_temp /*, rcx*/);
 
       __ bind(done);
-      __ movl(vmarg, rdx_temp);
+      __ movl(vmarg, rdx_temp);  // Store the value.
       __ xchgptr(rcx, rbx_vminfo);                // restore rcx_recv
 
       __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
@@ -715,9 +669,14 @@
       switch (ek) {
       case _adapter_opt_i2l:
         {
+#ifdef _LP64
+          __ movslq(rdx_temp, vmarg1);  // Load sign-extended
+          __ movq(vmarg1, rdx_temp);    // Store into first slot
+#else
           __ movl(rdx_temp, vmarg1);
-          __ sarl(rdx_temp, 31);  // __ extend_sign()
+          __ sarl(rdx_temp, BitsPerInt - 1);  // __ extend_sign()
           __ movl(vmarg2, rdx_temp); // store second word
+#endif
         }
         break;
       case _adapter_opt_unboxl:
@@ -727,14 +686,19 @@
           int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG);
           assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), "");
           __ null_check(rdx_temp, value_offset);
+#ifdef _LP64
+          __ movq(rbx_temp, Address(rdx_temp, value_offset));
+          __ movq(vmarg1, rbx_temp);
+#else
           __ movl(rbx_temp, Address(rdx_temp, value_offset + 0*BytesPerInt));
           __ movl(rdx_temp, Address(rdx_temp, value_offset + 1*BytesPerInt));
           __ movl(vmarg1, rbx_temp);
           __ movl(vmarg2, rdx_temp);
+#endif
         }
         break;
       default:
-        assert(false, "");
+        ShouldNotReachHere();
       }
 
       __ movptr(rcx_recv, rcx_mh_vmtarget);
@@ -768,20 +732,9 @@
       if (ek == _adapter_opt_f2d) {
         __ fld_s(vmarg);        // load float to ST0
         __ fstp_s(vmarg);       // store single
-      } else if (!TaggedStackInterpreter) {
+      } else {
         __ fld_d(vmarg);        // load double to ST0
         __ fstp_s(vmarg);       // store single
-      } else {
-        Address vmarg_tag = vmarg.plus_disp(tag_offset);
-        Address vmarg2    = vmarg.plus_disp(Interpreter::stackElementSize());
-        // vmarg2_tag does not participate in this code
-        Register rbx_tag = rbx_temp;
-        __ movl(rbx_tag, vmarg_tag); // preserve tag
-        __ movl(rdx_temp, vmarg2); // get second word of double
-        __ movl(vmarg_tag, rdx_temp); // align with first word
-        __ fld_d(vmarg);        // load double to ST0
-        __ movl(vmarg_tag, rbx_tag); // restore tag
-        __ fstp_s(vmarg);       // store single
       }
 #endif //_LP64
 
@@ -812,19 +765,8 @@
   case _adapter_opt_rot_2_up:
   case _adapter_opt_rot_2_down:
     {
-      int rotate = 0, swap_slots = 0;
-      switch ((int)ek) {
-      case _adapter_opt_swap_1:     swap_slots = 1; break;
-      case _adapter_opt_swap_2:     swap_slots = 2; break;
-      case _adapter_opt_rot_1_up:   swap_slots = 1; rotate++; break;
-      case _adapter_opt_rot_1_down: swap_slots = 1; rotate--; break;
-      case _adapter_opt_rot_2_up:   swap_slots = 2; rotate++; break;
-      case _adapter_opt_rot_2_down: swap_slots = 2; rotate--; break;
-      default: assert(false, "");
-      }
-
-      // the real size of the move must be doubled if TaggedStackInterpreter:
-      int swap_bytes = (int)( swap_slots * Interpreter::stackElementWords() * wordSize );
+      int swap_bytes = 0, rotate = 0;
+      get_ek_adapter_opt_swap_rot_info(ek, swap_bytes, rotate);
 
       // 'argslot' is the position of the first argument to swap
       __ movl(rax_argslot, rcx_amh_vmargslot);
@@ -925,8 +867,8 @@
 
       // 'stack_move' is a negative number of words to duplicate
       Register rdx_stack_move = rdx_temp;
-      __ movl(rdx_stack_move, rcx_amh_conversion);
-      __ sarl(rdx_stack_move, CONV_STACK_MOVE_SHIFT);
+      __ movl2ptr(rdx_stack_move, rcx_amh_conversion);
+      __ sarptr(rdx_stack_move, CONV_STACK_MOVE_SHIFT);
 
       int argslot0_num = 0;
       Address argslot0 = __ argument_address(RegisterOrConstant(argslot0_num));
@@ -988,8 +930,8 @@
 
       // 'stack_move' is the number of words to drop
       Register rdi_stack_move = rdi;
-      __ movl(rdi_stack_move, rcx_amh_conversion);
-      __ sarl(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
+      __ movl2ptr(rdi_stack_move, rcx_amh_conversion);
+      __ sarptr(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
       remove_arg_slots(_masm, rdi_stack_move,
                        rax_argslot, rbx_temp, rdx_temp);
 
@@ -1014,11 +956,7 @@
   case _adapter_opt_spread_more:
     {
       // spread an array out into a group of arguments
-      int length_constant = -1;
-      switch (ek) {
-      case _adapter_opt_spread_0: length_constant = 0; break;
-      case _adapter_opt_spread_1: length_constant = 1; break;
-      }
+      int length_constant = get_ek_adapter_opt_spread_info(ek);
 
       // find the address of the array argument
       __ movl(rax_argslot, rcx_amh_vmargslot);
@@ -1079,8 +1017,8 @@
         __ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize()));
         // 'stack_move' is a negative number of words to insert
         Register rdi_stack_move = rdi;
-        __ movl(rdi_stack_move, rcx_amh_conversion);
-        __ sarl(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
+        __ movl2ptr(rdi_stack_move, rcx_amh_conversion);
+        __ sarptr(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
         Register rsi_temp = rsi_array;  // spill this
         insert_arg_slots(_masm, rdi_stack_move, -1,
                          rax_argslot, rbx_temp, rsi_temp);
@@ -1114,10 +1052,6 @@
         __ movptr(rbx_temp, Address(rsi_source, 0));
         __ movptr(Address(rax_argslot, 0), rbx_temp);
         __ addptr(rsi_source, type2aelembytes(elem_type));
-        if (TaggedStackInterpreter) {
-          __ movptr(Address(rax_argslot, tag_offset),
-                    frame::tag_for_basic_type(elem_type));
-        }
         __ addptr(rax_argslot, Interpreter::stackElementSize());
         __ cmpptr(rax_argslot, rdx_argslot_limit);
         __ jccb(Assembler::less, loop);
@@ -1131,11 +1065,7 @@
           __ movptr(rbx_temp, Address(rsi_array, elem_offset));
           __ movptr(Address(rax_argslot, slot_offset), rbx_temp);
           elem_offset += type2aelembytes(elem_type);
-          if (TaggedStackInterpreter) {
-            __ movptr(Address(rax_argslot, slot_offset + tag_offset),
-                      frame::tag_for_basic_type(elem_type));
-          }
-          slot_offset += Interpreter::stackElementSize();
+          slot_offset += Interpreter::stackElementSize();
         }
       }
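
The _adapter_opt_i2l change above is plain int-to-long sign extension done two ways: a single sign-extending move (movslq) on 64-bit, or replicating the sign bit into a second 32-bit word with an arithmetic shift by BitsPerInt - 1 on 32-bit. A minimal C++ sketch of the same arithmetic (illustrative only, not HotSpot code):

  #include <stdint.h>

  // 64-bit path: one sign-extending move does the whole job.
  int64_t i2l_64(int32_t lo) {
    return (int64_t) lo;
  }

  // 32-bit path: the high word is just the sign bit replicated,
  // i.e. an arithmetic shift right by 31 (BitsPerInt - 1).
  void i2l_32(int32_t lo, int32_t* out_lo, int32_t* out_hi) {
    *out_lo = lo;
    *out_hi = lo >> 31;  // all ones if lo < 0, all zeros otherwise
  }
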
 
--- a/src/cpu/x86/vm/runtime_x86_32.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/x86/vm/runtime_x86_32.cpp	Thu May 13 18:26:35 2010 -0700
@@ -115,8 +115,8 @@
 
   // rax: exception handler for given <exception oop/exception pc>
 
-  // Restore SP from BP if the exception PC is a MethodHandle call.
-  __ cmpl(Address(rcx, JavaThread::is_method_handle_exception_offset()), 0);
+  // Restore SP from BP if the exception PC is a MethodHandle call site.
+  __ cmpl(Address(rcx, JavaThread::is_method_handle_return_offset()), 0);
   __ cmovptr(Assembler::notEqual, rsp, rbp);
 
   // We have a handler in rax, (could be deopt blob)
--- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Thu May 13 18:26:35 2010 -0700
@@ -3328,8 +3328,8 @@
 
   // rax: exception handler
 
-  // Restore SP from BP if the exception PC is a MethodHandle call.
-  __ cmpl(Address(r15_thread, JavaThread::is_method_handle_exception_offset()), 0);
+  // Restore SP from BP if the exception PC is a MethodHandle call site.
+  __ cmpl(Address(r15_thread, JavaThread::is_method_handle_return_offset()), 0);
   __ cmovptr(Assembler::notEqual, rsp, rbp);
 
   // We have a handler in rax (could be deopt blob).
--- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Thu May 13 18:26:35 2010 -0700
@@ -369,7 +369,7 @@
   // The pending exception in Thread is converted into a Java-level exception.
   //
   // Contract with Java-level exception handlers:
-  // rax,: exception
+  // rax: exception
   // rdx: throwing pc
   //
   // NOTE: At entry of this stub, exception-pc must be on stack !!
@@ -377,6 +377,12 @@
   address generate_forward_exception() {
     StubCodeMark mark(this, "StubRoutines", "forward exception");
     address start = __ pc();
+    const Register thread = rcx;
+
+    // other registers used in this stub
+    const Register exception_oop = rax;
+    const Register handler_addr  = rbx;
+    const Register exception_pc  = rdx;
 
     // Upon entry, the sp points to the return address returning into Java
     // (interpreted or compiled) code; i.e., the return address becomes the
@@ -389,8 +395,8 @@
 #ifdef ASSERT
     // make sure this code is only executed if there is a pending exception
     { Label L;
-      __ get_thread(rcx);
-      __ cmpptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
+      __ get_thread(thread);
+      __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
       __ jcc(Assembler::notEqual, L);
       __ stop("StubRoutines::forward exception: no pending exception (1)");
       __ bind(L);
@@ -398,33 +404,40 @@
 #endif
 
     // compute exception handler into rbx,
-    __ movptr(rax, Address(rsp, 0));
+    __ get_thread(thread);
+    __ movptr(exception_pc, Address(rsp, 0));
     BLOCK_COMMENT("call exception_handler_for_return_address");
-    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rax);
-    __ mov(rbx, rax);
+    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
+    __ mov(handler_addr, rax);
 
-    // setup rax, & rdx, remove return address & clear pending exception
-    __ get_thread(rcx);
-    __ pop(rdx);
-    __ movptr(rax, Address(rcx, Thread::pending_exception_offset()));
-    __ movptr(Address(rcx, Thread::pending_exception_offset()), NULL_WORD);
+    // setup rax & rdx, remove return address & clear pending exception
+    __ get_thread(thread);
+    __ pop(exception_pc);
+    __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
+    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
 
 #ifdef ASSERT
     // make sure exception is set
     { Label L;
-      __ testptr(rax, rax);
+      __ testptr(exception_oop, exception_oop);
       __ jcc(Assembler::notEqual, L);
       __ stop("StubRoutines::forward exception: no pending exception (2)");
       __ bind(L);
     }
 #endif
 
+    // Verify that there is really a valid exception in RAX.
+    __ verify_oop(exception_oop);
+
+    // Restore SP from BP if the exception PC is a MethodHandle call site.
+    __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
+    __ cmovptr(Assembler::notEqual, rsp, rbp);
+
     // continue at exception handler (return address removed)
-    // rax,: exception
-    // rbx,: exception handler
+    // rax: exception
+    // rbx: exception handler
     // rdx: throwing pc
-    __ verify_oop(rax);
-    __ jmp(rbx);
+    __ jmp(handler_addr);
 
     return start;
   }
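
The two-instruction guard added here recurs in c1_Runtime1_x86.cpp, runtime_x86_32.cpp and sharedRuntime_x86_64.cpp above: if the thread-local flag marks the exception PC as a MethodHandle call site, the stack was extended for the call, so rsp must be rewound from rbp before jumping to the handler. In isolation:

  // Restore SP from BP if the exception PC is a MethodHandle call site.
  __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
  __ cmovptr(Assembler::notEqual, rsp, rbp);   // rsp = rbp iff flag != 0
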
@@ -799,7 +812,7 @@
     Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit;
     // Copy 64-byte chunks
     __ jmpb(L_copy_64_bytes);
-    __ align(16);
+    __ align(OptoLoopAlignment);
   __ BIND(L_copy_64_bytes_loop);
 
     if(UseUnalignedLoadStores) {
@@ -861,7 +874,7 @@
     Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit;
     // Copy 64-byte chunks
     __ jmpb(L_copy_64_bytes);
-    __ align(16);
+    __ align(OptoLoopAlignment);
   __ BIND(L_copy_64_bytes_loop);
     __ movq(mmx0, Address(from, 0));
     __ movq(mmx1, Address(from, 8));
@@ -1131,7 +1144,7 @@
       __ movl(Address(to, count, sf, 0), rdx);
       __ jmpb(L_copy_8_bytes);
 
-      __ align(16);
+      __ align(OptoLoopAlignment);
       // Move 8 bytes
     __ BIND(L_copy_8_bytes_loop);
       if (UseXMMForArrayCopy) {
@@ -1222,7 +1235,7 @@
       }
     } else {
       __ jmpb(L_copy_8_bytes);
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_copy_8_bytes_loop);
       __ fild_d(Address(from, 0));
       __ fistp_d(Address(from, to_from, Address::times_1));
@@ -1269,7 +1282,7 @@
 
     __ jmpb(L_copy_8_bytes);
 
-    __ align(16);
+    __ align(OptoLoopAlignment);
   __ BIND(L_copy_8_bytes_loop);
     if (VM_Version::supports_mmx()) {
       if (UseXMMForArrayCopy) {
@@ -1441,7 +1454,7 @@
     // Loop control:
     //   for (count = -count; count != 0; count++)
     // Base pointers src, dst are biased by 8*count,to last element.
-    __ align(16);
+    __ align(OptoLoopAlignment);
 
     __ BIND(L_store_element);
     __ movptr(to_element_addr, elem);     // store the oop
@@ -2263,16 +2276,6 @@
     // arraycopy stubs used by compilers
     generate_arraycopy_stubs();
 
-    // generic method handle stubs
-    if (EnableMethodHandles && SystemDictionary::MethodHandle_klass() != NULL) {
-      for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST;
-           ek < MethodHandles::_EK_LIMIT;
-           ek = MethodHandles::EntryKind(1 + (int)ek)) {
-        StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek));
-        MethodHandles::generate_method_handle_stub(_masm, ek);
-      }
-    }
-
     generate_math_stubs();
   }
 
--- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Thu May 13 18:26:35 2010 -0700
@@ -466,7 +466,7 @@
     BLOCK_COMMENT("call exception_handler_for_return_address");
     __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                          SharedRuntime::exception_handler_for_return_address),
-                    c_rarg0);
+                    r15_thread, c_rarg0);
     __ mov(rbx, rax);
 
     // setup rax & rdx, remove return address & clear pending exception
@@ -871,9 +871,8 @@
   }
 
   address generate_fp_mask(const char *stub_name, int64_t mask) {
+    __ align(CodeEntryAlignment);
     StubCodeMark mark(this, "StubRoutines", stub_name);
-
-    __ align(16);
     address start = __ pc();
 
     __ emit_data64( mask, relocInfo::none );
@@ -1268,7 +1267,7 @@
                              Label& L_copy_32_bytes, Label& L_copy_8_bytes) {
     DEBUG_ONLY(__ stop("enter at entry label, not here"));
     Label L_loop;
-    __ align(16);
+    __ align(OptoLoopAlignment);
   __ BIND(L_loop);
     if(UseUnalignedLoadStores) {
       __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
@@ -1309,7 +1308,7 @@
                               Label& L_copy_32_bytes, Label& L_copy_8_bytes) {
     DEBUG_ONLY(__ stop("enter at entry label, not here"));
     Label L_loop;
-    __ align(16);
+    __ align(OptoLoopAlignment);
   __ BIND(L_loop);
     if(UseUnalignedLoadStores) {
       __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
@@ -2229,7 +2228,7 @@
     // Loop control:
     //   for (count = -count; count != 0; count++)
     // Base pointers src, dst are biased by 8*(count-1),to last element.
-    __ align(16);
+    __ align(OptoLoopAlignment);
 
     __ BIND(L_store_element);
     __ store_heap_oop(to_element_addr, rax_oop);  // store the oop
@@ -3009,16 +3008,6 @@
     // arraycopy stubs used by compilers
     generate_arraycopy_stubs();
 
-    // generic method handle stubs
-    if (EnableMethodHandles && SystemDictionary::MethodHandle_klass() != NULL) {
-      for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST;
-           ek < MethodHandles::_EK_LIMIT;
-           ek = MethodHandles::EntryKind(1 + (int)ek)) {
-        StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek));
-        MethodHandles::generate_method_handle_stub(_masm, ek);
-      }
-    }
-
     generate_math_stubs();
   }
 
--- a/src/cpu/x86/vm/stubRoutines_x86_32.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/x86/vm/stubRoutines_x86_32.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,11 @@
   code_size2 = 22000            // simply increase if too small (assembler will crash if too small)
 };
 
+// MethodHandles adapters
+enum method_handles_platform_dependent_constants {
+  method_handles_adapters_code_size = 5000
+};
+
 class x86 {
  friend class StubGenerator;
  friend class VMStructs;
--- a/src/cpu/x86/vm/stubRoutines_x86_64.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/x86/vm/stubRoutines_x86_64.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,12 +28,14 @@
 
 static bool    returns_to_call_stub(address return_pc)   { return return_pc == _call_stub_return_address; }
 
-enum platform_dependent_constants
-{
-  code_size1 =  19000, // simply increase if too small (assembler will
-                      // crash if too small)
-  code_size2 = 22000  // simply increase if too small (assembler will
-                      // crash if too small)
+enum platform_dependent_constants {
+  code_size1 = 19000,          // simply increase if too small (assembler will crash if too small)
+  code_size2 = 22000           // simply increase if too small (assembler will crash if too small)
+};
+
+// MethodHandles adapters
+enum method_handles_platform_dependent_constants {
+  method_handles_adapters_code_size = 13000
 };
 
 class x86 {
--- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1550,6 +1550,7 @@
 void TemplateInterpreterGenerator::generate_throw_exception() {
   // Entry point in previous activation (i.e., if the caller was interpreted)
   Interpreter::_rethrow_exception_entry = __ pc();
+  const Register thread = rcx;
 
   // Restore sp to interpreter_frame_last_sp even though we are going
   // to empty the expression stack for the exception processing.
@@ -1598,10 +1599,10 @@
   // Set the popframe_processing bit in pending_popframe_condition indicating that we are
   // currently handling popframe, so that call_VMs that may happen later do not trigger new
   // popframe handling cycles.
-  __ get_thread(rcx);
-  __ movl(rdx, Address(rcx, JavaThread::popframe_condition_offset()));
+  __ get_thread(thread);
+  __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
   __ orl(rdx, JavaThread::popframe_processing_bit);
-  __ movl(Address(rcx, JavaThread::popframe_condition_offset()), rdx);
+  __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);
 
   {
     // Check to see whether we are returning to a deoptimized frame.
@@ -1629,8 +1630,8 @@
     __ subptr(rdi, rax);
     __ addptr(rdi, wordSize);
     // Save these arguments
-    __ get_thread(rcx);
-    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), rcx, rax, rdi);
+    __ get_thread(thread);
+    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), thread, rax, rdi);
 
     __ remove_activation(vtos, rdx,
                          /* throw_monitor_exception */ false,
@@ -1638,8 +1639,8 @@
                          /* notify_jvmdi */ false);
 
     // Inform deoptimization that it is responsible for restoring these arguments
-    __ get_thread(rcx);
-    __ movl(Address(rcx, JavaThread::popframe_condition_offset()), JavaThread::popframe_force_deopt_reexecution_bit);
+    __ get_thread(thread);
+    __ movl(Address(thread, JavaThread::popframe_condition_offset()), JavaThread::popframe_force_deopt_reexecution_bit);
 
     // Continue in deoptimization handler
     __ jmp(rdx);
@@ -1665,12 +1666,12 @@
   // expression stack if necessary.
   __ mov(rax, rsp);
   __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
-  __ get_thread(rcx);
+  __ get_thread(thread);
   // PC must point into interpreter here
-  __ set_last_Java_frame(rcx, noreg, rbp, __ pc());
-  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), rcx, rax, rbx);
-  __ get_thread(rcx);
-  __ reset_last_Java_frame(rcx, true, true);
+  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
+  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
+  __ get_thread(thread);
+  __ reset_last_Java_frame(thread, true, true);
   // Restore the last_sp and null it out
   __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
@@ -1684,8 +1685,8 @@
   }
 
   // Clear the popframe condition flag
-  __ get_thread(rcx);
-  __ movl(Address(rcx, JavaThread::popframe_condition_offset()), JavaThread::popframe_inactive);
+  __ get_thread(thread);
+  __ movl(Address(thread, JavaThread::popframe_condition_offset()), JavaThread::popframe_inactive);
 
   __ dispatch_next(vtos);
   // end of PopFrame support
@@ -1694,27 +1695,27 @@
 
   // preserve exception over this code sequence
   __ pop_ptr(rax);
-  __ get_thread(rcx);
-  __ movptr(Address(rcx, JavaThread::vm_result_offset()), rax);
+  __ get_thread(thread);
+  __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
   // remove the activation (without doing throws on illegalMonitorExceptions)
   __ remove_activation(vtos, rdx, false, true, false);
   // restore exception
-  __ get_thread(rcx);
-  __ movptr(rax, Address(rcx, JavaThread::vm_result_offset()));
-  __ movptr(Address(rcx, JavaThread::vm_result_offset()), NULL_WORD);
+  __ get_thread(thread);
+  __ movptr(rax, Address(thread, JavaThread::vm_result_offset()));
+  __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
   __ verify_oop(rax);
 
   // In between activations - previous activation type unknown yet
   // compute continuation point - the continuation point expects
   // the following registers set up:
   //
-  // rax,: exception
+  // rax: exception
   // rdx: return address/pc that threw exception
   // rsp: expression stack of caller
-  // rbp,: rbp, of caller
+  // rbp: rbp of caller
   __ push(rax);                                  // save exception
   __ push(rdx);                                  // save return address
-  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rdx);
+  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, rdx);
   __ mov(rbx, rax);                              // save exception handler
   __ pop(rdx);                                   // restore return address
   __ pop(rax);                                   // restore exception
@@ -1728,6 +1729,7 @@
 //
 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
   address entry = __ pc();
+  const Register thread = rcx;
 
   __ restore_bcp();
   __ restore_locals();
@@ -1735,8 +1737,8 @@
   __ empty_FPU_stack();
   __ load_earlyret_value(state);
 
-  __ get_thread(rcx);
-  __ movptr(rcx, Address(rcx, JavaThread::jvmti_thread_state_offset()));
+  __ get_thread(thread);
+  __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
   const Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());
 
   // Clear the earlyret state
--- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1741,7 +1741,7 @@
   __ push(rdx);                                  // save return address
   __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                           SharedRuntime::exception_handler_for_return_address),
-                        rdx);
+                        r15_thread, rdx);
   __ mov(rbx, rax);                              // save exception handler
   __ pop(rdx);                                   // restore return address
   __ pop(rax);                                   // restore exception
--- a/src/cpu/x86/vm/templateTable_x86_32.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/x86/vm/templateTable_x86_32.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2915,12 +2915,8 @@
     __ andl(recv, 0xFF);
     // recv count is 0 based?
     Address recv_addr(rsp, recv, Interpreter::stackElementScale(), -Interpreter::expr_offset_in_bytes(1));
-    if (is_invokedynamic) {
-      __ lea(recv, recv_addr);
-    } else {
-      __ movptr(recv, recv_addr);
-      __ verify_oop(recv);
-    }
+    __ movptr(recv, recv_addr);
+    __ verify_oop(recv);
   }
 
   // do null check if needed
--- a/src/cpu/x86/vm/templateTable_x86_64.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/x86/vm/templateTable_x86_64.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2860,12 +2860,8 @@
     __ andl(recv, 0xFF);
     if (TaggedStackInterpreter) __ shll(recv, 1);  // index*2
     Address recv_addr(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1));
-    if (is_invokedynamic) {
-      __ lea(recv, recv_addr);
-    } else {
-      __ movptr(recv, recv_addr);
-      __ verify_oop(recv);
-    }
+    __ movptr(recv, recv_addr);
+    __ verify_oop(recv);
   }
 
   // do null check if needed
--- a/src/cpu/x86/vm/x86_32.ad	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/x86/vm/x86_32.ad	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 //
-// Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+// Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -1444,8 +1444,10 @@
 // to implement the UseStrictFP mode.
 const bool Matcher::strict_fp_requires_explicit_rounding = true;
 
-// Do floats take an entire double register or just half?
-const bool Matcher::float_in_double = true;
+// Are floats converted to double when stored to the stack during deoptimization?
+// On x32 they are stored with conversion only when the FPU is used for floats.
+bool Matcher::float_in_double() { return (UseSSE == 0); }
+
 // Do ints take an entire long register or just half?
 const bool Matcher::int_in_long = false;
 
@@ -6270,6 +6272,30 @@
   ins_pipe( ialu_reg_reg);
 %}
 
+instruct bytes_reverse_unsigned_short(eRegI dst) %{
+  match(Set dst (ReverseBytesUS dst));
+
+  format %{ "BSWAP  $dst\n\t" 
+            "SHR    $dst,16\n\t" %}
+  ins_encode %{
+    __ bswapl($dst$$Register);
+    __ shrl($dst$$Register, 16); 
+  %}
+  ins_pipe( ialu_reg );
+%}
+
+instruct bytes_reverse_short(eRegI dst) %{
+  match(Set dst (ReverseBytesS dst));
+
+  format %{ "BSWAP  $dst\n\t" 
+            "SAR    $dst,16\n\t" %}
+  ins_encode %{
+    __ bswapl($dst$$Register);
+    __ sarl($dst$$Register, 16); 
+  %}
+  ins_pipe( ialu_reg );
+%}
+
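
Both new instructs reduce a 16-bit byte swap to a 32-bit BSWAP followed by a shift: a logical shift (SHR) zero-extends for the unsigned case, an arithmetic shift (SAR) sign-extends for the signed case. A small C++ sketch of why this works, assuming GCC/Clang's __builtin_bswap32:

  #include <stdint.h>

  uint16_t reverse_bytes_us(uint16_t x) {
    // bswap32 moves the two interesting bytes to the top 16 bits;
    // a logical shift right by 16 brings them back down zero-extended.
    return (uint16_t)(__builtin_bswap32((uint32_t)x) >> 16);
  }

  int16_t reverse_bytes_s(int16_t x) {
    // Same trick, but the arithmetic shift sign-extends the swapped
    // value, matching the SAR in bytes_reverse_short above.
    return (int16_t)((int32_t)__builtin_bswap32((uint32_t)x) >> 16);
  }
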
 
 //---------- Zeros Count Instructions ------------------------------------------
 
--- a/src/cpu/x86/vm/x86_64.ad	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/x86/vm/x86_64.ad	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 //
-// Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
+// Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -2074,8 +2074,10 @@
 // implement the UseStrictFP mode.
 const bool Matcher::strict_fp_requires_explicit_rounding = true;
 
-// Do floats take an entire double register or just half?
-const bool Matcher::float_in_double = true;
+// Are floats converted to double when stored to the stack during deoptimization?
+// On x64 they are stored without conversion, so we can use normal access.
+bool Matcher::float_in_double() { return false; }
+
 // Do ints take an entire long register or just half?
 const bool Matcher::int_in_long = true;
 
@@ -7369,6 +7371,30 @@
   ins_pipe( ialu_reg);
 %}
 
+instruct bytes_reverse_unsigned_short(rRegI dst) %{
+  match(Set dst (ReverseBytesUS dst));
+
+  format %{ "bswapl  $dst\n\t" 
+            "shrl    $dst,16\n\t" %}
+  ins_encode %{
+    __ bswapl($dst$$Register);
+    __ shrl($dst$$Register, 16); 
+  %}
+  ins_pipe( ialu_reg );
+%}
+
+instruct bytes_reverse_short(rRegI dst) %{
+  match(Set dst (ReverseBytesS dst));
+
+  format %{ "bswapl  $dst\n\t" 
+            "sar     $dst,16\n\t" %}
+  ins_encode %{
+    __ bswapl($dst$$Register);
+    __ sarl($dst$$Register, 16); 
+  %}
+  ins_pipe( ialu_reg );
+%}
+
 instruct loadI_reversed(rRegI dst, memory src) %{
   match(Set dst (ReverseBytesI (LoadI src)));
 
--- a/src/cpu/zero/vm/cppInterpreter_zero.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp	Thu May 13 18:26:35 2010 -0700
@@ -39,21 +39,9 @@
 
 void CppInterpreter::normal_entry(methodOop method, intptr_t UNUSED, TRAPS) {
   JavaThread *thread = (JavaThread *) THREAD;
-  ZeroStack *stack = thread->zero_stack();
-
-  // Adjust the caller's stack frame to accomodate any additional
-  // local variables we have contiguously with our parameters.
-  int extra_locals = method->max_locals() - method->size_of_parameters();
-  if (extra_locals > 0) {
-    if (extra_locals > stack->available_words()) {
-      Unimplemented();
-    }
-    for (int i = 0; i < extra_locals; i++)
-      stack->push(0);
-  }
 
   // Allocate and initialize our frame.
-  InterpreterFrame *frame = InterpreterFrame::build(stack, method, thread);
+  InterpreterFrame *frame = InterpreterFrame::build(method, CHECK);
   thread->push_zero_frame(frame);
 
   // Execute those bytecodes!
@@ -76,12 +64,6 @@
   intptr_t *result = NULL;
   int result_slots = 0;
 
-  // Check we're not about to run out of stack
-  if (stack_overflow_imminent(thread)) {
-    CALL_VM_NOCHECK(InterpreterRuntime::throw_StackOverflowError(thread));
-    goto unwind_and_return;
-  }
-
   while (true) {
     // We can set up the frame anchor with everything we want at
     // this point as we are thread_in_Java and no safepoints can
@@ -123,9 +105,9 @@
       int monitor_words = frame::interpreter_frame_monitor_size();
 
       // Allocate the space
-      if (monitor_words > stack->available_words()) {
-        Unimplemented();
-      }
+      stack->overflow_check(monitor_words, THREAD);
+      if (HAS_PENDING_EXCEPTION)
+        break;
       stack->alloc(monitor_words * wordSize);
 
       // Move the expression stack contents
@@ -172,8 +154,6 @@
     }
   }
 
- unwind_and_return:
-
   // Unwind the current frame
   thread->pop_zero_frame();
 
@@ -193,20 +173,13 @@
   ZeroStack *stack = thread->zero_stack();
 
   // Allocate and initialize our frame
-  InterpreterFrame *frame = InterpreterFrame::build(stack, method, thread);
+  InterpreterFrame *frame = InterpreterFrame::build(method, CHECK);
   thread->push_zero_frame(frame);
   interpreterState istate = frame->interpreter_state();
   intptr_t *locals = istate->locals();
 
-  // Check we're not about to run out of stack
-  if (stack_overflow_imminent(thread)) {
-    CALL_VM_NOCHECK(InterpreterRuntime::throw_StackOverflowError(thread));
-    goto unwind_and_return;
-  }
-
   // Update the invocation counter
   if ((UseCompiler || CountCompiledCalls) && !method->is_synchronized()) {
-    thread->set_do_not_unlock();
     InvocationCounter *counter = method->invocation_counter();
     counter->increment();
     if (counter->reached_InvocationLimit()) {
@@ -215,7 +188,6 @@
       if (HAS_PENDING_EXCEPTION)
         goto unwind_and_return;
     }
-    thread->clr_do_not_unlock();
   }
 
   // Lock if necessary
@@ -266,9 +238,10 @@
   assert(function != NULL, "should be set if signature handler is");
 
   // Build the argument list
-  if (handler->argument_count() * 2 > stack->available_words()) {
-    Unimplemented();
-  }
+  stack->overflow_check(handler->argument_count() * 2, THREAD);
+  if (HAS_PENDING_EXCEPTION)
+    goto unlock_unwind_and_return;
+
   void **arguments;
   void *mirror; {
     arguments =
@@ -505,9 +478,7 @@
   switch (entry->flag_state()) {
   case ltos:
   case dtos:
-    if (stack->available_words() < 1) {
-      Unimplemented();
-    }
+    stack->overflow_check(1, CHECK);
     stack->alloc(wordSize);
     break;
   }
@@ -603,39 +574,30 @@
   stack->set_sp(stack->sp() + method->size_of_parameters());
 }
 
-bool CppInterpreter::stack_overflow_imminent(JavaThread *thread) {
-  // How is the ABI stack?
-  address stack_top = thread->stack_base() - thread->stack_size();
-  int free_stack = os::current_stack_pointer() - stack_top;
-  if (free_stack < StackShadowPages * os::vm_page_size()) {
-    return true;
-  }
+InterpreterFrame *InterpreterFrame::build(const methodOop method, TRAPS) {
+  JavaThread *thread = (JavaThread *) THREAD;
+  ZeroStack *stack = thread->zero_stack();
+
+  // Calculate the size of the frame we'll build, including
+  // any adjustments to the caller's frame that we'll make.
+  int extra_locals  = 0;
+  int monitor_words = 0;
+  int stack_words   = 0;
 
-  // How is the Zero stack?
-  // Throwing a StackOverflowError involves a VM call, which means
-  // we need a frame on the stack.  We should be checking here to
-  // ensure that methods we call have enough room to install the
-  // largest possible frame, but that's more than twice the size
-  // of the entire Zero stack we get by default, so we just check
-  // we have *some* space instead...
-  free_stack = thread->zero_stack()->available_words() * wordSize;
-  if (free_stack < StackShadowPages * os::vm_page_size()) {
-    return true;
+  if (!method->is_native()) {
+    extra_locals = method->max_locals() - method->size_of_parameters();
+    stack_words  = method->max_stack();
   }
+  if (method->is_synchronized()) {
+    monitor_words = frame::interpreter_frame_monitor_size();
+  }
+  stack->overflow_check(
+    extra_locals + header_words + monitor_words + stack_words, CHECK_NULL);
 
-  return false;
-}
-
-InterpreterFrame *InterpreterFrame::build(ZeroStack*       stack,
-                                          const methodOop  method,
-                                          JavaThread*      thread) {
-  int monitor_words =
-    method->is_synchronized() ? frame::interpreter_frame_monitor_size() : 0;
-  int stack_words = method->is_native() ? 0 : method->max_stack();
-
-  if (header_words + monitor_words + stack_words > stack->available_words()) {
-    Unimplemented();
-  }
+  // Adjust the caller's stack frame to accommodate any additional
+  // local variables we have contiguously with our parameters.
+  for (int i = 0; i < extra_locals; i++)
+    stack->push(0);
 
   intptr_t *locals;
   if (method->is_native())
@@ -814,14 +776,13 @@
 
 // Deoptimization helpers
 
-InterpreterFrame *InterpreterFrame::build(ZeroStack* stack, int size) {
+InterpreterFrame *InterpreterFrame::build(int size, TRAPS) {
+  ZeroStack *stack = ((JavaThread *) THREAD)->zero_stack();
+
   int size_in_words = size >> LogBytesPerWord;
   assert(size_in_words * wordSize == size, "unaligned");
   assert(size_in_words >= header_words, "too small");
-
-  if (size_in_words > stack->available_words()) {
-    Unimplemented();
-  }
+  stack->overflow_check(size_in_words, CHECK_NULL);
 
   stack->push(0); // next_frame, filled in later
   intptr_t *fp = stack->sp();
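
The Zero refactor above replaces ad-hoc Unimplemented() stack checks with ZeroStack::overflow_check(words, TRAPS), which throws a real StackOverflowError through HotSpot's TRAPS/CHECK machinery. The calling convention, as used throughout this file:

  // CHECK_NULL propagates a pending exception and returns NULL
  // from the current function if overflow_check threw:
  stack->overflow_check(required_words, CHECK_NULL);

  // Callers that need cleanup first test explicitly instead:
  stack->overflow_check(monitor_words, THREAD);
  if (HAS_PENDING_EXCEPTION)
    break;
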
--- a/src/cpu/zero/vm/cppInterpreter_zero.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/zero/vm/cppInterpreter_zero.hpp	Thu May 13 18:26:35 2010 -0700
@@ -39,9 +39,5 @@
   static void main_loop(int recurse, TRAPS);
 
  private:
-  // Stack overflow checks
-  static bool stack_overflow_imminent(JavaThread *thread);
-
- private:
   // Fast result type determination
   static BasicType result_type_of(methodOop method);
--- a/src/cpu/zero/vm/entryFrame_zero.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/zero/vm/entryFrame_zero.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2008 Red Hat, Inc.
+ * Copyright 2008, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,10 +47,10 @@
   };
 
  public:
-  static EntryFrame *build(ZeroStack*       stack,
-                           const intptr_t*  parameters,
+  static EntryFrame *build(const intptr_t*  parameters,
                            int              parameter_words,
-                           JavaCallWrapper* call_wrapper);
+                           JavaCallWrapper* call_wrapper,
+                           TRAPS);
  public:
   JavaCallWrapper *call_wrapper() const {
     return (JavaCallWrapper *) value_of_word(call_wrapper_off);
--- a/src/cpu/zero/vm/fakeStubFrame_zero.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/zero/vm/fakeStubFrame_zero.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2008 Red Hat, Inc.
+ * Copyright 2008, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,7 @@
   };
 
  public:
-  static FakeStubFrame *build(ZeroStack* stack);
+  static FakeStubFrame *build(TRAPS);
 
  public:
   void identify_word(int   frame_index,
--- a/src/cpu/zero/vm/globals_zero.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/zero/vm/globals_zero.hpp	Thu May 13 18:26:35 2010 -0700
@@ -35,6 +35,7 @@
 define_pd_global(bool,  UncommonNullCast,     true);
 
 define_pd_global(intx,  CodeEntryAlignment,   32);
+define_pd_global(intx,  OptoLoopAlignment,    16);
 define_pd_global(intx,  InlineFrequencyCount, 100);
 define_pd_global(intx,  PreInflateSpin,       10);
 
--- a/src/cpu/zero/vm/interpreterFrame_zero.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/zero/vm/interpreterFrame_zero.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2008 Red Hat, Inc.
+ * Copyright 2008, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -55,10 +55,8 @@
   };
 
  public:
-  static InterpreterFrame *build(ZeroStack*      stack,
-                                 const methodOop method,
-                                 JavaThread*     thread);
-  static InterpreterFrame *build(ZeroStack* stack, int size);
+  static InterpreterFrame *build(const methodOop method, TRAPS);
+  static InterpreterFrame *build(int size, TRAPS);
 
  public:
   interpreterState interpreter_state() const {
--- a/src/cpu/zero/vm/interpreterRT_zero.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/zero/vm/interpreterRT_zero.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2005 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008 Red Hat, Inc.
+ * Copyright 2007, 2008, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -140,9 +140,8 @@
   int required_words =
     (align_size_up(sizeof(ffi_cif), wordSize) >> LogBytesPerWord) +
     (method->is_static() ? 2 : 1) + method->size_of_parameters() + 1;
-  if (required_words > stack->available_words()) {
-    Unimplemented();
-  }
+
+  stack->overflow_check(required_words, CHECK_NULL);
 
   intptr_t *buf = (intptr_t *) stack->alloc(required_words * wordSize);
   SlowSignatureHandlerGenerator sshg(methodHandle(thread, method), buf);
--- a/src/cpu/zero/vm/methodHandles_zero.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/zero/vm/methodHandles_zero.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2009 Red Hat, Inc.
+ * Copyright 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,4 +23,10 @@
  *
  */
 
-// This file is intentionally empty
+#include "incls/_precompiled.incl"
+#include "incls/_methodHandles_zero.cpp.incl"
+
+void MethodHandles::generate_method_handle_stub(MacroAssembler*          masm,
+                                                MethodHandles::EntryKind ek) {
+  ShouldNotCallThis();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/zero/vm/stack_zero.cpp	Thu May 13 18:26:35 2010 -0700
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2010 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_stack_zero.cpp.incl"
+
+void ZeroStack::handle_overflow(TRAPS) {
+  JavaThread *thread = (JavaThread *) THREAD;
+
+  // Set up the frame anchor if it isn't already
+  bool has_last_Java_frame = thread->has_last_Java_frame();
+  if (!has_last_Java_frame) {
+    ZeroFrame *frame = thread->top_zero_frame();
+    while (frame) {
+      if (frame->is_shark_frame())
+        break;
+
+      if (frame->is_interpreter_frame()) {
+        interpreterState istate =
+          frame->as_interpreter_frame()->interpreter_state();
+        if (istate->self_link() == istate)
+          break;
+      }
+
+      frame = frame->next();
+    }
+
+    if (frame == NULL)
+      fatal("unrecoverable stack overflow");
+
+    thread->set_last_Java_frame(frame);
+  }
+
+  // Throw the exception
+  switch (thread->thread_state()) {
+  case _thread_in_Java:
+    InterpreterRuntime::throw_StackOverflowError(thread);
+    break;
+
+  case _thread_in_vm:
+    Exceptions::throw_stack_overflow_exception(thread, __FILE__, __LINE__);
+    break;
+
+  default:
+    ShouldNotReachHere();
+  }
+
+  // Reset the frame anchor if necessary
+  if (!has_last_Java_frame)
+    thread->reset_last_Java_frame();
+}
--- a/src/cpu/zero/vm/stack_zero.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/zero/vm/stack_zero.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2008, 2009 Red Hat, Inc.
+ * Copyright 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,9 +29,14 @@
   intptr_t *_top;  // the word past the end of the stack
   intptr_t *_sp;   // the top word on the stack
 
+ private:
+  int _shadow_pages_size; // how much ABI stack must we keep free?
+
  public:
   ZeroStack()
-    : _base(NULL), _top(NULL), _sp(NULL) {}
+    : _base(NULL), _top(NULL), _sp(NULL) {
+    _shadow_pages_size = StackShadowPages * os::vm_page_size();
+  }
 
   bool needs_setup() const {
     return _base == NULL;
@@ -81,6 +86,14 @@
     return _sp -= count;
   }
 
+  int shadow_pages_size() const {
+    return _shadow_pages_size;
+  }
+
+ public:
+  void overflow_check(int required_words, TRAPS);
+  static void handle_overflow(TRAPS);
+
  public:
   static ByteSize base_offset() {
     return byte_offset_of(ZeroStack, _base);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/zero/vm/stack_zero.inline.hpp	Thu May 13 18:26:35 2010 -0700
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2010 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// This function should match SharkStack::CreateStackOverflowCheck
+inline void ZeroStack::overflow_check(int required_words, TRAPS) {
+  JavaThread *thread = (JavaThread *) THREAD;
+
+  // Check the Zero stack
+  if (required_words > available_words()) {
+    handle_overflow(THREAD);
+    return;
+  }
+
+  // Check the ABI stack
+  address stack_top = thread->stack_base() - thread->stack_size();
+  int free_stack = ((address) &stack_top) - stack_top;
+  if (free_stack < shadow_pages_size()) {
+    handle_overflow(THREAD);
+    return;
+  }
+}
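
Two details worth noting in overflow_check() above: the local named stack_top actually holds the stack's low limit (stack_base() minus stack_size()), and the free-space estimate works by taking the address of a local variable as an approximation of the current stack pointer. A standalone sketch of that estimation trick (Linux/glibc only, assuming a downward-growing stack):

    #include <pthread.h>
    #include <cstdio>
    #include <cstdint>

    // Estimate the unused native stack of the calling thread by comparing
    // a local's address against the stack mapping's low bound.
    static size_t free_native_stack() {
      pthread_attr_t attr;
      void*  stack_addr;  // lowest address of the stack mapping
      size_t stack_size;
      pthread_getattr_np(pthread_self(), &attr);
      pthread_attr_getstack(&attr, &stack_addr, &stack_size);
      pthread_attr_destroy(&attr);
      char probe;  // its address approximates the current stack pointer
      return (size_t)((uintptr_t)&probe - (uintptr_t)stack_addr);
    }

    int main() {
      std::printf("~%zu bytes of native stack left\n", free_native_stack());
      return 0;
    }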
--- a/src/cpu/zero/vm/stubGenerator_zero.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/zero/vm/stubGenerator_zero.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008 Red Hat, Inc.
+ * Copyright 2007, 2008, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -60,38 +60,43 @@
     }
 
     // Allocate and initialize our frame
-    thread->push_zero_frame(
-      EntryFrame::build(stack, parameters, parameter_words, call_wrapper));
+    EntryFrame *frame =
+      EntryFrame::build(parameters, parameter_words, call_wrapper, THREAD);
 
-    // Make the call
-    Interpreter::invoke_method(method, entry_point, THREAD);
-
-    // Store result depending on type
     if (!HAS_PENDING_EXCEPTION) {
-      switch (result_type) {
-      case T_INT:
-        *(jint *) result = *(jint *) stack->sp();
-        break;
-      case T_LONG:
-        *(jlong *) result = *(jlong *) stack->sp();
-        break;
-      case T_FLOAT:
-        *(jfloat *) result = *(jfloat *) stack->sp();
-        break;
-      case T_DOUBLE:
-        *(jdouble *) result = *(jdouble *) stack->sp();
-        break;
-      case T_OBJECT:
-        *(oop *) result = *(oop *) stack->sp();
-        break;
-      default:
-        ShouldNotReachHere();
+      // Push the frame
+      thread->push_zero_frame(frame);
+
+      // Make the call
+      Interpreter::invoke_method(method, entry_point, THREAD);
+
+      // Store the result
+      if (!HAS_PENDING_EXCEPTION) {
+        switch (result_type) {
+        case T_INT:
+          *(jint *) result = *(jint *) stack->sp();
+          break;
+        case T_LONG:
+          *(jlong *) result = *(jlong *) stack->sp();
+          break;
+        case T_FLOAT:
+          *(jfloat *) result = *(jfloat *) stack->sp();
+          break;
+        case T_DOUBLE:
+          *(jdouble *) result = *(jdouble *) stack->sp();
+          break;
+        case T_OBJECT:
+          *(oop *) result = *(oop *) stack->sp();
+          break;
+        default:
+          ShouldNotReachHere();
+        }
       }
+
+      // Unwind the frame
+      thread->pop_zero_frame();
     }
 
-    // Unwind our frame
-    thread->pop_zero_frame();
-
     // Tear down the stack if necessary
     if (stack_needs_teardown)
       stack->teardown();
@@ -226,13 +231,13 @@
   StubGenerator g(code, all);
 }
 
-EntryFrame *EntryFrame::build(ZeroStack*       stack,
-                              const intptr_t*  parameters,
+EntryFrame *EntryFrame::build(const intptr_t*  parameters,
                               int              parameter_words,
-                              JavaCallWrapper* call_wrapper) {
-  if (header_words + parameter_words > stack->available_words()) {
-    Unimplemented();
-  }
+                              JavaCallWrapper* call_wrapper,
+                              TRAPS) {
+
+  ZeroStack *stack = ((JavaThread *) THREAD)->zero_stack();
+  stack->overflow_check(header_words + parameter_words, CHECK_NULL);
 
   stack->push(0); // next_frame, filled in later
   intptr_t *fp = stack->sp();
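
EntryFrame::build() now takes TRAPS and checks the Zero stack with CHECK_NULL, so a pending StackOverflowError propagates to the caller instead of hitting Unimplemented(). As a rough analogy (the macros below are simplified stand-ins, not HotSpot's real TRAPS/CHECK machinery, which thread a Thread* and test its pending-exception slot), the propagation pattern looks like:

    #include <cstdio>

    // Simplified stand-ins for the exception-propagation macros.
    struct Env { const char* pending; };
    #define TRAPS      Env* env
    #define CHECK_NULL env); if (env->pending) return nullptr; ((void)0

    static bool reserve(int words, Env* env) {
      if (words > 100) env->pending = "StackOverflowError";
      return env->pending == nullptr;
    }

    static int* build_frame(int words, TRAPS) {
      reserve(words, CHECK_NULL);  // returns NULL if an exception is pending
      static int frame[4];
      return frame;
    }

    int main() {
      Env env = { nullptr };
      std::printf("frame: %p\n", (void*)build_frame(1000, &env));
      std::printf("pending: %s\n", env.pending ? env.pending : "none");
      return 0;
    }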
--- a/src/cpu/zero/vm/stubRoutines_zero.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/cpu/zero/vm/stubRoutines_zero.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2005 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008, 2009 Red Hat, Inc.
+ * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,6 +41,10 @@
     code_size2 = 0       // if these are too small.  Simply increase
   };                     // them if that happens.
 
+  enum method_handles_platform_dependent_constants {
+    method_handles_adapters_code_size = 0
+  };
+
 #ifdef IA32
   class x86 {
     friend class VMStructs;
--- a/src/os/linux/vm/attachListener_linux.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/os/linux/vm/attachListener_linux.cpp	Thu May 13 18:26:35 2010 -0700
@@ -192,7 +192,8 @@
     res = ::bind(listener, (struct sockaddr*)&addr, sizeof(addr));
   }
   if (res == -1) {
-    sprintf(path, "%s/.java_pid%d", os::get_temp_directory(), os::current_process_id());
+    snprintf(path, PATH_MAX+1, "%s/.java_pid%d",
+             os::get_temp_directory(), os::current_process_id());
     strcpy(addr.sun_path, path);
     ::unlink(path);
     res = ::bind(listener, (struct sockaddr*)&addr, sizeof(addr));
@@ -460,13 +461,14 @@
   if (init_at_startup() || is_initialized()) {
     return false;               // initialized at startup or already initialized
   }
-  char fn[32];
+  char fn[128];
   sprintf(fn, ".attach_pid%d", os::current_process_id());
   int ret;
   struct stat64 st;
   RESTARTABLE(::stat64(fn, &st), ret);
   if (ret == -1) {
-    sprintf(fn, "/tmp/.attach_pid%d", os::current_process_id());
+    snprintf(fn, sizeof(fn), "%s/.attach_pid%d",
+             os::get_temp_directory(), os::current_process_id());
     RESTARTABLE(::stat64(fn, &st), ret);
   }
   if (ret == 0) {
--- a/src/os/linux/vm/os_linux.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/os/linux/vm/os_linux.cpp	Thu May 13 18:26:35 2010 -0700
@@ -22,6 +22,8 @@
  *
  */
 
+# define __STDC_FORMAT_MACROS
+
 // do not include  precompiled  header file
 # include "incls/_os_linux.cpp.incl"
 
@@ -53,6 +55,8 @@
 # include <sys/ipc.h>
 # include <sys/shm.h>
 # include <link.h>
+# include <stdint.h>
+# include <inttypes.h>
 
 #define MAX_PATH    (2 * K)
 
@@ -1518,7 +1522,10 @@
 
 const char* os::dll_file_extension() { return ".so"; }
 
-const char* os::get_temp_directory() { return "/tmp/"; }
+const char* os::get_temp_directory() {
+  const char *prop = Arguments::get_property("java.io.tmpdir");
+  return prop == NULL ? "/tmp" : prop;
+}
 
 static bool file_exists(const char* filename) {
   struct stat statbuf;
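
Note that the new return value has no trailing slash (the old one was "/tmp/"), which is why callers elsewhere in this changeset now add an explicit separator. The lookup-with-fallback shape, sketched with getenv standing in for Arguments::get_property and a hypothetical variable name:

    #include <cstdlib>
    #include <cstdio>

    // getenv stands in for Arguments::get_property; the env var name
    // is made up for the sketch.
    static const char* temp_directory() {
      const char* prop = std::getenv("JAVA_IO_TMPDIR");
      return prop == nullptr ? "/tmp" : prop;
    }

    int main() {
      std::printf("tmpdir = %s\n", temp_directory());
      return 0;
    }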
@@ -2312,7 +2319,8 @@
   char buf[40];
   int num = Atomic::add(1, &cnt);
 
-  sprintf(buf, "/tmp/hs-vm-%d-%d", os::current_process_id(), num);
+  snprintf(buf, sizeof(buf), "%s/hs-vm-%d-%d",
+           os::get_temp_directory(), os::current_process_id(), num);
   unlink(buf);
 
   int fd = open(buf, O_CREAT | O_RDWR, S_IRWXU);
@@ -2503,6 +2511,91 @@
     != MAP_FAILED;
 }
 
+// Linux uses a growable mapping for the stack, and if the mapping for
+// the stack guard pages is not removed when we detach a thread the
+// stack cannot grow beyond the pages where the stack guard was
+// mapped.  If at some point later in the process the stack expands to
+// that point, the Linux kernel cannot expand the stack any further
+// because the guard pages are in the way, and a segfault occurs.
+//
+// However, it's essential not to split the stack region by unmapping
+// a region (leaving a hole) that's already part of the stack mapping,
+// so if the stack mapping has already grown beyond the guard pages at
+// the time we create them, we have to truncate the stack mapping.
+// So, we need to know the extent of the stack mapping when
+// create_stack_guard_pages() is called.
+
+// Find the bounds of the stack mapping.  Return true for success.
+//
+// We only need this for stacks that are growable: at the time of
+// writing thread stacks don't use growable mappings (i.e. those
+// created with MAP_GROWSDOWN), and aren't marked "[stack]", so this
+// only applies to the main thread.
+static bool
+get_stack_bounds(uintptr_t *bottom, uintptr_t *top)
+{
+  FILE *f = fopen("/proc/self/maps", "r");
+  if (f == NULL)
+    return false;
+
+  while (!feof(f)) {
+    size_t dummy = 0;  // getline() reads *n, so initialize it even though str is NULL
+    char *str = NULL;
+    ssize_t len = getline(&str, &dummy, f);
+    if (len == -1) {
+      free(str);  // getline may have allocated a buffer even on failure
+      fclose(f);
+      return false;
+    }
+
+    if (len > 0 && str[len-1] == '\n') {
+      str[len-1] = 0;
+      len--;
+    }
+
+    static const char *stack_str = "[stack]";
+    if (len > (ssize_t)strlen(stack_str)
+       && (strcmp(str + len - strlen(stack_str), stack_str) == 0)) {
+      if (sscanf(str, "%" SCNxPTR "-%" SCNxPTR, bottom, top) == 2) {
+        uintptr_t sp = (uintptr_t)__builtin_frame_address(0);
+        if (sp >= *bottom && sp <= *top) {
+          free(str);
+          fclose(f);
+          return true;
+        }
+      }
+    }
+    free(str);
+  }
+  fclose(f);
+  return false;
+}
+
+// If the (growable) stack mapping already extends beyond the point
+// where we're going to put our guard pages, truncate the mapping at
+// that point by munmap()ping it.  This ensures that when we later
+// munmap() the guard pages we don't leave a hole in the stack
+// mapping.
+bool os::create_stack_guard_pages(char* addr, size_t size) {
+  uintptr_t stack_extent, stack_base;
+  if (get_stack_bounds(&stack_extent, &stack_base)) {
+    if (stack_extent < (uintptr_t)addr)
+      ::munmap((void*)stack_extent, (uintptr_t)addr - stack_extent);
+  }
+
+  return os::commit_memory(addr, size);
+}
+
+// If this is a growable mapping, remove the guard pages entirely by
+// munmap()ping them.  If not, just call uncommit_memory().
+bool os::remove_stack_guard_pages(char* addr, size_t size) {
+  uintptr_t stack_extent, stack_base;
+  if (get_stack_bounds(&stack_extent, &stack_base)) {
+    return ::munmap(addr, size) == 0;
+  }
+
+  return os::uncommit_memory(addr, size);
+}
+
 static address _highest_vm_reserved_address = NULL;
 
 // If 'fixed' is true, anon_mmap() will attempt to reserve anonymous memory
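
The "[stack]" discovery in get_stack_bounds() can be exercised outside the VM; a minimal standalone reader of /proc/self/maps (Linux-only, same sscanf format) looks like:

    #include <cstdio>
    #include <cstring>
    #include <cstdint>
    #include <cinttypes>

    // Print the bounds of the main thread's stack mapping, identified by
    // the "[stack]" suffix in /proc/self/maps.
    int main() {
      FILE* f = std::fopen("/proc/self/maps", "r");
      if (f == nullptr) return 1;
      char line[512];
      while (std::fgets(line, sizeof(line), f)) {
        if (std::strstr(line, "[stack]") == nullptr) continue;
        uintptr_t bottom, top;
        if (std::sscanf(line, "%" SCNxPTR "-%" SCNxPTR, &bottom, &top) == 2) {
          std::printf("stack: %#" PRIxPTR "-%#" PRIxPTR " (%zu KB)\n",
                      bottom, top, (size_t)(top - bottom) / 1024);
        }
      }
      std::fclose(f);
      return 0;
    }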
--- a/src/os/linux/vm/perfMemory_linux.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/os/linux/vm/perfMemory_linux.cpp	Thu May 13 18:26:35 2010 -0700
@@ -145,11 +145,11 @@
 
   const char* tmpdir = os::get_temp_directory();
   const char* perfdir = PERFDATA_NAME;
-  size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 2;
+  size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 3;
   char* dirname = NEW_C_HEAP_ARRAY(char, nbytes);
 
   // construct the path name to user specific tmp directory
-  snprintf(dirname, nbytes, "%s%s_%s", tmpdir, perfdir, user);
+  snprintf(dirname, nbytes, "%s/%s_%s", tmpdir, perfdir, user);
 
   return dirname;
 }
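
The bump from +2 to +3 pairs with the new '/' in the format string: the buffer must now hold two separators ('/' and '_') plus the terminating NUL. Counted out with hypothetical values:

    #include <cstdio>
    #include <cstring>

    int main() {
      const char* tmpdir  = "/tmp";
      const char* perfdir = "hsperfdata";
      const char* user    = "alice";
      // strlen of each part + '/' + '_' + NUL  ==  sum + 3
      size_t nbytes = std::strlen(tmpdir) + std::strlen(perfdir)
                    + std::strlen(user) + 3;
      char buf[64];
      int needed = std::snprintf(buf, nbytes, "%s/%s_%s", tmpdir, perfdir, user);
      // snprintf returns the length excluding the NUL; with the +3 sizing
      // the string exactly fits: needed == nbytes - 1.
      std::printf("%s (needed %d, allotted %zu)\n", buf, needed, nbytes);
      return 0;
    }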
@@ -331,8 +331,9 @@
     }
 
     char* usrdir_name = NEW_C_HEAP_ARRAY(char,
-                              strlen(tmpdirname) + strlen(dentry->d_name) + 1);
+                              strlen(tmpdirname) + strlen(dentry->d_name) + 2);
     strcpy(usrdir_name, tmpdirname);
+    strcat(usrdir_name, "/");
     strcat(usrdir_name, dentry->d_name);
 
     DIR* subdirp = os::opendir(usrdir_name);
--- a/src/os/solaris/dtrace/hotspot.d	Mon May 10 19:45:24 2010 -0700
+++ b/src/os/solaris/dtrace/hotspot.d	Thu May 13 18:26:35 2010 -0700
@@ -25,9 +25,20 @@
 provider hotspot {
   probe class__loaded(char*, uintptr_t, void*, uintptr_t);
   probe class__unloaded(char*, uintptr_t, void*, uintptr_t);
+  probe class__initialization__required(char*, uintptr_t, void*, intptr_t, int);
+  probe class__initialization__recursive(char*, uintptr_t, void*, intptr_t, int);
+  probe class__initialization__concurrent(char*, uintptr_t, void*, intptr_t, int);
+  probe class__initialization__erroneous(char*, uintptr_t, void*, intptr_t, int);
+  probe class__initialization__super__failed(char*, uintptr_t, void*, intptr_t, int);
+  probe class__initialization__clinit(char*, uintptr_t, void*, intptr_t, int);
+  probe class__initialization__error(char*, uintptr_t, void*, intptr_t, int);
+  probe class__initialization__end(char*, uintptr_t, void*, intptr_t, int);
   probe vm__init__begin();
   probe vm__init__end();
   probe vm__shutdown();
+  probe vmops__request(char*, uintptr_t, int);
+  probe vmops__begin(char*, uintptr_t, int);
+  probe vmops__end(char*, uintptr_t, int);
   probe gc__begin(uintptr_t);
   probe gc__end();
   probe mem__pool__gc__begin(
@@ -38,6 +49,12 @@
     uintptr_t, uintptr_t, uintptr_t, uintptr_t);
   probe thread__start(char*, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
   probe thread__stop(char*, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
+  probe thread__sleep__begin(long long);
+  probe thread__sleep__end(int);
+  probe thread__yield();
+  probe thread__park__begin(uintptr_t, int, long long);
+  probe thread__park__end(uintptr_t);
+  probe thread__unpark(uintptr_t);
   probe method__compile__begin(
     char*, uintptr_t, char*, uintptr_t, char*, uintptr_t, char*, uintptr_t); 
   probe method__compile__end(
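
These probes are fired from the VM through the usual DTRACE_PROBE macros. Purely as an illustration (the provider name here is made up), firing a probe shaped like thread__sleep__begin from C++ via <sys/sdt.h>, which both Solaris DTrace and SystemTap supply, could look like:

    #include <sys/sdt.h>  // DTRACE_PROBE* macros

    // Illustrative only: a probe shaped like hotspot's
    // thread__sleep__begin, fired from application code.
    static void sleep_begin(long long millis) {
      DTRACE_PROBE1(myprovider, thread__sleep__begin, millis);
    }

    int main() {
      sleep_begin(42);
      return 0;
    }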
--- a/src/os/solaris/vm/attachListener_solaris.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/os/solaris/vm/attachListener_solaris.cpp	Thu May 13 18:26:35 2010 -0700
@@ -375,7 +375,8 @@
     return -1;
   }
 
-  sprintf(door_path, "%s/.java_pid%d", os::get_temp_directory(), os::current_process_id());
+  snprintf(door_path, sizeof(door_path), "%s/.java_pid%d",
+           os::get_temp_directory(), os::current_process_id());
   RESTARTABLE(::creat(door_path, S_IRUSR | S_IWUSR), fd);
 
   if (fd == -1) {
@@ -591,13 +592,14 @@
   if (init_at_startup() || is_initialized()) {
     return false;               // initialized at startup or already initialized
   }
-  char fn[32];
+  char fn[128];
   sprintf(fn, ".attach_pid%d", os::current_process_id());
   int ret;
   struct stat64 st;
   RESTARTABLE(::stat64(fn, &st), ret);
   if (ret == -1) {
-    sprintf(fn, "/tmp/.attach_pid%d", os::current_process_id());
+    snprintf(fn, sizeof(fn), "%s/.attach_pid%d",
+             os::get_temp_directory(), os::current_process_id());
     RESTARTABLE(::stat64(fn, &st), ret);
   }
   if (ret == 0) {
@@ -668,13 +670,18 @@
     }
   }
 
-  if (strcmp(name, "ExtendedDTraceProbes") != 0) {
-    out->print_cr("flag '%s' cannot be changed", name);
-    return JNI_ERR;
+  if (strcmp(name, "ExtendedDTraceProbes") == 0) {
+    DTrace::set_extended_dprobes(flag);
+    return JNI_OK;
   }
 
-  DTrace::set_extended_dprobes(flag);
-  return JNI_OK;
+  if (strcmp(name, "DTraceMonitorProbes") == 0) {
+    DTrace::set_monitor_dprobes(flag);
+    return JNI_OK;
+  }
+
+  out->print_cr("flag '%s' cannot be changed", name);
+  return JNI_ERR;
 }
 
 void AttachListener::pd_detachall() {
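
The rewritten flag handler replaces a single negative test with one early-return block per supported flag, so each new flag (DTraceMonitorProbes here) is a self-contained addition. The shape, reduced to a sketch with illustrative names:

    #include <cstdio>
    #include <cstring>

    // Early-return dispatch: each supported name handles itself and
    // returns; the fall-through is the single error path.
    static int set_flag(const char* name, bool value) {
      if (std::strcmp(name, "ExtendedDTraceProbes") == 0) {
        std::printf("extended probes -> %d\n", value);
        return 0;  // JNI_OK
      }
      if (std::strcmp(name, "DTraceMonitorProbes") == 0) {
        std::printf("monitor probes -> %d\n", value);
        return 0;
      }
      std::printf("flag '%s' cannot be changed\n", name);
      return -1;   // JNI_ERR
    }

    int main() { return set_flag("DTraceMonitorProbes", true) == 0 ? 0 : 1; }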
--- a/src/os/solaris/vm/os_solaris.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/os/solaris/vm/os_solaris.cpp	Thu May 13 18:26:35 2010 -0700
@@ -676,15 +676,6 @@
 }
 
 
-static char* get_property(char* name, char* buffer, int buffer_size) {
-  if (os::getenv(name, buffer, buffer_size)) {
-    return buffer;
-  }
-  static char empty[] = "";
-  return empty;
-}
-
-
 void os::init_system_properties_values() {
   char arch[12];
   sysinfo(SI_ARCHITECTURE, arch, sizeof(arch));
@@ -1826,7 +1817,10 @@
 
 const char* os::dll_file_extension() { return ".so"; }
 
-const char* os::get_temp_directory() { return "/tmp/"; }
+const char* os::get_temp_directory() {
+  const char *prop = Arguments::get_property("java.io.tmpdir");
+  return prop == NULL ? "/tmp" : prop;
+}
 
 static bool file_exists(const char* filename) {
   struct stat statbuf;
@@ -2709,6 +2703,14 @@
   }
 }
 
+bool os::create_stack_guard_pages(char* addr, size_t size) {
+  return os::commit_memory(addr, size);
+}
+
+bool os::remove_stack_guard_pages(char* addr, size_t size) {
+  return os::uncommit_memory(addr, size);
+}
+
 // Change the page size in a given range.
 void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
   assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
--- a/src/os/solaris/vm/perfMemory_solaris.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/os/solaris/vm/perfMemory_solaris.cpp	Thu May 13 18:26:35 2010 -0700
@@ -147,11 +147,11 @@
 
   const char* tmpdir = os::get_temp_directory();
   const char* perfdir = PERFDATA_NAME;
-  size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 2;
+  size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 3;
   char* dirname = NEW_C_HEAP_ARRAY(char, nbytes);
 
   // construct the path name to user specific tmp directory
-  snprintf(dirname, nbytes, "%s%s_%s", tmpdir, perfdir, user);
+  snprintf(dirname, nbytes, "%s/%s_%s", tmpdir, perfdir, user);
 
   return dirname;
 }
@@ -322,8 +322,9 @@
     }
 
     char* usrdir_name = NEW_C_HEAP_ARRAY(char,
-                              strlen(tmpdirname) + strlen(dentry->d_name) + 1);
+                              strlen(tmpdirname) + strlen(dentry->d_name) + 2);
     strcpy(usrdir_name, tmpdirname);
+    strcat(usrdir_name, "/");
     strcat(usrdir_name, dentry->d_name);
 
     DIR* subdirp = os::opendir(usrdir_name);
--- a/src/os/windows/vm/os_windows.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/os/windows/vm/os_windows.cpp	Thu May 13 18:26:35 2010 -0700
@@ -998,15 +998,16 @@
 
 const char* os::dll_file_extension() { return ".dll"; }
 
-const char * os::get_temp_directory()
-{
-    static char path_buf[MAX_PATH];
-    if (GetTempPath(MAX_PATH, path_buf)>0)
-      return path_buf;
-    else{
-      path_buf[0]='\0';
-      return path_buf;
-    }
+const char* os::get_temp_directory() {
+  const char *prop = Arguments::get_property("java.io.tmpdir");
+  if (prop != 0) return prop;
+  static char path_buf[MAX_PATH];
+  if (GetTempPath(MAX_PATH, path_buf) > 0)
+    return path_buf;
+  else {
+    path_buf[0] = '\0';
+    return path_buf;
+  }
 }
 
 static bool file_exists(const char* filename) {
@@ -2803,6 +2804,14 @@
   return VirtualFree(addr, 0, MEM_RELEASE) != 0;
 }
 
+bool os::create_stack_guard_pages(char* addr, size_t size) {
+  return os::commit_memory(addr, size);
+}
+
+bool os::remove_stack_guard_pages(char* addr, size_t size) {
+  return os::uncommit_memory(addr, size);
+}
+
 // Set protections specified
 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                         bool is_committed) {
--- a/src/os/windows/vm/perfMemory_windows.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/os/windows/vm/perfMemory_windows.cpp	Thu May 13 18:26:35 2010 -0700
@@ -149,11 +149,11 @@
 
   const char* tmpdir = os::get_temp_directory();
   const char* perfdir = PERFDATA_NAME;
-  size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 2;
+  size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 3;
   char* dirname = NEW_C_HEAP_ARRAY(char, nbytes);
 
   // construct the path name to user specific tmp directory
-  _snprintf(dirname, nbytes, "%s%s_%s", tmpdir, perfdir, user);
+  _snprintf(dirname, nbytes, "%s\\%s_%s", tmpdir, perfdir, user);
 
   return dirname;
 }
@@ -318,8 +318,9 @@
     }
 
     char* usrdir_name = NEW_C_HEAP_ARRAY(char,
-                              strlen(tmpdirname) + strlen(dentry->d_name) + 1);
+                              strlen(tmpdirname) + strlen(dentry->d_name) + 2);
     strcpy(usrdir_name, tmpdirname);
+    strcat(usrdir_name, "\\");
     strcat(usrdir_name, dentry->d_name);
 
     DIR* subdirp = os::opendir(usrdir_name);
--- a/src/os_cpu/linux_zero/vm/globals_linux_zero.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/os_cpu/linux_zero/vm/globals_linux_zero.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2000-2005 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008 Red Hat, Inc.
+ * Copyright 2007, 2008, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,11 +29,10 @@
 //
 
 define_pd_global(bool,  DontYieldALot,           false);
+define_pd_global(intx,  ThreadStackSize,         1536);
 #ifdef _LP64
-define_pd_global(intx,  ThreadStackSize,         1536);
 define_pd_global(intx,  VMThreadStackSize,       1024);
 #else
-define_pd_global(intx,  ThreadStackSize,         1024);
 define_pd_global(intx,  VMThreadStackSize,       512);
 #endif // _LP64
 define_pd_global(intx,  SurvivorRatio,           8);
--- a/src/os_cpu/linux_zero/vm/thread_linux_zero.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/os_cpu/linux_zero/vm/thread_linux_zero.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008, 2009 Red Hat, Inc.
+ * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -68,12 +68,13 @@
 
  public:
   void set_last_Java_frame() {
-    JavaFrameAnchor *jfa = frame_anchor();
-    jfa->set_last_Java_sp((intptr_t *) top_zero_frame());
+    set_last_Java_frame(top_zero_frame());
   }
   void reset_last_Java_frame() {
-    JavaFrameAnchor *jfa = frame_anchor();
-    jfa->set_last_Java_sp(NULL);
+    set_last_Java_frame(NULL);
+  }
+  void set_last_Java_frame(ZeroFrame* frame) {
+    frame_anchor()->set_last_Java_sp((intptr_t *) frame);
   }
 
  private:
--- a/src/share/vm/adlc/formssel.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/adlc/formssel.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -3861,6 +3861,8 @@
         strcmp(opType,"RoundFloat")==0 ||
         strcmp(opType,"ReverseBytesI")==0 ||
         strcmp(opType,"ReverseBytesL")==0 ||
+        strcmp(opType,"ReverseBytesUS")==0 ||
+        strcmp(opType,"ReverseBytesS")==0 ||
         strcmp(opType,"Replicate16B")==0 ||
         strcmp(opType,"Replicate8B")==0 ||
         strcmp(opType,"Replicate4B")==0 ||
--- a/src/share/vm/asm/codeBuffer.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/asm/codeBuffer.hpp	Thu May 13 18:26:35 2010 -0700
@@ -40,6 +40,7 @@
                  Exceptions,     // Offset where exception handler lives
                  Deopt,          // Offset where deopt handler lives
                  DeoptMH,        // Offset where MethodHandle deopt handler lives
+                 UnwindHandler,  // Offset to default unwind handler
                  max_Entries };
 
   // special value to note codeBlobs where profile (forte) stack walking is
@@ -59,6 +60,7 @@
     _values[Exceptions    ] = -1;
     _values[Deopt         ] = -1;
     _values[DeoptMH       ] = -1;
+    _values[UnwindHandler ] = -1;
   }
 
   int value(Entries e) { return _values[e]; }
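
The new UnwindHandler entry follows the existing CodeOffsets convention: every slot starts at -1 ("not emitted") and is filled in when the corresponding handler is generated. A minimal model of that table:

    #include <cstdio>

    // Enum-indexed offset table with -1 as the "not present" sentinel,
    // a simplified model of the CodeOffsets shape.
    struct Offsets {
      enum Entries { Exceptions, Deopt, DeoptMH, UnwindHandler, max_Entries };
      int values[max_Entries];
      Offsets() { for (int i = 0; i < max_Entries; i++) values[i] = -1; }
      void set(Entries e, int off) { values[e] = off; }
      int  get(Entries e) const    { return values[e]; }
    };

    int main() {
      Offsets o;
      o.set(Offsets::UnwindHandler, 0x40);
      std::printf("unwind at %d, deopt at %d\n",
                  o.get(Offsets::UnwindHandler), o.get(Offsets::Deopt));
      return 0;
    }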
--- a/src/share/vm/c1/c1_Canonicalizer.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/c1/c1_Canonicalizer.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -222,11 +222,15 @@
     }
   } else {
     LoadField* lf = x->array()->as_LoadField();
-    if (lf != NULL && lf->field()->is_constant()) {
-      ciObject* c = lf->field()->constant_value().as_object();
-      if (c->is_array()) {
-        ciArray* array = (ciArray*) c;
-        set_constant(array->length());
+    if (lf != NULL) {
+      ciField* field = lf->field();
+      if (field->is_constant() && field->is_static()) {
+        // final static field
+        ciObject* c = field->constant_value().as_object();
+        if (c->is_array()) {
+          ciArray* array = (ciArray*) c;
+          set_constant(array->length());
+        }
       }
     }
   }
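
The extra is_static() guard matters because only a static final field is guaranteed a single constant value; an instance final field can hold a different array per object, so folding its length would be unsound. A toy model of the guarded fold:

    #include <cstdio>

    // Toy field descriptor; in C1 these answers come from ciField.
    struct Field {
      bool is_constant;   // final with a known value
      bool is_static;     // one value for the whole class
      int  array_length;  // the constant's length, if it is an array
    };

    // Fold arr.length to a constant only when both guards hold.
    static bool try_fold_length(const Field& f, int* out) {
      if (f.is_constant && f.is_static) {
        *out = f.array_length;
        return true;
      }
      return false;  // leave the length load in the IR
    }

    int main() {
      Field static_final   = { true, true,  8 };
      Field instance_final = { true, false, 8 };
      int n;
      std::printf("static final:   %s\n",
                  try_fold_length(static_final, &n) ? "folded" : "kept");
      std::printf("instance final: %s\n",
                  try_fold_length(instance_final, &n) ? "folded" : "kept");
      return 0;
    }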
--- a/src/share/vm/c1/c1_CodeStubs.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/c1/c1_CodeStubs.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -415,6 +415,28 @@
 };
 
 
+//------------------------------------------------------------------------------
+// DeoptimizeStub
+//
+class DeoptimizeStub : public CodeStub {
+private:
+  CodeEmitInfo* _info;
+
+public:
+  DeoptimizeStub(CodeEmitInfo* info) : _info(new CodeEmitInfo(info)) {}
+
+  virtual void emit_code(LIR_Assembler* e);
+  virtual CodeEmitInfo* info() const           { return _info; }
+  virtual bool is_exception_throw_stub() const { return true; }
+  virtual void visit(LIR_OpVisitState* visitor) {
+    visitor->do_slow_case(_info);
+  }
+#ifndef PRODUCT
+  virtual void print_name(outputStream* out) const { out->print("DeoptimizeStub"); }
+#endif // PRODUCT
+};
+
+
 class SimpleExceptionStub: public CodeStub {
  private:
   LIR_Opr          _obj;
--- a/src/share/vm/c1/c1_Compilation.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/c1/c1_Compilation.cpp	Thu May 13 18:26:35 2010 -0700
@@ -229,6 +229,10 @@
   code_offsets->set_value(CodeOffsets::DeoptMH, assembler->emit_deopt_handler());
   CHECK_BAILOUT();
 
+  // Emit the handler to remove the activation from the stack and
+  // dispatch to the caller.
+  offsets()->set_value(CodeOffsets::UnwindHandler, assembler->emit_unwind_handler());
+
   // done
   masm()->flush();
 }
@@ -312,7 +316,7 @@
     implicit_exception_table(),
     compiler(),
     _env->comp_level(),
-    needs_debug_information(),
+    true,
     has_unsafe_access()
   );
 }
@@ -445,8 +449,6 @@
   assert(_arena == NULL, "should be only one instance of Compilation in existence at a time");
   _arena = Thread::current()->resource_area();
   _compilation = this;
-  _needs_debug_information = _env->jvmti_can_examine_or_deopt_anywhere() ||
-                               JavaMonitorsInStackTrace || AlwaysEmitDebugInfo || DeoptimizeALot;
   _exception_info_list = new ExceptionInfoList();
   _implicit_exception_table.set_size(0);
   compile_method();
--- a/src/share/vm/c1/c1_Compilation.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/c1/c1_Compilation.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -70,7 +70,6 @@
   int                _max_spills;
   FrameMap*          _frame_map;
   C1_MacroAssembler* _masm;
-  bool               _needs_debug_information;
   bool               _has_exception_handlers;
   bool               _has_fpu_code;
   bool               _has_unsafe_access;
@@ -117,7 +116,6 @@
   // accessors
   ciEnv* env() const                             { return _env; }
   AbstractCompiler* compiler() const             { return _compiler; }
-  bool needs_debug_information() const           { return _needs_debug_information; }
   bool has_exception_handlers() const            { return _has_exception_handlers; }
   bool has_fpu_code() const                      { return _has_fpu_code; }
   bool has_unsafe_access() const                 { return _has_unsafe_access; }
@@ -132,7 +130,6 @@
   CodeOffsets* offsets()                         { return &_offsets; }
 
   // setters
-  void set_needs_debug_information(bool f)       { _needs_debug_information = f; }
   void set_has_exception_handlers(bool f)        { _has_exception_handlers = f; }
   void set_has_fpu_code(bool f)                  { _has_fpu_code = f; }
   void set_has_unsafe_access(bool f)             { _has_unsafe_access = f; }
--- a/src/share/vm/c1/c1_GraphBuilder.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -829,12 +829,8 @@
     // should be left alone since there can be only one and all code
     // should dispatch to the same one.
     XHandler* h = handlers->handler_at(i);
-    if (h->handler_bci() != SynchronizationEntryBCI) {
-      h->set_entry_block(block_at(h->handler_bci()));
-    } else {
-      assert(h->entry_block()->is_set(BlockBegin::default_exception_handler_flag),
-             "should be the synthetic unlock block");
-    }
+    assert(h->handler_bci() != SynchronizationEntryBCI, "must be real");
+    h->set_entry_block(block_at(h->handler_bci()));
   }
   _jsr_xhandlers = handlers;
 }
@@ -1497,7 +1493,6 @@
 
 Dependencies* GraphBuilder::dependency_recorder() const {
   assert(DeoptC1, "need debug information");
-  compilation()->set_needs_debug_information(true);
   return compilation()->dependency_recorder();
 }
 
@@ -1524,18 +1519,14 @@
     code = Bytecodes::_invokespecial;
   }
 
-  if (code == Bytecodes::_invokedynamic) {
-    BAILOUT("invokedynamic NYI"); // FIXME
-    return;
-  }
-
   // NEEDS_CLEANUP
   // I've added the target->is_loaded() test below but I don't really understand
   // how klass->is_loaded() can be true and yet target->is_loaded() is false.
   // this happened while running the JCK invokevirtual tests under doit.  TKR
   ciMethod* cha_monomorphic_target = NULL;
   ciMethod* exact_target = NULL;
-  if (UseCHA && DeoptC1 && klass->is_loaded() && target->is_loaded()) {
+  if (UseCHA && DeoptC1 && klass->is_loaded() && target->is_loaded() &&
+      !target->is_method_handle_invoke()) {
     Value receiver = NULL;
     ciInstanceKlass* receiver_klass = NULL;
     bool type_is_exact = false;
@@ -1681,11 +1672,20 @@
   CHECK_BAILOUT();
 
   // inlining not successful => standard invoke
-  bool is_static = code == Bytecodes::_invokestatic;
+  bool is_loaded = target->is_loaded();
+  bool has_receiver =
+    code == Bytecodes::_invokespecial   ||
+    code == Bytecodes::_invokevirtual   ||
+    code == Bytecodes::_invokeinterface;
+  bool is_invokedynamic = code == Bytecodes::_invokedynamic;
   ValueType* result_type = as_ValueType(target->return_type());
+
+  // We require the debug info to be the "state before" because
+  // invokedynamics may deoptimize.
+  ValueStack* state_before = is_invokedynamic ? state()->copy() : NULL;
+
   Values* args = state()->pop_arguments(target->arg_size_no_receiver());
-  Value recv = is_static ? NULL : apop();
-  bool is_loaded = target->is_loaded();
+  Value recv = has_receiver ? apop() : NULL;
   int vtable_index = methodOopDesc::invalid_vtable_index;
 
 #ifdef SPARC
@@ -1723,7 +1723,7 @@
     profile_call(recv, target_klass);
   }
 
-  Invoke* result = new Invoke(code, result_type, recv, args, vtable_index, target);
+  Invoke* result = new Invoke(code, result_type, recv, args, vtable_index, target, state_before);
   // push result
   append_split(result);
 
@@ -2862,21 +2862,6 @@
   _initial_state = state_at_entry();
   start_block->merge(_initial_state);
 
-  BlockBegin* sync_handler = NULL;
-  if (method()->is_synchronized() || _compilation->env()->dtrace_method_probes()) {
-    // setup an exception handler to do the unlocking and/or notification
-    sync_handler = new BlockBegin(-1);
-    sync_handler->set(BlockBegin::exception_entry_flag);
-    sync_handler->set(BlockBegin::is_on_work_list_flag);
-    sync_handler->set(BlockBegin::default_exception_handler_flag);
-
-    ciExceptionHandler* desc = new ciExceptionHandler(method()->holder(), 0, method()->code_size(), -1, 0);
-    XHandler* h = new XHandler(desc);
-    h->set_entry_block(sync_handler);
-    scope_data()->xhandlers()->append(h);
-    scope_data()->set_has_handler();
-  }
-
   // complete graph
   _vmap        = new ValueMap();
   scope->compute_lock_stack_size();
@@ -2927,19 +2912,6 @@
   }
   CHECK_BAILOUT();
 
-  if (sync_handler && sync_handler->state() != NULL) {
-    Value lock = NULL;
-    if (method()->is_synchronized()) {
-      lock = method()->is_static() ? new Constant(new InstanceConstant(method()->holder()->java_mirror())) :
-                                     _initial_state->local_at(0);
-
-      sync_handler->state()->unlock();
-      sync_handler->state()->lock(scope, lock);
-
-    }
-    fill_sync_handler(lock, sync_handler, true);
-  }
-
   _start = setup_start_block(osr_bci, start_block, _osr_entry, _initial_state);
 
   eliminate_redundant_phis(_start);
--- a/src/share/vm/c1/c1_IR.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/c1/c1_IR.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -259,10 +259,10 @@
 }
 
 
-void CodeEmitInfo::record_debug_info(DebugInformationRecorder* recorder, int pc_offset) {
+void CodeEmitInfo::record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool is_method_handle_invoke) {
   // record the safepoint before recording the debug info for enclosing scopes
   recorder->add_safepoint(pc_offset, _oop_map->deep_copy());
-  _scope_debug_info->record_debug_info(recorder, pc_offset, true/*topmost*/);
+  _scope_debug_info->record_debug_info(recorder, pc_offset, true/*topmost*/, is_method_handle_invoke);
   recorder->end_safepoint(pc_offset);
 }
 
--- a/src/share/vm/c1/c1_IR.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/c1/c1_IR.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -242,7 +242,7 @@
   //Whether we should reexecute this bytecode for deopt
   bool should_reexecute();
 
-  void record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool topmost) {
+  void record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool topmost, bool is_method_handle_invoke = false) {
     if (caller() != NULL) {
       // Order is significant:  Must record caller first.
       caller()->record_debug_info(recorder, pc_offset, false/*topmost*/);
@@ -252,7 +252,6 @@
     DebugToken* monvals = recorder->create_monitor_values(monitors());
     // reexecute allowed only for the topmost frame
     bool reexecute = topmost ? should_reexecute() : false;
-    bool is_method_handle_invoke = false;
     bool return_oop = false; // This flag will be ignored since it used only for C2 with escape analysis.
     recorder->describe_scope(pc_offset, scope()->method(), bci(), reexecute, is_method_handle_invoke, return_oop, locvals, expvals, monvals);
   }
@@ -303,7 +302,7 @@
   int bci() const                                { return _bci; }
 
   void add_register_oop(LIR_Opr opr);
-  void record_debug_info(DebugInformationRecorder* recorder, int pc_offset);
+  void record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool is_method_handle_invoke = false);
 
   CodeEmitInfo* next() const        { return _next; }
   void set_next(CodeEmitInfo* next) { _next = next; }
--- a/src/share/vm/c1/c1_Instruction.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/c1/c1_Instruction.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -334,13 +334,14 @@
 
 
 Invoke::Invoke(Bytecodes::Code code, ValueType* result_type, Value recv, Values* args,
-               int vtable_index, ciMethod* target)
+               int vtable_index, ciMethod* target, ValueStack* state_before)
   : StateSplit(result_type)
   , _code(code)
   , _recv(recv)
   , _args(args)
   , _vtable_index(vtable_index)
   , _target(target)
+  , _state_before(state_before)
 {
   set_flag(TargetIsLoadedFlag,   target->is_loaded());
   set_flag(TargetIsFinalFlag,    target_is_loaded() && target->is_final_method());
@@ -355,6 +356,9 @@
   _signature = new BasicTypeList(number_of_arguments() + (has_receiver() ? 1 : 0));
   if (has_receiver()) {
     _signature->append(as_BasicType(receiver()->type()));
+  } else if (is_invokedynamic()) {
+    // Add the synthetic MethodHandle argument to the signature.
+    _signature->append(T_OBJECT);
   }
   for (int i = 0; i < number_of_arguments(); i++) {
     ValueType* t = argument_at(i)->type();
@@ -364,6 +368,13 @@
 }
 
 
+void Invoke::state_values_do(void f(Value*)) {
+  StateSplit::state_values_do(f);
+  if (state_before() != NULL) state_before()->values_do(f);
+  if (state()        != NULL) state()->values_do(f);
+}
+
+
 // Implementation of Constant
 intx Constant::hash() const {
   if (_state == NULL) {
--- a/src/share/vm/c1/c1_Instruction.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/c1/c1_Instruction.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1134,17 +1134,18 @@
 
 LEAF(Invoke, StateSplit)
  private:
-  Bytecodes::Code           _code;
-  Value                     _recv;
-  Values*                   _args;
-  BasicTypeList*            _signature;
-  int                       _vtable_index;
-  ciMethod*                 _target;
+  Bytecodes::Code _code;
+  Value           _recv;
+  Values*         _args;
+  BasicTypeList*  _signature;
+  int             _vtable_index;
+  ciMethod*       _target;
+  ValueStack*     _state_before;  // Required for deoptimization.
 
  public:
   // creation
   Invoke(Bytecodes::Code code, ValueType* result_type, Value recv, Values* args,
-         int vtable_index, ciMethod* target);
+         int vtable_index, ciMethod* target, ValueStack* state_before);
 
   // accessors
   Bytecodes::Code code() const                   { return _code; }
@@ -1155,6 +1156,7 @@
   int vtable_index() const                       { return _vtable_index; }
   BasicTypeList* signature() const               { return _signature; }
   ciMethod* target() const                       { return _target; }
+  ValueStack* state_before() const               { return _state_before; }
 
   // Returns false if target is not loaded
   bool target_is_final() const                   { return check_flag(TargetIsFinalFlag); }
@@ -1162,6 +1164,9 @@
   // Returns false if target is not loaded
   bool target_is_strictfp() const                { return check_flag(TargetIsStrictfpFlag); }
 
+  // JSR 292 support
+  bool is_invokedynamic() const                  { return code() == Bytecodes::_invokedynamic; }
+
   // generic
   virtual bool can_trap() const                  { return true; }
   virtual void input_values_do(void f(Value*)) {
@@ -1169,6 +1174,7 @@
     if (has_receiver()) f(&_recv);
     for (int i = 0; i < _args->length(); i++) f(_args->adr_at(i));
   }
+  virtual void state_values_do(void f(Value*));
 };
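
The signature handling above appends a synthetic T_OBJECT slot for invokedynamic even though there is no receiver value on the stack, because the call is made through a MethodHandle passed as a hidden first argument. Sketched with a plain vector standing in for BasicTypeList (receiver type simplified to T_OBJECT):

    #include <cstdio>
    #include <vector>

    enum BasicType { T_OBJECT, T_INT };

    // Receiver slot if there is one, otherwise a synthetic MethodHandle
    // slot for invokedynamic; then the declared argument types.
    static std::vector<BasicType> make_signature(
        bool has_receiver, bool is_invokedynamic,
        const std::vector<BasicType>& args) {
      std::vector<BasicType> sig;
      if (has_receiver || is_invokedynamic)
        sig.push_back(T_OBJECT);  // receiver, or hidden MethodHandle
      sig.insert(sig.end(), args.begin(), args.end());
      return sig;
    }

    int main() {
      std::vector<BasicType> args = { T_INT };
      std::printf("invokedynamic slots: %zu\n",
                  make_signature(false, true, args).size());  // 2: MH + int
      return 0;
    }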
 
 
@@ -1622,11 +1628,10 @@
     backward_branch_target_flag   = 1 << 4,
     is_on_work_list_flag          = 1 << 5,
     was_visited_flag              = 1 << 6,
-    default_exception_handler_flag = 1 << 8, // identify block which represents the default exception handler
-    parser_loop_header_flag       = 1 << 9,  // set by parser to identify blocks where phi functions can not be created on demand
-    critical_edge_split_flag      = 1 << 10, // set for all blocks that are introduced when critical edges are split
-    linear_scan_loop_header_flag  = 1 << 11, // set during loop-detection for LinearScan
-    linear_scan_loop_end_flag     = 1 << 12  // set during loop-detection for LinearScan
+    parser_loop_header_flag       = 1 << 7,  // set by parser to identify blocks where phi functions cannot be created on demand
+    critical_edge_split_flag      = 1 << 8,  // set for all blocks that are introduced when critical edges are split
+    linear_scan_loop_header_flag  = 1 << 9,  // set during loop-detection for LinearScan
+    linear_scan_loop_end_flag     = 1 << 10  // set during loop-detection for LinearScan
   };
 
   void set(Flag f)                               { _flags |= f; }
--- a/src/share/vm/c1/c1_LIR.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/c1/c1_LIR.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -76,7 +76,7 @@
       return LIR_OprFact::oopConst(type->as_ObjectType()->encoding());
     }
   }
-  case addressTag: return LIR_OprFact::intConst(type->as_AddressConstant()->value());
+  case addressTag: return LIR_OprFact::addressConst(type->as_AddressConstant()->value());
   case intTag    : return LIR_OprFact::intConst(type->as_IntConstant()->value());
   case floatTag  : return LIR_OprFact::floatConst(type->as_FloatConstant()->value());
   case longTag   : return LIR_OprFact::longConst(type->as_LongConstant()->value());
@@ -89,7 +89,7 @@
 LIR_Opr LIR_OprFact::dummy_value_type(ValueType* type) {
   switch (type->tag()) {
     case objectTag: return LIR_OprFact::oopConst(NULL);
-    case addressTag:
+    case addressTag: return LIR_OprFact::addressConst(0);
     case intTag:    return LIR_OprFact::intConst(0);
     case floatTag:  return LIR_OprFact::floatConst(0.0);
     case longTag:   return LIR_OprFact::longConst(0);
@@ -626,8 +626,7 @@
       break;
     }
 
-    case lir_throw:
-    case lir_unwind: {
+    case lir_throw: {
       assert(op->as_Op2() != NULL, "must be");
       LIR_Op2* op2 = (LIR_Op2*)op;
 
@@ -639,6 +638,17 @@
       break;
     }
 
+    case lir_unwind: {
+      assert(op->as_Op1() != NULL, "must be");
+      LIR_Op1* op1 = (LIR_Op1*)op;
+
+      assert(op1->_info == NULL, "no info");
+      assert(op1->_opr->is_valid(), "exception oop");         do_input(op1->_opr);
+      assert(op1->_result->is_illegal(), "no result");
+
+      break;
+    }
+
 
     case lir_tan:
     case lir_sin:
@@ -689,9 +699,10 @@
     case lir_static_call:
     case lir_optvirtual_call:
     case lir_icvirtual_call:
-    case lir_virtual_call: {
-      assert(op->as_OpJavaCall() != NULL, "must be");
-      LIR_OpJavaCall* opJavaCall = (LIR_OpJavaCall*)op;
+    case lir_virtual_call:
+    case lir_dynamic_call: {
+      LIR_OpJavaCall* opJavaCall = op->as_OpJavaCall();
+      assert(opJavaCall != NULL, "must be");
 
       if (opJavaCall->_receiver->is_valid())     do_input(opJavaCall->_receiver);
 
@@ -704,6 +715,7 @@
       }
 
       if (opJavaCall->_info)                     do_info(opJavaCall->_info);
+      if (opJavaCall->is_method_handle_invoke()) do_temp(FrameMap::method_handle_invoke_SP_save_opr());
       do_call();
       if (opJavaCall->_result->is_valid())       do_output(opJavaCall->_result);
 
@@ -1410,6 +1422,7 @@
 // LIR_Address
 void LIR_Const::print_value_on(outputStream* out) const {
   switch (type()) {
+    case T_ADDRESS: out->print("address:%d", as_jint());       break;
     case T_INT:    out->print("int:%d",   as_jint());           break;
     case T_LONG:   out->print("lng:%lld", as_jlong());          break;
     case T_FLOAT:  out->print("flt:%f",   as_jfloat());         break;
@@ -1590,6 +1603,7 @@
      case lir_optvirtual_call:       s = "optvirtual";    break;
      case lir_icvirtual_call:        s = "icvirtual";     break;
      case lir_virtual_call:          s = "virtual";       break;
+     case lir_dynamic_call:          s = "dynamic";       break;
      // LIR_OpArrayCopy
      case lir_arraycopy:             s = "arraycopy";     break;
      // LIR_OpLock
--- a/src/share/vm/c1/c1_LIR.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/c1/c1_LIR.hpp	Thu May 13 18:26:35 2010 -0700
@@ -85,9 +85,10 @@
 
   void type_check(BasicType t) const   { assert(type() == t, "type check"); }
   void type_check(BasicType t1, BasicType t2) const   { assert(type() == t1 || type() == t2, "type check"); }
+  void type_check(BasicType t1, BasicType t2, BasicType t3) const   { assert(type() == t1 || type() == t2 || type() == t3, "type check"); }
 
  public:
-  LIR_Const(jint i)                              { _value.set_type(T_INT);     _value.set_jint(i); }
+  LIR_Const(jint i, bool is_address = false)    { _value.set_type(is_address ? T_ADDRESS : T_INT); _value.set_jint(i); }
   LIR_Const(jlong l)                             { _value.set_type(T_LONG);    _value.set_jlong(l); }
   LIR_Const(jfloat f)                            { _value.set_type(T_FLOAT);   _value.set_jfloat(f); }
   LIR_Const(jdouble d)                           { _value.set_type(T_DOUBLE);  _value.set_jdouble(d); }
@@ -105,7 +106,7 @@
   virtual BasicType type()       const { return _value.get_type(); }
   virtual LIR_Const* as_constant()     { return this; }
 
-  jint      as_jint()    const         { type_check(T_INT   ); return _value.get_jint(); }
+  jint      as_jint()    const         { type_check(T_INT, T_ADDRESS); return _value.get_jint(); }
   jlong     as_jlong()   const         { type_check(T_LONG  ); return _value.get_jlong(); }
   jfloat    as_jfloat()  const         { type_check(T_FLOAT ); return _value.get_jfloat(); }
   jdouble   as_jdouble() const         { type_check(T_DOUBLE); return _value.get_jdouble(); }
@@ -120,7 +121,7 @@
 #endif
 
 
-  jint      as_jint_bits() const       { type_check(T_FLOAT, T_INT); return _value.get_jint(); }
+  jint      as_jint_bits() const       { type_check(T_FLOAT, T_INT, T_ADDRESS); return _value.get_jint(); }
   jint      as_jint_lo_bits() const    {
     if (type() == T_DOUBLE) {
       return low(jlong_cast(_value.get_jdouble()));
@@ -718,6 +719,7 @@
   static LIR_Opr intptrConst(void* p)            { return (LIR_Opr)(new LIR_Const(p)); }
   static LIR_Opr intptrConst(intptr_t v)         { return (LIR_Opr)(new LIR_Const((void*)v)); }
   static LIR_Opr illegal()                       { return (LIR_Opr)-1; }
+  static LIR_Opr addressConst(jint i)            { return (LIR_Opr)(new LIR_Const(i, true)); }
 
   static LIR_Opr value_type(ValueType* type);
   static LIR_Opr dummy_value_type(ValueType* type);
@@ -799,6 +801,7 @@
       , lir_monaddr
       , lir_roundfp
       , lir_safepoint
+      , lir_unwind
   , end_op1
   , begin_op2
       , lir_cmp
@@ -828,7 +831,6 @@
       , lir_ushr
       , lir_alloc_array
       , lir_throw
-      , lir_unwind
       , lir_compare_to
   , end_op2
   , begin_op3
@@ -840,6 +842,7 @@
       , lir_optvirtual_call
       , lir_icvirtual_call
       , lir_virtual_call
+      , lir_dynamic_call
   , end_opJavaCall
   , begin_opArrayCopy
       , lir_arraycopy
@@ -1052,6 +1055,16 @@
   LIR_Opr receiver() const                       { return _receiver; }
   ciMethod* method() const                       { return _method;   }
 
+  // JSR 292 support.
+  bool is_invokedynamic() const                  { return code() == lir_dynamic_call; }
+  bool is_method_handle_invoke() const {
+    return
+      is_invokedynamic()  // An invokedynamic is always a MethodHandle call site.
+      ||
+      (method()->holder()->name() == ciSymbol::java_dyn_MethodHandle() &&
+       method()->name()           == ciSymbol::invoke_name());
+  }
+
   intptr_t vtable_offset() const {
     assert(_code == lir_virtual_call, "only have vtable for real vcall");
     return (intptr_t) addr();
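
is_method_handle_invoke() above treats every invokedynamic as a MethodHandle call site and otherwise matches the holder/name pair java.dyn.MethodHandle::invoke. A string-based stand-in for the predicate (real C1 compares interned ciSymbols, not strings):

    #include <cstdio>
    #include <cstring>

    static bool is_method_handle_invoke(bool is_invokedynamic,
                                        const char* holder,
                                        const char* name) {
      if (is_invokedynamic)
        return true;  // always a MethodHandle call site
      return std::strcmp(holder, "java.dyn.MethodHandle") == 0 &&
             std::strcmp(name, "invoke") == 0;
    }

    int main() {
      std::printf("%d\n", is_method_handle_invoke(false, "java.dyn.MethodHandle", "invoke"));
      std::printf("%d\n", is_method_handle_invoke(false, "java.lang.String", "length"));
      return 0;
    }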
@@ -1766,6 +1779,10 @@
                     intptr_t vtable_offset, LIR_OprList* arguments, CodeEmitInfo* info) {
     append(new LIR_OpJavaCall(lir_virtual_call, method, receiver, result, vtable_offset, arguments, info));
   }
+  void call_dynamic(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
+                    address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
+    append(new LIR_OpJavaCall(lir_dynamic_call, method, receiver, result, dest, arguments, info));
+  }
 
   void get_thread(LIR_Opr result)                { append(new LIR_Op0(lir_get_thread, result)); }
   void word_align()                              { append(new LIR_Op0(lir_word_align)); }
@@ -1810,8 +1827,12 @@
   void logical_xor (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_xor,  left, right, dst)); }
 
   void null_check(LIR_Opr opr, CodeEmitInfo* info)         { append(new LIR_Op1(lir_null_check, opr, info)); }
-  void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info)); }
-  void unwind_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { append(new LIR_Op2(lir_unwind, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info)); }
+  void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
+    append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info));
+  }
+  void unwind_exception(LIR_Opr exceptionOop) {
+    append(new LIR_Op1(lir_unwind, exceptionOop));
+  }
 
   void compare_to (LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
     append(new LIR_Op2(lir_compare_to,  left, right, dst));
--- a/src/share/vm/c1/c1_LIRAssembler.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/c1/c1_LIRAssembler.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -301,9 +301,9 @@
 }
 
 
-void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
+void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo, bool is_method_handle_invoke) {
   flush_debug_info(pc_offset);
-  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
+  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset, is_method_handle_invoke);
   if (cinfo->exception_handlers() != NULL) {
     compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
   }
@@ -413,6 +413,12 @@
 void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
   verify_oop_map(op->info());
 
+  // JSR 292
+  // Preserve the SP over MethodHandle call sites.
+  if (op->is_method_handle_invoke()) {
+    preserve_SP(op);
+  }
+
   if (os::is_MP()) {
     // must align calls sites, otherwise they can't be updated atomically on MP hardware
     align_call(op->code());
@@ -423,19 +429,25 @@
 
   switch (op->code()) {
   case lir_static_call:
-    call(op->addr(), relocInfo::static_call_type, op->info());
+    call(op, relocInfo::static_call_type);
     break;
   case lir_optvirtual_call:
-    call(op->addr(), relocInfo::opt_virtual_call_type, op->info());
+  case lir_dynamic_call:
+    call(op, relocInfo::opt_virtual_call_type);
     break;
   case lir_icvirtual_call:
-    ic_call(op->addr(), op->info());
+    ic_call(op);
     break;
   case lir_virtual_call:
-    vtable_call(op->vtable_offset(), op->info());
+    vtable_call(op);
     break;
   default: ShouldNotReachHere();
   }
+
+  if (op->is_method_handle_invoke()) {
+    restore_SP(op);
+  }
+
 #if defined(X86) && defined(TIERED)
   // C2 leaves the FPU stack dirty; clean it
   if (UseSSE < 2) {
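
emit_call() brackets MethodHandle call sites with preserve_SP()/restore_SP() because the callee may move SP under the caller. In ordinary C++ such a paired save/restore would naturally be an RAII guard; the generated-code version has to spell out both halves explicitly. For comparison:

    #include <cstdio>

    // RAII analogy for the explicit preserve_SP()/restore_SP() pair:
    // the saved value is restored however the scope exits.
    class SPGuard {
      long& slot_;
      long  saved_;
     public:
      explicit SPGuard(long& slot) : slot_(slot), saved_(slot) {}
      ~SPGuard() { slot_ = saved_; }
    };

    long SP = 1000;

    static void method_handle_call() { SP -= 64; /* callee moved SP */ }

    int main() {
      {
        SPGuard guard(SP);          // preserve_SP
        method_handle_call();
      }                             // restore_SP
      std::printf("SP = %ld\n", SP);  // back to 1000
      return 0;
    }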
@@ -540,6 +552,10 @@
       monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
       break;
 
+    case lir_unwind:
+      unwind_op(op->in_opr());
+      break;
+
     default:
       Unimplemented();
       break;
@@ -695,8 +711,7 @@
       break;
 
     case lir_throw:
-    case lir_unwind:
-      throw_op(op->in_opr1(), op->in_opr2(), op->info(), op->code() == lir_unwind);
+      throw_op(op->in_opr1(), op->in_opr2(), op->info());
       break;
 
     default:
--- a/src/share/vm/c1/c1_LIRAssembler.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/c1/c1_LIRAssembler.hpp	Thu May 13 18:26:35 2010 -0700
@@ -39,6 +39,8 @@
   Instruction*       _pending_non_safepoint;
   int                _pending_non_safepoint_offset;
 
+  Label              _unwind_handler_entry;
+
 #ifdef ASSERT
   BlockList          _branch_target_blocks;
   void check_no_unbound_labels();
@@ -82,7 +84,7 @@
   Address as_Address_hi(LIR_Address* addr);
 
   // debug information
-  void add_call_info(int pc_offset, CodeEmitInfo* cinfo);
+  void add_call_info(int pc_offset, CodeEmitInfo* cinfo, bool is_method_handle_invoke = false);
   void add_debug_info_for_branch(CodeEmitInfo* info);
   void add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo);
   void add_debug_info_for_div0_here(CodeEmitInfo* info);
@@ -134,6 +136,7 @@
 
   // code patterns
   int  emit_exception_handler();
+  int  emit_unwind_handler();
   void emit_exception_entries(ExceptionInfoList* info_list);
   int  emit_deopt_handler();
 
@@ -205,15 +208,20 @@
   void comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr result, LIR_Op2* op);
   void cmove(LIR_Condition code, LIR_Opr left, LIR_Opr right, LIR_Opr result);
 
-  void ic_call(address destination, CodeEmitInfo* info);
-  void vtable_call(int vtable_offset, CodeEmitInfo* info);
-  void call(address entry, relocInfo::relocType rtype, CodeEmitInfo* info);
+  void call(        LIR_OpJavaCall* op, relocInfo::relocType rtype);
+  void ic_call(     LIR_OpJavaCall* op);
+  void vtable_call( LIR_OpJavaCall* op);
+
+  // JSR 292
+  void preserve_SP(LIR_OpJavaCall* op);
+  void restore_SP( LIR_OpJavaCall* op);
 
   void osr_entry();
 
   void build_frame();
 
-  void throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info, bool unwind);
+  void throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info);
+  void unwind_op(LIR_Opr exceptionOop);
   void monitor_address(int monitor_ix, LIR_Opr dst);
 
   void align_backward_branch_target();
--- a/src/share/vm/c1/c1_LIRGenerator.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/c1/c1_LIRGenerator.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1765,35 +1765,17 @@
     __ null_check(exception_opr, new CodeEmitInfo(info, true));
   }
 
-  if (compilation()->env()->jvmti_can_post_on_exceptions() &&
-      !block()->is_set(BlockBegin::default_exception_handler_flag)) {
+  if (compilation()->env()->jvmti_can_post_on_exceptions()) {
     // we need to go through the exception lookup path to get JVMTI
     // notification done
     unwind = false;
   }
 
-  assert(!block()->is_set(BlockBegin::default_exception_handler_flag) || unwind,
-         "should be no more handlers to dispatch to");
-
-  if (compilation()->env()->dtrace_method_probes() &&
-      block()->is_set(BlockBegin::default_exception_handler_flag)) {
-    // notify that this frame is unwinding
-    BasicTypeList signature;
-    signature.append(T_INT);    // thread
-    signature.append(T_OBJECT); // methodOop
-    LIR_OprList* args = new LIR_OprList();
-    args->append(getThreadPointer());
-    LIR_Opr meth = new_register(T_OBJECT);
-    __ oop2reg(method()->constant_encoding(), meth);
-    args->append(meth);
-    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
-  }
-
   // move exception oop into fixed register
   __ move(exception_opr, exceptionOopOpr());
 
   if (unwind) {
-    __ unwind_exception(LIR_OprFact::illegalOpr, exceptionOopOpr(), info);
+    __ unwind_exception(exceptionOopOpr());
   } else {
     __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
   }
@@ -2284,7 +2266,7 @@
 
 
 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
-  int i = x->has_receiver() ? 1 : 0;
+  int i = (x->has_receiver() || x->is_invokedynamic()) ? 1 : 0;
   for (; i < args->length(); i++) {
     LIRItem* param = args->at(i);
     LIR_Opr loc = arg_list->at(i);
@@ -2322,6 +2304,10 @@
     LIRItem* receiver = new LIRItem(x->receiver(), this);
     argument_items->append(receiver);
   }
+  if (x->is_invokedynamic()) {
+    // Insert a dummy for the synthetic MethodHandle argument.
+    argument_items->append(NULL);
+  }
   int idx = x->has_receiver() ? 1 : 0;
   for (int i = 0; i < x->number_of_arguments(); i++) {
     LIRItem* param = new LIRItem(x->argument_at(i), this);
@@ -2371,6 +2357,9 @@
 
   CodeEmitInfo* info = state_for(x, x->state());
 
+  // invokedynamics can deoptimize.
+  CodeEmitInfo* deopt_info = x->is_invokedynamic() ? state_for(x, x->state_before()) : NULL;
+
   invoke_load_arguments(x, args, arg_list);
 
   if (x->has_receiver()) {
@@ -2407,6 +2396,47 @@
         __ call_virtual(x->target(), receiver, result_register, vtable_offset, arg_list, info);
       }
       break;
+    case Bytecodes::_invokedynamic: {
+      ciBytecodeStream bcs(x->scope()->method());
+      bcs.force_bci(x->bci());
+      assert(bcs.cur_bc() == Bytecodes::_invokedynamic, "wrong stream");
+      ciCPCache* cpcache = bcs.get_cpcache();
+
+      // Get CallSite offset from constant pool cache pointer.
+      int index = bcs.get_method_index();
+      size_t call_site_offset = cpcache->get_f1_offset(index);
+
+      // If this invokedynamic call site hasn't been executed yet in
+      // the interpreter, the CallSite object in the constant pool
+      // cache is still null and we need to deoptimize.
+      if (cpcache->is_f1_null_at(index)) {
+        // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
+        // clone all handlers.  This is handled transparently in other
+        // places by the CodeEmitInfo cloning logic but is handled
+        // specially here because a stub isn't being used.
+        x->set_exception_handlers(new XHandlers(x->exception_handlers()));
+
+        DeoptimizeStub* deopt_stub = new DeoptimizeStub(deopt_info);
+        __ jump(deopt_stub);
+      }
+
+      // Use the receiver register for the synthetic MethodHandle
+      // argument.
+      receiver = LIR_Assembler::receiverOpr();
+      LIR_Opr tmp = new_register(objectType);
+
+      // Load CallSite object from constant pool cache.
+      __ oop2reg(cpcache->constant_encoding(), tmp);
+      __ load(new LIR_Address(tmp, call_site_offset, T_OBJECT), tmp);
+
+      // Load target MethodHandle from CallSite object.
+      __ load(new LIR_Address(tmp, java_dyn_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);
+
+      __ call_dynamic(x->target(), receiver, result_register,
+                      SharedRuntime::get_resolve_opt_virtual_call_stub(),
+                      arg_list, info);
+      break;
+    }
     default:
       ShouldNotReachHere();
       break;
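
The invokedynamic case hinges on the f1 slot of the constant pool cache: a null CallSite means the site has never been linked by the interpreter, so the compiled code must deoptimize; otherwise the current target MethodHandle is loaded from the CallSite and called through the receiver register. A self-contained toy of that control flow (all types here are stand-ins, not the HotSpot classes):

    #include <cstdio>

    struct MethodHandle { void (*entry)(); };
    struct CallSite     { MethodHandle* target; };

    // f1 models the constant pool cache entry checked by is_f1_null_at().
    void invoke_dynamic(CallSite* f1) {
      if (f1 == nullptr) {
        printf("site not linked yet -> deoptimize to the interpreter\n");
        return;
      }
      f1->target->entry();   // call through the CallSite's current target
    }

    int main() {
      invoke_dynamic(nullptr);                          // takes the deopt path
      MethodHandle mh = { []{ printf("bound target runs\n"); } };
      CallSite cs = { &mh };
      invoke_dynamic(&cs);                              // calls the target
      return 0;
    }
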
--- a/src/share/vm/c1/c1_LinearScan.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/c1/c1_LinearScan.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2005-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2005-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2479,6 +2479,15 @@
       return 2;
     }
 
+    case T_ADDRESS: {
+#ifdef _LP64
+      scope_values->append(new ConstantLongValue(c->as_jint()));
+#else
+      scope_values->append(new ConstantIntValue(c->as_jint()));
+#endif
+      return 1;
+    }
+
     default:
       ShouldNotReachHere();
       return -1;
@@ -2599,12 +2608,17 @@
     } else if (opr->is_double_xmm()) {
       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation");
       VMReg rname_first  = opr->as_xmm_double_reg()->as_VMReg();
+#  ifdef _LP64
+      first = new LocationValue(Location::new_reg_loc(Location::dbl, rname_first));
+      second = &_int_0_scope_value;
+#  else
       first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
       // %%% This is probably a waste but we'll keep things as they were for now
       if (true) {
         VMReg rname_second = rname_first->next();
         second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second));
       }
+#  endif
 #endif
 
     } else if (opr->is_double_fpu()) {
@@ -2630,13 +2644,17 @@
 #endif
 
       VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrHi());
-
+#ifdef _LP64
+      first = new LocationValue(Location::new_reg_loc(Location::dbl, rname_first));
+      second = &_int_0_scope_value;
+#else
       first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
       // %%% This is probably a waste but we'll keep things as they were for now
       if (true) {
         VMReg rname_second = rname_first->next();
         second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second));
       }
+#endif
 
     } else {
       ShouldNotReachHere();
@@ -2796,9 +2814,6 @@
 
 
 void LinearScan::compute_debug_info(CodeEmitInfo* info, int op_id) {
-  if (!compilation()->needs_debug_information()) {
-    return;
-  }
   TRACE_LINEAR_SCAN(3, tty->print_cr("creating debug information at op_id %d", op_id));
 
   IRScope* innermost_scope = info->scope();
--- a/src/share/vm/c1/c1_MacroAssembler.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/c1/c1_MacroAssembler.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2005 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,7 @@
 
   void inline_cache_check(Register receiver, Register iCache);
   void build_frame(int frame_size_in_bytes);
-  void method_exit(bool restore_frame);
+  void remove_frame(int frame_size_in_bytes);
 
   void unverified_entry(Register receiver, Register ic_klass);
   void verified_entry();
--- a/src/share/vm/c1/c1_globals.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/c1/c1_globals.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -252,9 +252,6 @@
   develop(bool, BailoutOnExceptionHandlers, false,                          \
           "bailout of compilation for methods with exception handlers")     \
                                                                             \
-  develop(bool, AlwaysEmitDebugInfo, false,                                 \
-          "always emit debug info")                                         \
-                                                                            \
   develop(bool, InstallMethods, true,                                       \
           "Install methods at the end of successful compilations")          \
                                                                             \
--- a/src/share/vm/ci/bcEscapeAnalyzer.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/ci/bcEscapeAnalyzer.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1408,8 +1408,11 @@
 }
 
 void BCEscapeAnalyzer::copy_dependencies(Dependencies *deps) {
-  if(!has_dependencies())
-    return;
+  if (ciEnv::current()->jvmti_can_hotswap_or_post_breakpoint()) {
+    // Also record evol dependencies so redefinition of the
+    // callee will trigger recompilation.
+    deps->assert_evol_method(method());
+  }
   for (int i = 0; i < _dependencies.length(); i+=2) {
     ciKlass *k = _dependencies[i]->as_klass();
     ciMethod *m = _dependencies[i+1]->as_method();
--- a/src/share/vm/ci/ciCPCache.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/ci/ciCPCache.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2009-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,6 +41,16 @@
 
 
 // ------------------------------------------------------------------
+// ciCPCache::is_f1_null_at
+bool ciCPCache::is_f1_null_at(int index) {
+  VM_ENTRY_MARK;
+  constantPoolCacheOop cpcache = (constantPoolCacheOop) get_oop();
+  oop f1 = cpcache->secondary_entry_at(index)->f1();
+  return (f1 == NULL);
+}
+
+
+// ------------------------------------------------------------------
 // ciCPCache::print
 //
 // Print debugging information about the cache.
--- a/src/share/vm/ci/ciCPCache.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/ci/ciCPCache.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2009-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,5 +39,7 @@
   // requested entry.
   size_t get_f1_offset(int index);
 
+  bool is_f1_null_at(int index);
+
   void print();
 };
--- a/src/share/vm/ci/ciConstant.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/ci/ciConstant.cpp	Thu May 13 18:26:35 2010 -0700
@@ -36,7 +36,7 @@
              basictype_to_str(basic_type()));
   switch (basic_type()) {
   case T_BOOLEAN:
-    tty->print("%s", bool_to_str(_value._int == 0));
+    tty->print("%s", bool_to_str(_value._int != 0));
     break;
   case T_CHAR:
   case T_BYTE:
--- a/src/share/vm/ci/ciEnv.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/ci/ciEnv.cpp	Thu May 13 18:26:35 2010 -0700
@@ -176,7 +176,6 @@
   // Get Jvmti capabilities under lock to get consistent values.
   MutexLocker mu(JvmtiThreadState_lock);
   _jvmti_can_hotswap_or_post_breakpoint = JvmtiExport::can_hotswap_or_post_breakpoint();
-  _jvmti_can_examine_or_deopt_anywhere  = JvmtiExport::can_examine_or_deopt_anywhere();
   _jvmti_can_access_local_variables     = JvmtiExport::can_access_local_variables();
   _jvmti_can_post_on_exceptions         = JvmtiExport::can_post_on_exceptions();
 }
@@ -385,11 +384,6 @@
                                                      KILL_COMPILE_ON_FATAL_(fail_type));
   }
 
-  if (found_klass != NULL) {
-    // Found it.  Build a CI handle.
-    return get_object(found_klass)->as_klass();
-  }
-
   // If we fail to find an array klass, look again for its element type.
   // The element type may be available either locally or via constraints.
   // In either case, if we can find the element type in the system dictionary,
@@ -414,6 +408,11 @@
     }
   }
 
+  if (found_klass != NULL) {
+    // Found it.  Build a CI handle.
+    return get_object(found_klass)->as_klass();
+  }
+
   if (require_local)  return NULL;
   // Not yet loaded into the VM, or not governed by loader constraints.
   // Make a CI representative for it.
@@ -887,8 +886,6 @@
     if (!failing() &&
         ( (!jvmti_can_hotswap_or_post_breakpoint() &&
            JvmtiExport::can_hotswap_or_post_breakpoint()) ||
-          (!jvmti_can_examine_or_deopt_anywhere() &&
-           JvmtiExport::can_examine_or_deopt_anywhere()) ||
           (!jvmti_can_access_local_variables() &&
            JvmtiExport::can_access_local_variables()) ||
           (!jvmti_can_post_on_exceptions() &&
--- a/src/share/vm/ci/ciEnv.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/ci/ciEnv.hpp	Thu May 13 18:26:35 2010 -0700
@@ -55,7 +55,6 @@
 
   // Cache Jvmti state
   bool  _jvmti_can_hotswap_or_post_breakpoint;
-  bool  _jvmti_can_examine_or_deopt_anywhere;
   bool  _jvmti_can_access_local_variables;
   bool  _jvmti_can_post_on_exceptions;
 
@@ -257,7 +256,6 @@
   // Cache Jvmti state
   void  cache_jvmti_state();
   bool  jvmti_can_hotswap_or_post_breakpoint() const { return _jvmti_can_hotswap_or_post_breakpoint; }
-  bool  jvmti_can_examine_or_deopt_anywhere()  const { return _jvmti_can_examine_or_deopt_anywhere; }
   bool  jvmti_can_access_local_variables()     const { return _jvmti_can_access_local_variables; }
   bool  jvmti_can_post_on_exceptions()         const { return _jvmti_can_post_on_exceptions; }
 
--- a/src/share/vm/classfile/classFileParser.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/classfile/classFileParser.cpp	Thu May 13 18:26:35 2010 -0700
@@ -2956,8 +2956,8 @@
 #endif
     bool compact_fields   = CompactFields;
     int  allocation_style = FieldsAllocationStyle;
-    if( allocation_style < 0 || allocation_style > 1 ) { // Out of range?
-      assert(false, "0 <= FieldsAllocationStyle <= 1");
+    if( allocation_style < 0 || allocation_style > 2 ) { // Out of range?
+      assert(false, "0 <= FieldsAllocationStyle <= 2");
       allocation_style = 1; // Optimistic
     }
 
@@ -2993,6 +2993,25 @@
     } else if( allocation_style == 1 ) {
       // Fields order: longs/doubles, ints, shorts/chars, bytes, oops
       next_nonstatic_double_offset = next_nonstatic_field_offset;
+    } else if( allocation_style == 2 ) {
+      // Field allocation: oop fields in super and sub classes are kept together.
+      if( nonstatic_field_size > 0 && super_klass() != NULL &&
+          super_klass->nonstatic_oop_map_size() > 0 ) {
+        int map_size = super_klass->nonstatic_oop_map_size();
+        OopMapBlock* first_map = super_klass->start_of_nonstatic_oop_maps();
+        OopMapBlock* last_map = first_map + map_size - 1;
+        int next_offset = last_map->offset() + (last_map->count() * heapOopSize);
+        if (next_offset == next_nonstatic_field_offset) {
+          allocation_style = 0;   // allocate oops first
+          next_nonstatic_oop_offset    = next_nonstatic_field_offset;
+          next_nonstatic_double_offset = next_nonstatic_oop_offset +
+                                         (nonstatic_oop_count * heapOopSize);
+        }
+      }
+      if( allocation_style == 2 ) {
+        allocation_style = 1;     // allocate oops last
+        next_nonstatic_double_offset = next_nonstatic_field_offset;
+      }
     } else {
       ShouldNotReachHere();
     }
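
Allocation style 2 is really a deferred choice between styles 0 and 1: if the superclass's last oop-map block ends exactly where this class's fields would start, allocating oops first keeps the oop fields of super- and subclass contiguous in one run; otherwise it falls back to oops-last. A worked example with made-up offsets:

    #include <cstdio>

    int main() {
      const int heapOopSize           = 8;   // illustrative, 64-bit oops
      const int super_last_map_offset = 16;  // start of super's last oop run
      const int super_last_map_count  = 2;   // oops in that run
      const int next_field_offset     = 32;  // where this class's fields begin

      int next_offset = super_last_map_offset + super_last_map_count * heapOopSize;
      int style = (next_offset == next_field_offset) ? 0 : 1;
      printf("super oops end at %d -> style %d (%s)\n",
             next_offset, style, style == 0 ? "oops first" : "oops last");
      return 0;
    }
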
--- a/src/share/vm/classfile/loaderConstraints.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/classfile/loaderConstraints.cpp	Thu May 13 18:26:35 2010 -0700
@@ -334,33 +334,6 @@
   return NULL;
 }
 
-
-klassOop LoaderConstraintTable::find_constrained_elem_klass(symbolHandle name,
-                                                            symbolHandle elem_name,
-                                                            Handle loader,
-                                                            TRAPS) {
-  LoaderConstraintEntry *p = *(find_loader_constraint(name, loader));
-  if (p != NULL) {
-    assert(p->klass() == NULL, "Expecting null array klass");
-
-    // The array name has a constraint, but it will not have a class. Check
-    // each loader for an associated elem
-    for (int i = 0; i < p->num_loaders(); i++) {
-      Handle no_protection_domain;
-
-      klassOop k = SystemDictionary::find(elem_name, p->loader(i), no_protection_domain, THREAD);
-      if (k != NULL) {
-        // Return the first elem klass found.
-        return k;
-      }
-    }
-  }
-
-  // No constraints, or else no klass loaded yet.
-  return NULL;
-}
-
-
 void LoaderConstraintTable::ensure_loader_constraint_capacity(
                                                      LoaderConstraintEntry *p,
                                                     int nfree) {
--- a/src/share/vm/classfile/loaderConstraints.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/classfile/loaderConstraints.hpp	Thu May 13 18:26:35 2010 -0700
@@ -66,9 +66,6 @@
   //                                           bool is_method, TRAPS)
 
   klassOop find_constrained_klass(symbolHandle name, Handle loader);
-  klassOop find_constrained_elem_klass(symbolHandle name, symbolHandle elem_name,
-                                       Handle loader, TRAPS);
-
 
   // Class loader constraints
 
--- a/src/share/vm/classfile/systemDictionary.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/classfile/systemDictionary.cpp	Thu May 13 18:26:35 2010 -0700
@@ -2178,9 +2178,8 @@
   // a loader constraint that would require this loader to return the
   // klass that is already loaded.
   if (FieldType::is_array(class_name())) {
-    // Array classes are hard because their klassOops are not kept in the
-    // constraint table. The array klass may be constrained, but the elem class
-    // may not be.
+    // For array classes, their klassOops are not kept in the
+    // constraint table. The element klassOops are.
     jint dimension;
     symbolOop object_key;
     BasicType t = FieldType::get_array_info(class_name(), &dimension,
@@ -2190,8 +2189,9 @@
     } else {
       symbolHandle elem_name(THREAD, object_key);
       MutexLocker mu(SystemDictionary_lock, THREAD);
-      klass = constraints()->find_constrained_elem_klass(class_name, elem_name, class_loader, THREAD);
+      klass = constraints()->find_constrained_klass(elem_name, class_loader);
     }
+    // If the element class is already loaded, allocate the array klass.
     if (klass != NULL) {
       klass = Klass::cast(klass)->array_klass_or_null(dimension);
     }
@@ -2209,22 +2209,38 @@
                                              Handle class_loader1,
                                              Handle class_loader2,
                                              Thread* THREAD) {
-  unsigned int d_hash1 = dictionary()->compute_hash(class_name, class_loader1);
+  symbolHandle constraint_name;
+  if (!FieldType::is_array(class_name())) {
+    constraint_name = class_name;
+  } else {
+    // For array classes, their klassOops are not kept in the
+    // constraint table. The element classes are.
+    jint dimension;
+    symbolOop object_key;
+    BasicType t = FieldType::get_array_info(class_name(), &dimension,
+                                            &object_key, CHECK_(false));
+    // primitive types always pass
+    if (t != T_OBJECT) {
+      return true;
+    } else {
+      constraint_name = symbolHandle(THREAD, object_key);
+    }
+  }
+  unsigned int d_hash1 = dictionary()->compute_hash(constraint_name, class_loader1);
   int d_index1 = dictionary()->hash_to_index(d_hash1);
 
-  unsigned int d_hash2 = dictionary()->compute_hash(class_name, class_loader2);
+  unsigned int d_hash2 = dictionary()->compute_hash(constraint_name, class_loader2);
   int d_index2 = dictionary()->hash_to_index(d_hash2);
-
   {
-    MutexLocker mu_s(SystemDictionary_lock, THREAD);
+  MutexLocker mu_s(SystemDictionary_lock, THREAD);
 
-    // Better never do a GC while we're holding these oops
-    No_Safepoint_Verifier nosafepoint;
+  // Better never do a GC while we're holding these oops
+  No_Safepoint_Verifier nosafepoint;
 
-    klassOop klass1 = find_class(d_index1, d_hash1, class_name, class_loader1);
-    klassOop klass2 = find_class(d_index2, d_hash2, class_name, class_loader2);
-    return constraints()->add_entry(class_name, klass1, class_loader1,
-                                    klass2, class_loader2);
+  klassOop klass1 = find_class(d_index1, d_hash1, constraint_name, class_loader1);
+  klassOop klass2 = find_class(d_index2, d_hash2, constraint_name, class_loader2);
+  return constraints()->add_entry(constraint_name, klass1, class_loader1,
+                                  klass2, class_loader2);
   }
 }
 
@@ -2301,6 +2317,7 @@
 // Returns the name of the type that failed a loader constraint check, or
 // NULL if no constraint failed. The returned C string needs cleaning up
 // with a ResourceMark in the caller.  No exception except OOME is thrown.
+// Arrays are not added to the loader constraint table; their elements are.
 char* SystemDictionary::check_signature_loaders(symbolHandle signature,
                                                Handle loader1, Handle loader2,
                                                bool is_method, TRAPS)  {
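
Since only element classes enter the constraint table, both paths above first reduce an array signature to its element name (primitive arrays pass trivially). A simplified stand-in for that FieldType::get_array_info step:

    #include <cstdio>
    #include <cstring>

    // Reduce e.g. "[[Ljava/lang/String;" to "java/lang/String" and a dimension.
    // Returns NULL for primitive element types, which need no constraint.
    const char* constraint_name(const char* sig, int* dimension) {
      *dimension = 0;
      while (*sig == '[') { ++(*dimension); ++sig; }
      if (*sig != 'L') return NULL;
      static char buf[256];
      size_t n = strlen(sig + 1);          // "java/lang/String;"
      if (n < 2 || n >= sizeof(buf)) return NULL;
      memcpy(buf, sig + 1, n - 1);         // drop the trailing ';'
      buf[n - 1] = '\0';
      return buf;
    }

    int main() {
      int dim;
      printf("%s (dim %d)\n", constraint_name("[[Ljava/lang/String;", &dim), dim);
      printf("%s\n", constraint_name("[I", &dim) ? "constrained" : "primitive: passes");
      return 0;
    }
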
--- a/src/share/vm/classfile/vmSymbols.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/classfile/vmSymbols.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -357,6 +357,8 @@
   template(void_double_signature,                     "()D")                                      \
   template(int_void_signature,                        "(I)V")                                     \
   template(int_int_signature,                         "(I)I")                                     \
+  template(char_char_signature,                       "(C)C")                                     \
+  template(short_short_signature,                     "(S)S")                                     \
   template(int_bool_signature,                        "(I)Z")                                     \
   template(float_int_signature,                       "(F)I")                                     \
   template(double_long_signature,                     "(D)J")                                     \
@@ -585,6 +587,10 @@
    do_name(     reverseBytes_name,                               "reverseBytes")                                        \
   do_intrinsic(_reverseBytes_l,           java_lang_Long,         reverseBytes_name,        long_long_signature, F_S)   \
     /*  (symbol reverseBytes_name defined above) */                                                                     \
+  do_intrinsic(_reverseBytes_c,           java_lang_Character,    reverseBytes_name,        char_char_signature, F_S)   \
+    /*  (symbol reverseBytes_name defined above) */                                                                     \
+  do_intrinsic(_reverseBytes_s,           java_lang_Short,        reverseBytes_name,        short_short_signature, F_S) \
+    /*  (symbol reverseBytes_name defined above) */                                                                     \
                                                                                                                         \
   do_intrinsic(_identityHashCode,         java_lang_System,       identityHashCode_name, object_int_signature,   F_S)   \
    do_name(     identityHashCode_name,                           "identityHashCode")                                    \
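
For reference, the new Character and Short reverseBytes intrinsics compute a plain 16-bit byte swap; in C++ terms:

    #include <cstdint>
    #include <cstdio>

    // Swap the two bytes of a 16-bit value, as reverseBytes(char/short) does.
    static uint16_t reverse_bytes_16(uint16_t x) {
      return (uint16_t)((x << 8) | (x >> 8));
    }

    int main() {
      printf("%#06x\n", reverse_bytes_16(0x1234));   // prints 0x3412
      return 0;
    }
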
--- a/src/share/vm/code/codeBlob.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/code/codeBlob.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -249,7 +249,6 @@
   size += round_to(buffer_size, oopSize);
   assert(name != NULL, "must provide a name");
   {
-
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
     blob = new (size) BufferBlob(name, size);
   }
@@ -271,7 +270,6 @@
   unsigned int size = allocation_size(cb, sizeof(BufferBlob));
   assert(name != NULL, "must provide a name");
   {
-
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
     blob = new (size) BufferBlob(name, size, cb);
   }
@@ -298,10 +296,48 @@
   MemoryService::track_code_cache_memory_usage();
 }
 
-bool BufferBlob::is_adapter_blob() const {
-  return (strcmp(AdapterHandlerEntry::name, name()) == 0);
+
+//----------------------------------------------------------------------------------------------------
+// Implementation of AdapterBlob
+
+AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
+  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
+
+  AdapterBlob* blob = NULL;
+  unsigned int size = allocation_size(cb, sizeof(AdapterBlob));
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    blob = new (size) AdapterBlob(size, cb);
+  }
+  // Track memory usage statistic after releasing CodeCache_lock
+  MemoryService::track_code_cache_memory_usage();
+
+  return blob;
 }
 
+
+//----------------------------------------------------------------------------------------------------
+// Implementation of MethodHandlesAdapterBlob
+
+MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
+  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
+
+  MethodHandlesAdapterBlob* blob = NULL;
+  unsigned int size = sizeof(MethodHandlesAdapterBlob);
+  // align the size to CodeEntryAlignment
+  size = align_code_offset(size);
+  size += round_to(buffer_size, oopSize);
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    blob = new (size) MethodHandlesAdapterBlob(size);
+  }
+  // Track memory usage statistic after releasing CodeCache_lock
+  MemoryService::track_code_cache_memory_usage();
+
+  return blob;
+}
+
+
 //----------------------------------------------------------------------------------------------------
 // Implementation of RuntimeStub
 
--- a/src/share/vm/code/codeBlob.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/code/codeBlob.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -90,14 +90,15 @@
   void flush();
 
   // Typing
-  virtual bool is_buffer_blob() const            { return false; }
-  virtual bool is_nmethod() const                { return false; }
-  virtual bool is_runtime_stub() const           { return false; }
-  virtual bool is_deoptimization_stub() const    { return false; }
-  virtual bool is_uncommon_trap_stub() const     { return false; }
-  virtual bool is_exception_stub() const         { return false; }
-  virtual bool is_safepoint_stub() const         { return false; }
-  virtual bool is_adapter_blob() const           { return false; }
+  virtual bool is_buffer_blob() const                 { return false; }
+  virtual bool is_nmethod() const                     { return false; }
+  virtual bool is_runtime_stub() const                { return false; }
+  virtual bool is_deoptimization_stub() const         { return false; }
+  virtual bool is_uncommon_trap_stub() const          { return false; }
+  virtual bool is_exception_stub() const              { return false; }
+  virtual bool is_safepoint_stub() const              { return false; }
+  virtual bool is_adapter_blob() const                { return false; }
+  virtual bool is_method_handles_adapter_blob() const { return false; }
 
   virtual bool is_compiled_by_c2() const         { return false; }
   virtual bool is_compiled_by_c1() const         { return false; }
@@ -221,6 +222,9 @@
 
 class BufferBlob: public CodeBlob {
   friend class VMStructs;
+  friend class AdapterBlob;
+  friend class MethodHandlesAdapterBlob;
+
  private:
   // Creation support
   BufferBlob(const char* name, int size);
@@ -236,8 +240,7 @@
   static void free(BufferBlob* buf);
 
   // Typing
-  bool is_buffer_blob() const                    { return true; }
-  bool is_adapter_blob() const;
+  virtual bool is_buffer_blob() const            { return true; }
 
   // GC/Verification support
   void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f)  { /* nothing to do */ }
@@ -255,6 +258,40 @@
 
 
 //----------------------------------------------------------------------------------------------------
+// AdapterBlob: used to hold C2I/I2C adapters
+
+class AdapterBlob: public BufferBlob {
+private:
+  AdapterBlob(int size)                 : BufferBlob("I2C/C2I adapters", size) {}
+  AdapterBlob(int size, CodeBuffer* cb) : BufferBlob("I2C/C2I adapters", size, cb) {}
+
+public:
+  // Creation
+  static AdapterBlob* create(CodeBuffer* cb);
+
+  // Typing
+  virtual bool is_adapter_blob() const { return true; }
+};
+
+
+//----------------------------------------------------------------------------------------------------
+// MethodHandlesAdapterBlob: used to hold MethodHandles adapters
+
+class MethodHandlesAdapterBlob: public BufferBlob {
+private:
+  MethodHandlesAdapterBlob(int size)                 : BufferBlob("MethodHandles adapters", size) {}
+  MethodHandlesAdapterBlob(int size, CodeBuffer* cb) : BufferBlob("MethodHandles adapters", size, cb) {}
+
+public:
+  // Creation
+  static MethodHandlesAdapterBlob* create(int buffer_size);
+
+  // Typing
+  virtual bool is_method_handles_adapter_blob() const { return true; }
+};
+
+
+//----------------------------------------------------------------------------------------------------
 // RuntimeStub: describes stubs used by compiled code to call a (static) C++ runtime routine
 
 class RuntimeStub: public CodeBlob {
--- a/src/share/vm/code/codeCache.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/code/codeCache.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -284,9 +284,11 @@
       cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
     }
 #endif //PRODUCT
-    if (is_live)
+    if (is_live) {
       // Perform cur->oops_do(f), maybe just once per nmethod.
       f->do_code_blob(cur);
+      cur->fix_oop_relocations();
+    }
   }
 
   // Check for stray marks.
--- a/src/share/vm/code/nmethod.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/code/nmethod.cpp	Thu May 13 18:26:35 2010 -0700
@@ -685,6 +685,7 @@
     _exception_offset        = 0;
     _deoptimize_offset       = 0;
     _deoptimize_mh_offset    = 0;
+    _unwind_handler_offset   = -1;
     _trap_offset             = offsets->value(CodeOffsets::Dtrace_trap);
     _orig_pc_offset          = 0;
     _stub_offset             = data_offset();
@@ -798,6 +799,11 @@
     _exception_offset        = _stub_offset + offsets->value(CodeOffsets::Exceptions);
     _deoptimize_offset       = _stub_offset + offsets->value(CodeOffsets::Deopt);
     _deoptimize_mh_offset    = _stub_offset + offsets->value(CodeOffsets::DeoptMH);
+    if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
+      _unwind_handler_offset   = instructions_offset() + offsets->value(CodeOffsets::UnwindHandler);
+    } else {
+      _unwind_handler_offset   = -1;
+    }
     _consts_offset           = instructions_offset() + code_buffer->total_offset_of(code_buffer->consts()->start());
     _scopes_data_offset      = data_offset();
     _scopes_pcs_offset       = _scopes_data_offset   + round_to(debug_info->data_size         (), oopSize);
--- a/src/share/vm/code/nmethod.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/code/nmethod.hpp	Thu May 13 18:26:35 2010 -0700
@@ -154,6 +154,9 @@
   // All deoptee's at a MethodHandle call site will resume execution
   // at this location described by this offset.
   int _deoptimize_mh_offset;
+  // Offset of the unwind handler if it exists
+  int _unwind_handler_offset;
+
 #ifdef HAVE_DTRACE_H
   int _trap_offset;
 #endif // def HAVE_DTRACE_H
@@ -341,6 +344,7 @@
   address exception_begin       () const          { return           header_begin() + _exception_offset     ; }
   address deopt_handler_begin   () const          { return           header_begin() + _deoptimize_offset    ; }
   address deopt_mh_handler_begin() const          { return           header_begin() + _deoptimize_mh_offset ; }
+  address unwind_handler_begin  () const          { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
   address stub_begin            () const          { return           header_begin() + _stub_offset          ; }
   address stub_end              () const          { return           header_begin() + _consts_offset        ; }
   address consts_begin          () const          { return           header_begin() + _consts_offset        ; }
--- a/src/share/vm/compiler/compileBroker.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/compiler/compileBroker.cpp	Thu May 13 18:26:35 2010 -0700
@@ -988,10 +988,12 @@
     }
     if (method->is_not_compilable(comp_level)) return NULL;
 
-    nmethod* saved = CodeCache::find_and_remove_saved_code(method());
-    if (saved != NULL) {
-      method->set_code(method, saved);
-      return saved;
+    if (UseCodeCacheFlushing) {
+      nmethod* saved = CodeCache::find_and_remove_saved_code(method());
+      if (saved != NULL) {
+        method->set_code(method, saved);
+        return saved;
+      }
     }
 
   } else {
@@ -1412,9 +1414,14 @@
     intx thread_id = os::current_thread_id();
     for (int try_temp_dir = 1; try_temp_dir >= 0; try_temp_dir--) {
       const char* dir = (try_temp_dir ? os::get_temp_directory() : NULL);
-      if (dir == NULL)  dir = "";
-      sprintf(fileBuf, "%shs_c" UINTX_FORMAT "_pid%u.log",
-              dir, thread_id, os::current_process_id());
+      if (dir == NULL) {
+        jio_snprintf(fileBuf, sizeof(fileBuf), "hs_c" UINTX_FORMAT "_pid%u.log",
+                     thread_id, os::current_process_id());
+      } else {
+        jio_snprintf(fileBuf, sizeof(fileBuf),
+                     "%s%shs_c" UINTX_FORMAT "_pid%u.log", dir,
+                     os::file_separator(), thread_id, os::current_process_id());
+      }
       fp = fopen(fileBuf, "at");
       if (fp != NULL) {
         file = NEW_C_HEAP_ARRAY(char, strlen(fileBuf)+1);
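
The log-name change replaces an unbounded sprintf with jio_snprintf and an explicit os::file_separator(), so a long temp-directory path can no longer overflow fileBuf. Modeled below with plain snprintf and made-up values:

    #include <cstdio>

    // snprintf stands in for jio_snprintf; "/" for os::file_separator().
    int main() {
      char fileBuf[64];
      const char* dir = "/tmp";          // os::get_temp_directory() stand-in
      long thread_id = 42;
      int  pid = 1234;
      if (dir == NULL) {
        snprintf(fileBuf, sizeof(fileBuf), "hs_c%ld_pid%d.log", thread_id, pid);
      } else {
        snprintf(fileBuf, sizeof(fileBuf), "%s%shs_c%ld_pid%d.log",
                 dir, "/", thread_id, pid);
      }
      puts(fileBuf);                     // /tmp/hs_c42_pid1234.log
      return 0;
    }
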
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp	Thu May 13 18:26:35 2010 -0700
@@ -46,9 +46,9 @@
 
   _processor_count = os::active_processor_count();
 
-  if (CMSConcurrentMTEnabled && (ParallelCMSThreads > 1)) {
+  if (CMSConcurrentMTEnabled && (ConcGCThreads > 1)) {
     assert(_processor_count > 0, "Processor count is suspect");
-    _concurrent_processor_count = MIN2((uint) ParallelCMSThreads,
+    _concurrent_processor_count = MIN2((uint) ConcGCThreads,
                                        (uint) _processor_count);
   } else {
     _concurrent_processor_count = 1;
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2007-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,11 +32,10 @@
   ConcurrentMarkSweepPolicy* as_concurrent_mark_sweep_policy() { return this; }
 
   void initialize_gc_policy_counters();
-#if 1
+
   virtual void initialize_size_policy(size_t init_eden_size,
                                       size_t init_promo_size,
                                       size_t init_survivor_size);
-#endif
 
   // Returns true if the incremental mode is enabled.
   virtual bool has_soft_ended_eden();
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -606,7 +606,7 @@
     assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
   }
 
-  if (!_markStack.allocate(CMSMarkStackSize)) {
+  if (!_markStack.allocate(MarkStackSize)) {
     warning("Failed to allocate CMS Marking Stack");
     return;
   }
@@ -617,13 +617,13 @@
 
   // Support for multi-threaded concurrent phases
   if (ParallelGCThreads > 0 && CMSConcurrentMTEnabled) {
-    if (FLAG_IS_DEFAULT(ParallelCMSThreads)) {
+    if (FLAG_IS_DEFAULT(ConcGCThreads)) {
       // just for now
-      FLAG_SET_DEFAULT(ParallelCMSThreads, (ParallelGCThreads + 3)/4);
-    }
-    if (ParallelCMSThreads > 1) {
+      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
+    }
+    if (ConcGCThreads > 1) {
       _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
-                                 ParallelCMSThreads, true);
+                                 ConcGCThreads, true);
       if (_conc_workers == NULL) {
         warning("GC/CMS: _conc_workers allocation failure: "
               "forcing -CMSConcurrentMTEnabled");
@@ -634,13 +634,13 @@
     }
   }
   if (!CMSConcurrentMTEnabled) {
-    ParallelCMSThreads = 0;
+    ConcGCThreads = 0;
   } else {
     // Turn off CMSCleanOnEnter optimization temporarily for
     // the MT case where it's not fixed yet; see 6178663.
     CMSCleanOnEnter = false;
   }
-  assert((_conc_workers != NULL) == (ParallelCMSThreads > 1),
+  assert((_conc_workers != NULL) == (ConcGCThreads > 1),
          "Inconsistency");
 
   // Parallel task queues; these are shared for the
@@ -648,7 +648,7 @@
   // are not shared with parallel scavenge (ParNew).
   {
     uint i;
-    uint num_queues = (uint) MAX2(ParallelGCThreads, ParallelCMSThreads);
+    uint num_queues = (uint) MAX2(ParallelGCThreads, ConcGCThreads);
 
     if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
          || ParallelRefProcEnabled)
@@ -723,8 +723,9 @@
 
   // Support for parallelizing survivor space rescan
   if (CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) {
-    size_t max_plab_samples = cp->max_gen0_size()/
-                                ((SurvivorRatio+2)*MinTLABSize);
+    const size_t max_plab_samples =
+      ((DefNewGeneration*)_young_gen)->max_survivor_size()/MinTLABSize;
+
     _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads);
     _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples);
     _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads);
@@ -1814,8 +1815,19 @@
     do_compaction_work(clear_all_soft_refs);
 
     // Has the GC time limit been exceeded?
-    check_gc_time_limit();
-
+    DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration();
+    size_t max_eden_size = young_gen->max_capacity() -
+                           young_gen->to()->capacity() -
+                           young_gen->from()->capacity();
+    GenCollectedHeap* gch = GenCollectedHeap::heap();
+    GCCause::Cause gc_cause = gch->gc_cause();
+    size_policy()->check_gc_overhead_limit(_young_gen->used(),
+                                           young_gen->eden()->used(),
+                                           _cmsGen->max_capacity(),
+                                           max_eden_size,
+                                           full,
+                                           gc_cause,
+                                           gch->collector_policy());
   } else {
     do_mark_sweep_work(clear_all_soft_refs, first_state,
       should_start_over);
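
The eden bound handed to check_gc_overhead_limit is the young generation's maximum capacity minus both survivor spaces. With illustrative sizes:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t M = 1024 * 1024;
      size_t young_max = 64 * M;               // young_gen->max_capacity()
      size_t to = 8 * M, from = 8 * M;         // survivor space capacities
      size_t max_eden_size = young_max - to - from;
      printf("max eden = %zu MB\n", max_eden_size / M);   // 48 MB
      return 0;
    }
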
@@ -1827,55 +1839,6 @@
   return;
 }
 
-void CMSCollector::check_gc_time_limit() {
-
-  // Ignore explicit GC's.  Exiting here does not set the flag and
-  // does not reset the count.  Updating of the averages for system
-  // GC's is still controlled by UseAdaptiveSizePolicyWithSystemGC.
-  GCCause::Cause gc_cause = GenCollectedHeap::heap()->gc_cause();
-  if (GCCause::is_user_requested_gc(gc_cause) ||
-      GCCause::is_serviceability_requested_gc(gc_cause)) {
-    return;
-  }
-
-  // Calculate the fraction of the CMS generation was freed during
-  // the last collection.
-  // Only consider the STW compacting cost for now.
-  //
-  // Note that the gc time limit test only works for the collections
-  // of the young gen + tenured gen and not for collections of the
-  // permanent gen.  That is because the calculation of the space
-  // freed by the collection is the free space in the young gen +
-  // tenured gen.
-
-  double fraction_free =
-    ((double)_cmsGen->free())/((double)_cmsGen->max_capacity());
-  if ((100.0 * size_policy()->compacting_gc_cost()) >
-         ((double) GCTimeLimit) &&
-        ((fraction_free * 100) < GCHeapFreeLimit)) {
-    size_policy()->inc_gc_time_limit_count();
-    if (UseGCOverheadLimit &&
-        (size_policy()->gc_time_limit_count() >
-         AdaptiveSizePolicyGCTimeLimitThreshold)) {
-      size_policy()->set_gc_time_limit_exceeded(true);
-      // Avoid consecutive OOM due to the gc time limit by resetting
-      // the counter.
-      size_policy()->reset_gc_time_limit_count();
-      if (PrintGCDetails) {
-        gclog_or_tty->print_cr("      GC is exceeding overhead limit "
-          "of %d%%", GCTimeLimit);
-      }
-    } else {
-      if (PrintGCDetails) {
-        gclog_or_tty->print_cr("      GC would exceed overhead limit "
-          "of %d%%", GCTimeLimit);
-      }
-    }
-  } else {
-    size_policy()->reset_gc_time_limit_count();
-  }
-}
-
 // Resize the perm generation and the tenured generation
 // after obtaining the free list locks for the
 // two generations.
@@ -3657,7 +3620,7 @@
   assert(_revisitStack.isEmpty(), "tabula rasa");
   DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());)
   bool result = false;
-  if (CMSConcurrentMTEnabled && ParallelCMSThreads > 0) {
+  if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
     result = do_marking_mt(asynch);
   } else {
     result = do_marking_st(asynch);
@@ -4174,10 +4137,10 @@
 }
 
 bool CMSCollector::do_marking_mt(bool asynch) {
-  assert(ParallelCMSThreads > 0 && conc_workers() != NULL, "precondition");
+  assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
   // In the future this would be determined ergonomically, based
   // on #cpu's, # active mutator threads (and load), and mutation rate.
-  int num_workers = ParallelCMSThreads;
+  int num_workers = ConcGCThreads;
 
   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
   CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
@@ -6181,6 +6144,11 @@
       }
       curAddr = chunk.end();
     }
+    // A successful mostly concurrent collection has been done.
+    // Because only the full (i.e., concurrent mode failure) collections
+    // are being measured for gc overhead limits, clean the "near" flag
+    // and count.
+    sp->reset_gc_overhead_limit_count();
     _collectorState = Idling;
   } else {
     // already have the lock
@@ -6429,8 +6397,8 @@
 // For now we take the expedient path of just disabling the
 // messages for the problematic case.)
 void CMSMarkStack::expand() {
-  assert(_capacity <= CMSMarkStackSizeMax, "stack bigger than permitted");
-  if (_capacity == CMSMarkStackSizeMax) {
+  assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
+  if (_capacity == MarkStackSizeMax) {
     if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
       // We print a warning message only once per CMS cycle.
       gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
@@ -6438,7 +6406,7 @@
     return;
   }
   // Double capacity if possible
-  size_t new_capacity = MIN2(_capacity*2, CMSMarkStackSizeMax);
+  size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
   // Do not give up existing stack until we have managed to
   // get the double capacity that we desired.
   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -570,10 +570,6 @@
   ConcurrentMarkSweepPolicy* _collector_policy;
   ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }
 
-  // Check whether the gc time limit has been
-  // exceeded and set the size policy flag
-  // appropriately.
-  void check_gc_time_limit();
   // XXX Move these to CMSStats ??? FIX ME !!!
   elapsedTimer _inter_sweep_timer;   // time between sweeps
   elapsedTimer _intra_sweep_timer;   // time _in_ sweeps
--- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Thu May 13 18:26:35 2010 -0700
@@ -44,20 +44,20 @@
 {
 
   // Ergonomically select initial concurrent refinement parameters
-  if (FLAG_IS_DEFAULT(G1ConcRefineGreenZone)) {
-    FLAG_SET_DEFAULT(G1ConcRefineGreenZone, MAX2<int>(ParallelGCThreads, 1));
+  if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
+    FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, MAX2<int>(ParallelGCThreads, 1));
   }
-  set_green_zone(G1ConcRefineGreenZone);
+  set_green_zone(G1ConcRefinementGreenZone);
 
-  if (FLAG_IS_DEFAULT(G1ConcRefineYellowZone)) {
-    FLAG_SET_DEFAULT(G1ConcRefineYellowZone, green_zone() * 3);
+  if (FLAG_IS_DEFAULT(G1ConcRefinementYellowZone)) {
+    FLAG_SET_DEFAULT(G1ConcRefinementYellowZone, green_zone() * 3);
   }
-  set_yellow_zone(MAX2<int>(G1ConcRefineYellowZone, green_zone()));
+  set_yellow_zone(MAX2<int>(G1ConcRefinementYellowZone, green_zone()));
 
-  if (FLAG_IS_DEFAULT(G1ConcRefineRedZone)) {
-    FLAG_SET_DEFAULT(G1ConcRefineRedZone, yellow_zone() * 2);
+  if (FLAG_IS_DEFAULT(G1ConcRefinementRedZone)) {
+    FLAG_SET_DEFAULT(G1ConcRefinementRedZone, yellow_zone() * 2);
   }
-  set_red_zone(MAX2<int>(G1ConcRefineRedZone, yellow_zone()));
+  set_red_zone(MAX2<int>(G1ConcRefinementRedZone, yellow_zone()));
   _n_worker_threads = thread_num();
   // We need one extra thread to do the young gen rset size sampling.
   _n_threads = _n_worker_threads + 1;
@@ -76,15 +76,15 @@
 }
 
 void ConcurrentG1Refine::reset_threshold_step() {
-  if (FLAG_IS_DEFAULT(G1ConcRefineThresholdStep)) {
+  if (FLAG_IS_DEFAULT(G1ConcRefinementThresholdStep)) {
     _thread_threshold_step = (yellow_zone() - green_zone()) / (worker_thread_num() + 1);
   } else {
-    _thread_threshold_step = G1ConcRefineThresholdStep;
+    _thread_threshold_step = G1ConcRefinementThresholdStep;
   }
 }
 
 int ConcurrentG1Refine::thread_num() {
-  return MAX2<int>((G1ParallelRSetThreads > 0) ? G1ParallelRSetThreads : ParallelGCThreads, 1);
+  return MAX2<int>((G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads : ParallelGCThreads, 1);
 }
 
 void ConcurrentG1Refine::init() {
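
With the renamed flags left at their defaults, the refinement zones derive from ParallelGCThreads: green = max(ParallelGCThreads, 1), yellow = 3 * green, red = 2 * yellow, each clamped to at least the zone below it. For example:

    #include <algorithm>
    #include <cstdio>

    int main() {
      int parallel_gc_threads = 4;                    // illustrative value
      int green  = std::max(parallel_gc_threads, 1);  // 4
      int yellow = std::max(green * 3, green);        // 12
      int red    = std::max(yellow * 2, yellow);      // 24
      printf("green=%d yellow=%d red=%d\n", green, yellow, red);
      return 0;
    }
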
--- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Thu May 13 18:26:35 2010 -0700
@@ -39,7 +39,8 @@
   * running. If the length becomes red (max queue length) the mutators start
   * processing the buffers.
   *
-  * There are some interesting cases (with G1AdaptiveConcRefine turned off):
+  * There are some interesting cases (when G1UseAdaptiveConcRefinement
+  * is turned off):
   * 1) green = yellow = red = 0. In this case the mutator will process all
   *    buffers. Except for those that are created by the deferred updates
   *    machinery during a collection.
--- a/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -69,9 +69,9 @@
   G1CollectorPolicy* g1p = g1h->g1_policy();
   if (g1p->adaptive_young_list_length()) {
     int regions_visited = 0;
-    g1h->young_list_rs_length_sampling_init();
-    while (g1h->young_list_rs_length_sampling_more()) {
-      g1h->young_list_rs_length_sampling_next();
+    g1h->young_list()->rs_length_sampling_init();
+    while (g1h->young_list()->rs_length_sampling_more()) {
+      g1h->young_list()->rs_length_sampling_next();
       ++regions_visited;
 
       // we try to yield every time we visit 10 regions
@@ -107,7 +107,7 @@
     if (_should_terminate) {
       break;
     }
-    _monitor->wait(Mutex::_no_safepoint_check_flag, G1ConcRefineServiceInterval);
+    _monitor->wait(Mutex::_no_safepoint_check_flag, G1ConcRefinementServiceIntervalMillis);
   }
 }
 
@@ -127,7 +127,7 @@
 void ConcurrentG1RefineThread::activate() {
   MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
   if (_worker_id > 0) {
-    if (G1TraceConcurrentRefinement) {
+    if (G1TraceConcRefinement) {
       DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
       gclog_or_tty->print_cr("G1-Refine-activated worker %d, on threshold %d, current %d",
                              _worker_id, _threshold, (int)dcqs.completed_buffers_num());
@@ -143,7 +143,7 @@
 void ConcurrentG1RefineThread::deactivate() {
   MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
   if (_worker_id > 0) {
-    if (G1TraceConcurrentRefinement) {
+    if (G1TraceConcRefinement) {
       DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
       gclog_or_tty->print_cr("G1-Refine-deactivated worker %d, off threshold %d, current %d",
                              _worker_id, _deactivation_threshold, (int)dcqs.completed_buffers_num());
@@ -162,6 +162,7 @@
   if (_worker_id >= cg1r()->worker_thread_num()) {
     run_young_rs_sampling();
     terminate();
+    return;
   }
 
   _vtime_start = os::elapsedVTime();
@@ -218,9 +219,13 @@
 
 
 void ConcurrentG1RefineThread::yield() {
-  if (G1TraceConcurrentRefinement) gclog_or_tty->print_cr("G1-Refine-yield");
+  if (G1TraceConcRefinement) {
+    gclog_or_tty->print_cr("G1-Refine-yield");
+  }
   _sts.yield("G1 refine");
-  if (G1TraceConcurrentRefinement) gclog_or_tty->print_cr("G1-Refine-yield-end");
+  if (G1TraceConcRefinement) {
+    gclog_or_tty->print_cr("G1-Refine-yield-end");
+  }
 }
 
 void ConcurrentG1RefineThread::stop() {
@@ -241,7 +246,9 @@
       Terminator_lock->wait();
     }
   }
-  if (G1TraceConcurrentRefinement) gclog_or_tty->print_cr("G1-Refine-stop");
+  if (G1TraceConcRefinement) {
+    gclog_or_tty->print_cr("G1-Refine-stop");
+  }
 }
 
 void ConcurrentG1RefineThread::print() const {
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu May 13 18:26:35 2010 -0700
@@ -297,6 +297,11 @@
   }
 }
 
+// Currently we do not call this at all. Normally we would call it
+// during the concurrent marking / remark phases but we now call
+// the lock-based version instead. But we might want to resurrect this
+// code in the future. So, we'll leave it here commented out.
+#if 0
 MemRegion CMRegionStack::pop() {
   while (true) {
     // Otherwise...
@@ -321,6 +326,41 @@
     // Otherwise, we need to try again.
   }
 }
+#endif // 0
+
+void CMRegionStack::push_with_lock(MemRegion mr) {
+  assert(mr.word_size() > 0, "Precondition");
+  MutexLockerEx x(CMRegionStack_lock, Mutex::_no_safepoint_check_flag);
+
+  if (isFull()) {
+    _overflow = true;
+    return;
+  }
+
+  _base[_index] = mr;
+  _index += 1;
+}
+
+MemRegion CMRegionStack::pop_with_lock() {
+  MutexLockerEx x(CMRegionStack_lock, Mutex::_no_safepoint_check_flag);
+
+  while (true) {
+    if (_index == 0) {
+      return MemRegion();
+    }
+    _index -= 1;
+
+    MemRegion mr = _base[_index];
+    if (mr.start() != NULL) {
+      assert(mr.end() != NULL, "invariant");
+      assert(mr.word_size() > 0, "invariant");
+      return mr;
+    } else {
+      // that entry was invalidated... let's skip it
+      assert(mr.end() == NULL, "invariant");
+    }
+  }
+}
 
 bool CMRegionStack::invalidate_entries_into_cset() {
   bool result = false;
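
The lock-based variants trade the old CAS loop for a single mutex, and pop_with_lock still skips entries whose start was nulled by invalidate_entries_into_cset. A toy with a zero start standing in for an invalidated region:

    #include <cstdio>
    #include <mutex>
    #include <vector>

    struct Region {
      int start, end;
      bool valid() const { return start != 0; }   // NULL-start = invalidated
    };

    static std::mutex stack_lock;                  // CMRegionStack_lock stand-in
    static std::vector<Region> stack_;

    void push_with_lock(Region r) {
      std::lock_guard<std::mutex> x(stack_lock);
      stack_.push_back(r);
    }

    Region pop_with_lock() {
      std::lock_guard<std::mutex> x(stack_lock);
      while (!stack_.empty()) {
        Region r = stack_.back();
        stack_.pop_back();
        if (r.valid()) return r;                   // skip invalidated entries
      }
      return Region{0, 0};                         // empty stack
    }

    int main() {
      push_with_lock(Region{100, 200});
      push_with_lock(Region{0, 0});                // invalidated entry on top
      Region r = pop_with_lock();                  // skips it
      printf("popped [%d,%d)\n", r.start, r.end);  // popped [100,200)
      return 0;
    }
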
@@ -447,7 +487,7 @@
     gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                            "heap end = "PTR_FORMAT, _heap_start, _heap_end);
 
-  _markStack.allocate(G1MarkStackSize);
+  _markStack.allocate(MarkStackSize);
   _regionStack.allocate(G1MarkRegionStackSize);
 
   // Create & start a ConcurrentMark thread.
@@ -461,7 +501,7 @@
   assert(_markBitMap2.covers(rs), "_markBitMap2 inconsistency");
 
   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
-  satb_qs.set_buffer_size(G1SATBLogBufferSize);
+  satb_qs.set_buffer_size(G1SATBBufferSize);
 
   int size = (int) MAX2(ParallelGCThreads, (size_t)1);
   _par_cleanup_thread_state = NEW_C_HEAP_ARRAY(ParCleanupThreadState*, size);
@@ -483,8 +523,8 @@
     _accum_task_vtime[i] = 0.0;
   }
 
-  if (ParallelMarkingThreads > ParallelGCThreads) {
-    vm_exit_during_initialization("Can't have more ParallelMarkingThreads "
+  if (ConcGCThreads > ParallelGCThreads) {
+    vm_exit_during_initialization("Can't have more ConcGCThreads "
                                   "than ParallelGCThreads.");
   }
   if (ParallelGCThreads == 0) {
@@ -494,11 +534,11 @@
     _sleep_factor             = 0.0;
     _marking_task_overhead    = 1.0;
   } else {
-    if (ParallelMarkingThreads > 0) {
-      // notice that ParallelMarkingThreads overwrites G1MarkingOverheadPercent
+    if (ConcGCThreads > 0) {
+      // notice that ConcGCThreads overwrites G1MarkingOverheadPercent
       // if both are set
 
-      _parallel_marking_threads = ParallelMarkingThreads;
+      _parallel_marking_threads = ConcGCThreads;
       _sleep_factor             = 0.0;
       _marking_task_overhead    = 1.0;
     } else if (G1MarkingOverheadPercent > 0) {
@@ -668,24 +708,46 @@
 //
 
 void ConcurrentMark::clearNextBitmap() {
-   guarantee(!G1CollectedHeap::heap()->mark_in_progress(), "Precondition.");
-
-   // clear the mark bitmap (no grey objects to start with).
-   // We need to do this in chunks and offer to yield in between
-   // each chunk.
-   HeapWord* start  = _nextMarkBitMap->startWord();
-   HeapWord* end    = _nextMarkBitMap->endWord();
-   HeapWord* cur    = start;
-   size_t chunkSize = M;
-   while (cur < end) {
-     HeapWord* next = cur + chunkSize;
-     if (next > end)
-       next = end;
-     MemRegion mr(cur,next);
-     _nextMarkBitMap->clearRange(mr);
-     cur = next;
-     do_yield_check();
-   }
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  G1CollectorPolicy* g1p = g1h->g1_policy();
+
+  // Make sure that the concurrent mark thread still appears to be
+  // within the current cycle.
+  guarantee(cmThread()->during_cycle(), "invariant");
+
+  // We are finishing up the current cycle by clearing the next
+  // marking bitmap and getting it ready for the next cycle. During
+  // this time no other cycle can start. So, let's make sure that this
+  // is the case.
+  guarantee(!g1h->mark_in_progress(), "invariant");
+
+  // clear the mark bitmap (no grey objects to start with).
+  // We need to do this in chunks and offer to yield in between
+  // each chunk.
+  HeapWord* start  = _nextMarkBitMap->startWord();
+  HeapWord* end    = _nextMarkBitMap->endWord();
+  HeapWord* cur    = start;
+  size_t chunkSize = M;
+  while (cur < end) {
+    HeapWord* next = cur + chunkSize;
+    if (next > end)
+      next = end;
+    MemRegion mr(cur,next);
+    _nextMarkBitMap->clearRange(mr);
+    cur = next;
+    do_yield_check();
+
+    // Repeat the checks from above as asserts, to minimize their
+    // overhead in product builds. We keep them as guarantees at the
+    // beginning / end of the bitmap clearing so that product builds
+    // still get some checking.
+    assert(cmThread()->during_cycle(), "invariant");
+    assert(!g1h->mark_in_progress(), "invariant");
+  }
+
+  // Repeat the asserts from above.
+  guarantee(cmThread()->during_cycle(), "invariant");
+  guarantee(!g1h->mark_in_progress(), "invariant");
 }
 
 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
@@ -705,7 +767,8 @@
   _has_aborted = false;
 
   if (G1PrintReachableAtInitialMark) {
-    print_reachable(true, "before");
+    print_reachable("at-cycle-start",
+                    true /* use_prev_marking */, true /* all */);
   }
 
   // Initialise marking structures. This has to be done in a STW phase.
@@ -760,7 +823,10 @@
   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
 
   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
-  satb_mq_set.set_active_all_threads(true);
+  // This is the start of the marking cycle; we expect all
+  // threads to have SATB queues with active set to false.
+  satb_mq_set.set_active_all_threads(true, /* new active value */
+                                     false /* expected_active */);
 
   // update_g1_committed() will be called at the end of an evac pause
   // when marking is on. So, it's also called at the end of the
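
The new expected_active parameter turns the global SATB activation flag into
a checked state machine. A minimal sketch of the pattern (illustrative only;
the real SATBMarkQueueSet also propagates the flag to every JavaThread's
queue):

    #include <cassert>

    class ActiveFlagSketch {
      bool _all_active;
    public:
      ActiveFlagSketch() : _all_active(false) {}
      void set_active_all_threads(bool active, bool expected_active) {
        // The caller states what the global flag must currently be, so
        // overlapping or out-of-order cycle transitions fail fast.
        assert(_all_active == expected_active && "SATB active state out of sync");
        _all_active = active;
      }
    };
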
@@ -1079,7 +1145,11 @@
       gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
   } else {
     // We're done with marking.
-    JavaThread::satb_mark_queue_set().set_active_all_threads(false);
+    // This is the end of the marking cycle; we expect all
+    // threads to have SATB queues with active set to true.
+    JavaThread::satb_mark_queue_set().set_active_all_threads(
+                                                  false, /* new active value */
+                                                  true /* expected_active */);
 
     if (VerifyDuringGC) {
       HandleMark hm;  // handle scope
@@ -1910,19 +1980,21 @@
 
 #ifndef PRODUCT
 
-class ReachablePrinterOopClosure: public OopClosure {
+class PrintReachableOopClosure: public OopClosure {
 private:
   G1CollectedHeap* _g1h;
   CMBitMapRO*      _bitmap;
   outputStream*    _out;
   bool             _use_prev_marking;
+  bool             _all;
 
 public:
-  ReachablePrinterOopClosure(CMBitMapRO*   bitmap,
-                             outputStream* out,
-                             bool          use_prev_marking) :
+  PrintReachableOopClosure(CMBitMapRO*   bitmap,
+                           outputStream* out,
+                           bool          use_prev_marking,
+                           bool          all) :
     _g1h(G1CollectedHeap::heap()),
-    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking) { }
+    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking), _all(all) { }
 
   void do_oop(narrowOop* p) { do_oop_work(p); }
   void do_oop(      oop* p) { do_oop_work(p); }
@@ -1932,9 +2004,11 @@
     const char* str = NULL;
     const char* str2 = "";
 
-    if (!_g1h->is_in_g1_reserved(obj))
-      str = "outside G1 reserved";
-    else {
+    if (obj == NULL) {
+      str = "";
+    } else if (!_g1h->is_in_g1_reserved(obj)) {
+      str = " O";
+    } else {
       HeapRegion* hr  = _g1h->heap_region_containing(obj);
       guarantee(hr != NULL, "invariant");
       bool over_tams = false;
@@ -1943,74 +2017,67 @@
       } else {
         over_tams = hr->obj_allocated_since_next_marking(obj);
       }
+      bool marked = _bitmap->isMarked((HeapWord*) obj);
 
       if (over_tams) {
-        str = "over TAMS";
-        if (_bitmap->isMarked((HeapWord*) obj)) {
+        str = " >";
+        if (marked) {
           str2 = " AND MARKED";
         }
-      } else if (_bitmap->isMarked((HeapWord*) obj)) {
-        str = "marked";
+      } else if (marked) {
+        str = " M";
       } else {
-        str = "#### NOT MARKED ####";
+        str = " NOT";
       }
     }
 
-    _out->print_cr("    "PTR_FORMAT" contains "PTR_FORMAT" %s%s",
+    _out->print_cr("  "PTR_FORMAT": "PTR_FORMAT"%s%s",
                    p, (void*) obj, str, str2);
   }
 };
 
-class ReachablePrinterClosure: public BitMapClosure {
+class PrintReachableObjectClosure : public ObjectClosure {
 private:
   CMBitMapRO*   _bitmap;
   outputStream* _out;
   bool          _use_prev_marking;
+  bool          _all;
+  HeapRegion*   _hr;
 
 public:
-  ReachablePrinterClosure(CMBitMapRO*   bitmap,
-                          outputStream* out,
-                          bool          use_prev_marking) :
-    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking) { }
-
-  bool do_bit(size_t offset) {
-    HeapWord* addr = _bitmap->offsetToHeapWord(offset);
-    ReachablePrinterOopClosure oopCl(_bitmap, _out, _use_prev_marking);
-
-    _out->print_cr("  obj "PTR_FORMAT", offset %10d (marked)", addr, offset);
-    oop(addr)->oop_iterate(&oopCl);
-    _out->print_cr("");
-
-    return true;
+  PrintReachableObjectClosure(CMBitMapRO*   bitmap,
+                              outputStream* out,
+                              bool          use_prev_marking,
+                              bool          all,
+                              HeapRegion*   hr) :
+    _bitmap(bitmap), _out(out),
+    _use_prev_marking(use_prev_marking), _all(all), _hr(hr) { }
+
+  void do_object(oop o) {
+    bool over_tams;
+    if (_use_prev_marking) {
+      over_tams = _hr->obj_allocated_since_prev_marking(o);
+    } else {
+      over_tams = _hr->obj_allocated_since_next_marking(o);
+    }
+    bool marked = _bitmap->isMarked((HeapWord*) o);
+    bool print_it = _all || over_tams || marked;
+
+    if (print_it) {
+      _out->print_cr(" "PTR_FORMAT"%s",
+                     o, (over_tams) ? " >" : (marked) ? " M" : "");
+      PrintReachableOopClosure oopCl(_bitmap, _out, _use_prev_marking, _all);
+      o->oop_iterate(&oopCl);
+    }
   }
 };
 
-class ObjInRegionReachablePrinterClosure : public ObjectClosure {
+class PrintReachableRegionClosure : public HeapRegionClosure {
 private:
   CMBitMapRO*   _bitmap;
   outputStream* _out;
   bool          _use_prev_marking;
-
-public:
-  ObjInRegionReachablePrinterClosure(CMBitMapRO*   bitmap,
-                                     outputStream* out,
-                                     bool          use_prev_marking) :
-    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking) { }
-
-  void do_object(oop o) {
-    ReachablePrinterOopClosure oopCl(_bitmap, _out, _use_prev_marking);
-
-    _out->print_cr("  obj "PTR_FORMAT" (over TAMS)", (void*) o);
-    o->oop_iterate(&oopCl);
-    _out->print_cr("");
-  }
-};
-
-class RegionReachablePrinterClosure : public HeapRegionClosure {
-private:
-  CMBitMapRO*   _bitmap;
-  outputStream* _out;
-  bool          _use_prev_marking;
+  bool          _all;
 
 public:
   bool doHeapRegion(HeapRegion* hr) {
@@ -2025,22 +2092,35 @@
     }
     _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" "
                    "TAMS: "PTR_FORMAT, b, e, t, p);
-    _out->print_cr("");
-
-    ObjInRegionReachablePrinterClosure ocl(_bitmap, _out, _use_prev_marking);
-    hr->object_iterate_mem_careful(MemRegion(p, t), &ocl);
+    _out->cr();
+
+    HeapWord* from = b;
+    HeapWord* to   = t;
+
+    if (to > from) {
+      _out->print_cr("Objects in ["PTR_FORMAT", "PTR_FORMAT"]", from, to);
+      _out->cr();
+      PrintReachableObjectClosure ocl(_bitmap, _out,
+                                      _use_prev_marking, _all, hr);
+      hr->object_iterate_mem_careful(MemRegion(from, to), &ocl);
+      _out->cr();
+    }
 
     return false;
   }
 
-  RegionReachablePrinterClosure(CMBitMapRO*   bitmap,
-                                outputStream* out,
-                                bool          use_prev_marking) :
-    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking) { }
+  PrintReachableRegionClosure(CMBitMapRO*   bitmap,
+                              outputStream* out,
+                              bool          use_prev_marking,
+                              bool          all) :
+    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking), _all(all) { }
 };
 
-void ConcurrentMark::print_reachable(bool use_prev_marking, const char* str) {
-  gclog_or_tty->print_cr("== Doing reachable object dump... ");
+void ConcurrentMark::print_reachable(const char* str,
+                                     bool use_prev_marking,
+                                     bool all) {
+  gclog_or_tty->cr();
+  gclog_or_tty->print_cr("== Doing heap dump... ");
 
   if (G1PrintReachableBaseFile == NULL) {
     gclog_or_tty->print_cr("  #### error: no base file defined");
@@ -2075,19 +2155,14 @@
   out->print_cr("-- USING %s", (use_prev_marking) ? "PTAMS" : "NTAMS");
   out->cr();
 
-  RegionReachablePrinterClosure rcl(bitmap, out, use_prev_marking);
-  out->print_cr("--- ITERATING OVER REGIONS WITH TAMS < TOP");
-  out->cr();
-  _g1h->heap_region_iterate(&rcl);
+  out->print_cr("--- ITERATING OVER REGIONS");
   out->cr();
-
-  ReachablePrinterClosure cl(bitmap, out, use_prev_marking);
-  out->print_cr("--- ITERATING OVER MARKED OBJECTS ON THE BITMAP");
-  out->cr();
-  bitmap->iterate(&cl);
+  PrintReachableRegionClosure rcl(bitmap, out, use_prev_marking, all);
+  _g1h->heap_region_iterate(&rcl);
   out->cr();
 
   gclog_or_tty->print_cr("  done");
+  gclog_or_tty->flush();
 }
 
 #endif // PRODUCT
@@ -2586,7 +2661,11 @@
 
   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
   satb_mq_set.abandon_partial_marking();
-  satb_mq_set.set_active_all_threads(false);
+  // This can be called either during or outside marking; we'll read
+  // the expected_active value from the SATB queue set.
+  satb_mq_set.set_active_all_threads(
+                                 false, /* new active value */
+                                 satb_mq_set.is_active() /* expected_active */);
 }
 
 static void print_ms_time_info(const char* prefix, const char* name,
@@ -3352,7 +3431,7 @@
       gclog_or_tty->print_cr("[%d] draining region stack, size = %d",
                              _task_id, _cm->region_stack_size());
 
-    MemRegion mr = _cm->region_stack_pop();
+    MemRegion mr = _cm->region_stack_pop_with_lock();
     // it returns MemRegion() if the pop fails
     statsOnly(if (mr.start() != NULL) ++_region_stack_pops );
 
@@ -3373,7 +3452,7 @@
         if (has_aborted())
           mr = MemRegion();
         else {
-          mr = _cm->region_stack_pop();
+          mr = _cm->region_stack_pop_with_lock();
           // it returns MemRegion() if the pop fails
           statsOnly(if (mr.start() != NULL) ++_region_stack_pops );
         }
@@ -3406,7 +3485,7 @@
           }
           // Now push the part of the region we didn't scan on the
           // region stack to make sure a task scans it later.
-          _cm->region_stack_push(newRegion);
+          _cm->region_stack_push_with_lock(newRegion);
         }
         // break from while
         mr = MemRegion();
@@ -3704,7 +3783,14 @@
         // enough to point to the next possible object header (the
         // bitmap knows by how much we need to move it as it knows its
         // granularity).
-        move_finger_to(_nextMarkBitMap->nextWord(_finger));
+        assert(_finger < _region_limit, "invariant");
+        HeapWord* new_finger = _nextMarkBitMap->nextWord(_finger);
+        // Check if bitmap iteration was aborted while scanning the last object
+        if (new_finger >= _region_limit) {
+          giveup_current_region();
+        } else {
+          move_finger_to(new_finger);
+        }
       }
     }
     // At this point we have either completed iterating over the
--- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,8 +24,8 @@
 
 class G1CollectedHeap;
 class CMTask;
-typedef GenericTaskQueue<oop> CMTaskQueue;
-typedef GenericTaskQueueSet<oop> CMTaskQueueSet;
+typedef GenericTaskQueue<oop>            CMTaskQueue;
+typedef GenericTaskQueueSet<CMTaskQueue> CMTaskQueueSet;
 
 // A generic CM bit map.  This is essentially a wrapper around the BitMap
 // class, with one bit per (1<<_shifter) HeapWords.
@@ -252,9 +252,19 @@
   // with other "push" operations (no pops).
   void push(MemRegion mr);
 
+#if 0
+  // This is currently not used. See the comment in the .cpp file.
+
   // Lock-free; assumes that it will only be called in parallel
   // with other "pop" operations (no pushes).
   MemRegion pop();
+#endif // 0
+
+  // These two are the implementations that use a lock. They can be
+  // called concurrently with each other but they should not be called
+  // concurrently with the lock-free versions (push() / pop()).
+  void push_with_lock(MemRegion mr);
+  MemRegion pop_with_lock();
 
   bool isEmpty()    { return _index == 0; }
   bool isFull()     { return _index == _capacity; }
@@ -540,6 +550,10 @@
 
   // Manipulation of the region stack
   bool region_stack_push(MemRegion mr) {
+    // Currently we only call the lock-free version during evacuation
+    // pauses.
+    assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
+
     _regionStack.push(mr);
     if (_regionStack.overflow()) {
       set_has_overflown();
@@ -547,7 +561,33 @@
     }
     return true;
   }
-  MemRegion region_stack_pop()          { return _regionStack.pop(); }
+#if 0
+  // Currently this is not used. See the comment in the .cpp file.
+  MemRegion region_stack_pop() { return _regionStack.pop(); }
+#endif // 0
+
+  bool region_stack_push_with_lock(MemRegion mr) {
+    // Currently we only call the lock-based version during either
+    // concurrent marking or remark.
+    assert(!SafepointSynchronize::is_at_safepoint() || !concurrent(),
+           "if we are at a safepoint it should be the remark safepoint");
+
+    _regionStack.push_with_lock(mr);
+    if (_regionStack.overflow()) {
+      set_has_overflown();
+      return false;
+    }
+    return true;
+  }
+  MemRegion region_stack_pop_with_lock() {
+    // Currently we only call the lock-based version during either
+    // concurrent marking or remark.
+    assert(!SafepointSynchronize::is_at_safepoint() || !concurrent(),
+           "if we are at a safepoint it should be the remark safepoint");
+
+    return _regionStack.pop_with_lock();
+  }
+
   int region_stack_size()               { return _regionStack.size(); }
   bool region_stack_overflow()          { return _regionStack.overflow(); }
   bool region_stack_empty()             { return _regionStack.isEmpty(); }
@@ -612,11 +652,24 @@
   // we do nothing.
   void markAndGrayObjectIfNecessary(oop p);
 
-  // This iterates over the marking bitmap (either prev or next) and
-  // prints out all objects that are marked on the bitmap and indicates
-  // whether what they point to is also marked or not. It also iterates
-  // the objects over TAMS (either prev or next).
-  void print_reachable(bool use_prev_marking, const char* str);
+  // It iterates over the heap and, for each object it comes across,
+  // dumps the contents of its reference fields, as well as
+  // liveness information for the object and its referents. The dump
+  // will be written to a file with the following name:
+  // G1PrintReachableBaseFile + "." + str. use_prev_marking decides
+  // whether the prev (use_prev_marking == true) or next
+  // (use_prev_marking == false) marking information will be used to
+  // determine the liveness of each object / referent. If all is true,
+  // all objects in the heap will be dumped, otherwise only the live
+  // ones. In the dump the following symbols / abbreviations are used:
+  //   M : an explicitly live object (its bitmap bit is set)
+  //   > : an implicitly live object (over tams)
+  //   O : an object outside the G1 heap (typically: in the perm gen)
+  //   NOT : a reference field whose referent is not live
+  //   AND MARKED : indicates that an object is both explicitly and
+  //   implicitly live (it should be one or the other, not both)
+  void print_reachable(const char* str,
+                       bool use_prev_marking, bool all) PRODUCT_RETURN;
 
   // Clear the next marking bitmap (will be called concurrently).
   void clearNextBitmap();
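
Given that legend, a fragment of such a dump might look like the following
(addresses entirely made up, for illustration only):

    ** [0x7f2a00000, 0x7f2b00000] top: 0x7f2a48e10 TAMS: 0x7f2a30000
    Objects in [0x7f2a00000, 0x7f2a48e10]
     0x7f2a00120 M
      0x7f2a00128: 0x7f2a30410 >
      0x7f2a00130: 0x6e1000080 O
      0x7f2a00138: 0x7f2a00500 NOT
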
@@ -680,6 +733,19 @@
   // to determine whether any heap regions are located above the finger.
   void registerCSetRegion(HeapRegion* hr);
 
+  // Registers the maximum region-end associated with a set of
+  // regions with CM. Again this is used to determine whether any
+  // heap regions are located above the finger.
+  void register_collection_set_finger(HeapWord* max_finger) {
+    // max_finger is the highest heap region end of the regions currently
+    // contained in the collection set. If this value is larger than
+    // _min_finger then we need to gray objects.
+    // This routine is like registerCSetRegion but for an entire
+    // collection of regions.
+    if (max_finger > _min_finger)
+      _should_gray_objects = true;
+  }
+
   // Returns "true" if at least one mark has been completed.
   bool at_least_one_mark_complete() { return _at_least_one_mark_complete; }
 
--- a/src/share/vm/gc_implementation/g1/concurrentMarkThread.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMarkThread.hpp	Thu May 13 18:26:35 2010 -0700
@@ -42,8 +42,8 @@
 
  private:
   ConcurrentMark*                  _cm;
-  bool                             _started;
-  bool                             _in_progress;
+  volatile bool                    _started;
+  volatile bool                    _in_progress;
 
   void sleepBeforeNextCycle();
 
@@ -67,15 +67,25 @@
   // Counting virtual time so far.
   double vtime_count_accum() { return _vtime_count_accum; }
 
-  ConcurrentMark* cm()                           { return _cm;     }
+  ConcurrentMark* cm()     { return _cm; }
+
+  void set_started()       { _started = true;  }
+  void clear_started()     { _started = false; }
+  bool started()           { return _started;  }
+
+  void set_in_progress()   { _in_progress = true;  }
+  void clear_in_progress() { _in_progress = false; }
+  bool in_progress()       { return _in_progress;  }
 
-  void            set_started()                  { _started = true;   }
-  void            clear_started()                { _started = false;  }
-  bool            started()                      { return _started;   }
-
-  void            set_in_progress()              { _in_progress = true;   }
-  void            clear_in_progress()            { _in_progress = false;  }
-  bool            in_progress()                  { return _in_progress;   }
+  // This flag returns true from the moment a marking cycle is
+  // initiated (during the initial-mark pause when started() is set)
+  // to the moment when the cycle completes (just after the next
+  // marking bitmap has been cleared and in_progress() is
+  // cleared). While this flag is true we will not start another cycle
+  // so that cycles do not overlap. We cannot use just in_progress()
+  // as the CM thread might take some time to wake up before noticing
+  // that started() is set and setting in_progress().
+  bool during_cycle()      { return started() || in_progress(); }
 
   // Yield for GC
   void            yield();
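
A compilable sketch of the race during_cycle() is guarding against, with the
two volatile fields above reduced to hypothetical atomics:

    #include <atomic>

    std::atomic<bool> g_started(false);       // set in the initial-mark pause
    std::atomic<bool> g_in_progress(false);   // set once the CM thread wakes up

    // Mirrors during_cycle(): true from set_started() until the next
    // bitmap is cleared and in_progress is dropped. Testing g_in_progress
    // alone would leave a window -- after the pause sets started but
    // before the CM thread wakes up -- in which a second cycle could start.
    bool during_cycle_sketch() {
      return g_started.load() || g_in_progress.load();
    }
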
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,7 @@
 // turn it on so that the contents of the young list (scan-only /
 // to-be-collected) are printed at "strategic" points before / during
 // / after the collection --- this is useful for debugging
-#define SCAN_ONLY_VERBOSE 0
+#define YOUNG_LIST_VERBOSE 0
 // CURRENT STATUS
 // This file is under construction.  Search for "FIXME".
 
@@ -133,8 +133,7 @@
 
 YoungList::YoungList(G1CollectedHeap* g1h)
   : _g1h(g1h), _head(NULL),
-    _scan_only_head(NULL), _scan_only_tail(NULL), _curr_scan_only(NULL),
-    _length(0), _scan_only_length(0),
+    _length(0),
     _last_sampled_rs_lengths(0),
     _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0)
 {
@@ -166,48 +165,6 @@
   ++_survivor_length;
 }
 
-HeapRegion* YoungList::pop_region() {
-  while (_head != NULL) {
-    assert( length() > 0, "list should not be empty" );
-    HeapRegion* ret = _head;
-    _head = ret->get_next_young_region();
-    ret->set_next_young_region(NULL);
-    --_length;
-    assert(ret->is_young(), "region should be very young");
-
-    // Replace 'Survivor' region type with 'Young'. So the region will
-    // be treated as a young region and will not be 'confused' with
-    // newly created survivor regions.
-    if (ret->is_survivor()) {
-      ret->set_young();
-    }
-
-    if (!ret->is_scan_only()) {
-      return ret;
-    }
-
-    // scan-only, we'll add it to the scan-only list
-    if (_scan_only_tail == NULL) {
-      guarantee( _scan_only_head == NULL, "invariant" );
-
-      _scan_only_head = ret;
-      _curr_scan_only = ret;
-    } else {
-      guarantee( _scan_only_head != NULL, "invariant" );
-      _scan_only_tail->set_next_young_region(ret);
-    }
-    guarantee( ret->get_next_young_region() == NULL, "invariant" );
-    _scan_only_tail = ret;
-
-    // no need to be tagged as scan-only any more
-    ret->set_young();
-
-    ++_scan_only_length;
-  }
-  assert( length() == 0, "list should be empty" );
-  return NULL;
-}
-
 void YoungList::empty_list(HeapRegion* list) {
   while (list != NULL) {
     HeapRegion* next = list->get_next_young_region();
@@ -225,12 +182,6 @@
   _head = NULL;
   _length = 0;
 
-  empty_list(_scan_only_head);
-  _scan_only_head = NULL;
-  _scan_only_tail = NULL;
-  _scan_only_length = 0;
-  _curr_scan_only = NULL;
-
   empty_list(_survivor_head);
   _survivor_head = NULL;
   _survivor_tail = NULL;
@@ -248,11 +199,11 @@
   HeapRegion* curr = _head;
   HeapRegion* last = NULL;
   while (curr != NULL) {
-    if (!curr->is_young() || curr->is_scan_only()) {
+    if (!curr->is_young()) {
       gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" "
-                             "incorrectly tagged (%d, %d)",
+                             "incorrectly tagged (y: %d, surv: %d)",
                              curr->bottom(), curr->end(),
-                             curr->is_young(), curr->is_scan_only());
+                             curr->is_young(), curr->is_survivor());
       ret = false;
     }
     ++length;
@@ -267,47 +218,10 @@
                            length, _length);
   }
 
-  bool scan_only_ret = true;
-  length = 0;
-  curr = _scan_only_head;
-  last = NULL;
-  while (curr != NULL) {
-    if (!curr->is_young() || curr->is_scan_only()) {
-      gclog_or_tty->print_cr("### SCAN-ONLY REGION "PTR_FORMAT"-"PTR_FORMAT" "
-                             "incorrectly tagged (%d, %d)",
-                             curr->bottom(), curr->end(),
-                             curr->is_young(), curr->is_scan_only());
-      scan_only_ret = false;
-    }
-    ++length;
-    last = curr;
-    curr = curr->get_next_young_region();
-  }
-  scan_only_ret = scan_only_ret && (length == _scan_only_length);
-
-  if ( (last != _scan_only_tail) ||
-       (_scan_only_head == NULL && _scan_only_tail != NULL) ||
-       (_scan_only_head != NULL && _scan_only_tail == NULL) ) {
-     gclog_or_tty->print_cr("## _scan_only_tail is set incorrectly");
-     scan_only_ret = false;
-  }
-
-  if (_curr_scan_only != NULL && _curr_scan_only != _scan_only_head) {
-    gclog_or_tty->print_cr("### _curr_scan_only is set incorrectly");
-    scan_only_ret = false;
-   }
-
-  if (!scan_only_ret) {
-    gclog_or_tty->print_cr("### SCAN-ONLY LIST seems not well formed!");
-    gclog_or_tty->print_cr("###   list has %d entries, _scan_only_length is %d",
-                  length, _scan_only_length);
-  }
-
-  return ret && scan_only_ret;
+  return ret;
 }
 
-bool YoungList::check_list_empty(bool ignore_scan_only_list,
-                                 bool check_sample) {
+bool YoungList::check_list_empty(bool check_sample) {
   bool ret = true;
 
   if (_length != 0) {
@@ -327,28 +241,7 @@
     gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
   }
 
-  if (ignore_scan_only_list)
-    return ret;
-
-  bool scan_only_ret = true;
-  if (_scan_only_length != 0) {
-    gclog_or_tty->print_cr("### SCAN-ONLY LIST should have 0 length, not %d",
-                  _scan_only_length);
-    scan_only_ret = false;
-  }
-  if (_scan_only_head != NULL) {
-    gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL head");
-     scan_only_ret = false;
-  }
-  if (_scan_only_tail != NULL) {
-    gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL tail");
-    scan_only_ret = false;
-  }
-  if (!scan_only_ret) {
-    gclog_or_tty->print_cr("### SCAN-ONLY LIST does not seem empty");
-  }
-
-  return ret && scan_only_ret;
+  return ret;
 }
 
 void
@@ -365,7 +258,18 @@
 void
 YoungList::rs_length_sampling_next() {
   assert( _curr != NULL, "invariant" );
-  _sampled_rs_lengths += _curr->rem_set()->occupied();
+  size_t rs_length = _curr->rem_set()->occupied();
+
+  _sampled_rs_lengths += rs_length;
+
+  // The current region may not yet have been added to the
+  // incremental collection set (it gets added when it is
+  // retired as the current allocation region).
+  if (_curr->in_collection_set()) {
+    // Update the collection set policy information for this region
+    _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length);
+  }
+
   _curr = _curr->get_next_young_region();
   if (_curr == NULL) {
     _last_sampled_rs_lengths = _sampled_rs_lengths;
@@ -375,54 +279,46 @@
 
 void
 YoungList::reset_auxilary_lists() {
-  // We could have just "moved" the scan-only list to the young list.
-  // However, the scan-only list is ordered according to the region
-  // age in descending order, so, by moving one entry at a time, we
-  // ensure that it is recreated in ascending order.
-
   guarantee( is_empty(), "young list should be empty" );
   assert(check_list_well_formed(), "young list should be well formed");
 
   // Add survivor regions to SurvRateGroup.
   _g1h->g1_policy()->note_start_adding_survivor_regions();
   _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
+
   for (HeapRegion* curr = _survivor_head;
        curr != NULL;
        curr = curr->get_next_young_region()) {
     _g1h->g1_policy()->set_region_survivors(curr);
+
+    // The region is a non-empty survivor so let's add it to
+    // the incremental collection set for the next evacuation
+    // pause.
+    _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
   }
   _g1h->g1_policy()->note_stop_adding_survivor_regions();
 
+  _head   = _survivor_head;
+  _length = _survivor_length;
   if (_survivor_head != NULL) {
-    _head           = _survivor_head;
-    _length         = _survivor_length + _scan_only_length;
-    _survivor_tail->set_next_young_region(_scan_only_head);
-  } else {
-    _head           = _scan_only_head;
-    _length         = _scan_only_length;
-  }
-
-  for (HeapRegion* curr = _scan_only_head;
-       curr != NULL;
-       curr = curr->get_next_young_region()) {
-    curr->recalculate_age_in_surv_rate_group();
-  }
-  _scan_only_head   = NULL;
-  _scan_only_tail   = NULL;
-  _scan_only_length = 0;
-  _curr_scan_only   = NULL;
-
-  _survivor_head    = NULL;
-  _survivor_tail   = NULL;
-  _survivor_length  = 0;
+    assert(_survivor_tail != NULL, "tail should be set when head is");
+    assert(_survivor_length > 0, "invariant");
+    _survivor_tail->set_next_young_region(NULL);
+  }
+
+  // Don't clear the survivor list handles until the start of
+  // the next evacuation pause - we need them in order to re-tag
+  // the survivor regions from this evacuation pause as 'young'
+  // at the start of the next.
+
   _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
 
   assert(check_list_well_formed(), "young list should be well formed");
 }
 
 void YoungList::print() {
-  HeapRegion* lists[] = {_head,   _scan_only_head, _survivor_head};
-  const char* names[] = {"YOUNG", "SCAN-ONLY",     "SURVIVOR"};
+  HeapRegion* lists[] = {_head,   _survivor_head};
+  const char* names[] = {"YOUNG", "SURVIVOR"};
 
   for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
     gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
@@ -431,7 +327,7 @@
       gclog_or_tty->print_cr("  empty");
     while (curr != NULL) {
       gclog_or_tty->print_cr("  [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
-                             "age: %4d, y: %d, s-o: %d, surv: %d",
+                             "age: %4d, y: %d, surv: %d",
                              curr->bottom(), curr->end(),
                              curr->top(),
                              curr->prev_top_at_mark_start(),
@@ -439,7 +335,6 @@
                              curr->top_at_conc_mark_count(),
                              curr->age_in_surv_rate_group_cond(),
                              curr->is_young(),
-                             curr->is_scan_only(),
                              curr->is_survivor());
       curr = curr->get_next_young_region();
     }
@@ -583,7 +478,7 @@
            res->zero_fill_state() == HeapRegion::Allocated)),
          "Non-young alloc Regions must be zero filled (and non-H)");
 
-  if (G1PrintRegions) {
+  if (G1PrintHeapRegions) {
     if (res != NULL) {
       gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
                              "top "PTR_FORMAT,
@@ -707,6 +602,12 @@
     // region below.
     if (_cur_alloc_region != NULL) {
       // We're finished with the _cur_alloc_region.
+      // As we're building (at least the young portion of) the collection
+      // set incrementally, we'll add the current allocation region to
+      // the collection set here.
+      if (_cur_alloc_region->is_young()) {
+        g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region);
+      }
       _summary_bytes_used += _cur_alloc_region->used();
       _cur_alloc_region = NULL;
     }
@@ -820,6 +721,12 @@
       _free_regions++;
       free_region(_cur_alloc_region);
     } else {
+      // As we're building (at least the young portion of) the collection
+      // set incrementally, we'll add the current allocation region to
+      // the collection set here.
+      if (_cur_alloc_region->is_young()) {
+        g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region);
+      }
       _summary_bytes_used += _cur_alloc_region->used();
     }
     _cur_alloc_region = NULL;
@@ -902,6 +809,10 @@
 
 void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
                                     size_t word_size) {
+  if (GC_locker::check_active_before_gc()) {
+    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
+  }
+
   ResourceMark rm;
 
   if (PrintHeapAtGC) {
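
The "JNI GetXXXCritical" note refers to JNI's critical-region functions,
which keep GC locked out while native code holds a raw pointer into the
heap. A minimal native method showing the pattern (class and method names
made up):

    #include <jni.h>

    extern "C" JNIEXPORT jlong JNICALL
    Java_Demo_sumBytes(JNIEnv* env, jclass, jbyteArray arr) {
      // Entering the critical region: the VM may hand back a direct
      // pointer, so GC_locker blocks collections such as the one above.
      jbyte* p = (jbyte*) env->GetPrimitiveArrayCritical(arr, NULL);
      if (p == NULL) return 0;
      jlong sum = 0;
      jsize len = env->GetArrayLength(arr);
      for (jsize i = 0; i < len; i++) sum += p[i];
      // Leaving the critical region re-enables GC; a collection that
      // check_active_before_gc() skipped can then be retried.
      env->ReleasePrimitiveArrayCritical(arr, p, JNI_ABORT);
      return sum;
    }
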
@@ -909,16 +820,16 @@
   }
 
   if (full && DisableExplicitGC) {
-    gclog_or_tty->print("\n\n\nDisabling Explicit GC\n\n\n");
     return;
   }
 
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
 
-  if (GC_locker::is_active()) {
-    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
-  }
+  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
+                           collector_policy()->should_clear_all_soft_refs();
+
+  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
 
   {
     IsGCActiveMark x;
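
ClearedAllSoftRefs is an RAII helper: whichever way do_collection() exits,
its destructor tells the policy whether this GC promised to clear all soft
references. A sketch of the shape (assumed, not the actual
collectorPolicy.hpp definition):

    struct PolicySketch { bool all_soft_refs_cleared; };

    class ClearedAllSoftRefsSketch {
      bool          _clear_all;
      PolicySketch* _policy;
    public:
      ClearedAllSoftRefsSketch(bool clear_all, PolicySketch* policy)
        : _clear_all(clear_all), _policy(policy) {}
      // Runs on every exit path, including early returns, so the policy
      // stays consistent even if the collection bails out part-way.
      ~ClearedAllSoftRefsSketch() {
        if (_clear_all) _policy->all_soft_refs_cleared = true;
      }
    };
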
@@ -926,7 +837,8 @@
     // Timing
     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    TraceTime t(full ? "Full GC (System.gc())" : "Full GC", PrintGC, true, gclog_or_tty);
+    TraceTime t(full ? "Full GC (System.gc())" : "Full GC",
+                PrintGC, true, gclog_or_tty);
 
     TraceMemoryManagerStats tms(true /* fullGC */);
 
@@ -970,6 +882,15 @@
     g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS();
     tear_down_region_lists();
     set_used_regions_to_need_zero_fill();
+
+    // We may have added regions to the current incremental collection
+    // set between the last GC or pause and now. We need to clear the
+    // incremental collection set and then start rebuilding it afresh
+    // after this full GC.
+    abandon_collection_set(g1_policy()->inc_cset_head());
+    g1_policy()->clear_incremental_cset();
+    g1_policy()->stop_incremental_cset_building();
+
     if (g1_policy()->in_young_gc_mode()) {
       empty_young_list();
       g1_policy()->set_full_young_gcs(true);
@@ -985,12 +906,12 @@
     ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL);
 
     ref_processor()->enable_discovery();
-    ref_processor()->setup_policy(clear_all_soft_refs);
+    ref_processor()->setup_policy(do_clear_all_soft_refs);
 
     // Do collection work
     {
       HandleMark hm;  // Discard invalid handles created during gc
-      G1MarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
+      G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs);
     }
     // Because freeing humongous regions may have added some unclean
     // regions, it is necessary to tear down again before rebuilding.
@@ -1053,6 +974,15 @@
       perm()->compute_new_size();
     }
 
+    // Start a new incremental collection set for the next pause
+    assert(g1_policy()->collection_set() == NULL, "must be");
+    g1_policy()->start_incremental_cset_building();
+
+    // Clear the _cset_fast_test bitmap in anticipation of adding
+    // regions to the incremental collection set for the next
+    // evacuation pause.
+    clear_cset_fast_test();
+
     double end = os::elapsedTime();
     g1_policy()->record_full_collection_end();
 
@@ -1071,7 +1001,9 @@
 
   if (g1_policy()->in_young_gc_mode()) {
     _young_list->reset_sampled_info();
-    assert( check_young_list_empty(false, false),
+    // At this point there should be no regions in the
+    // entire heap tagged as young.
+    assert( check_young_list_empty(true /* check_heap */),
             "young list should be empty at this point");
   }
 
@@ -1208,6 +1140,9 @@
     return result;
   }
 
+  assert(!collector_policy()->should_clear_all_soft_refs(),
+    "Flag should have been handled and cleared prior to this point");
+
   // What else?  We might try synchronous finalization later.  If the total
   // space available is large enough for the allocation, then a more
   // complete compaction phase than we've tried so far might be
@@ -1565,6 +1500,20 @@
 
   _g1h = this;
 
+  _in_cset_fast_test_length = max_regions();
+  _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
+
+  // We're biasing _in_cset_fast_test to avoid subtracting the
+  // beginning of the heap every time we want to index; basically
+  // it's the same as what we do with the card table.
+  _in_cset_fast_test = _in_cset_fast_test_base -
+               ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
+
+  // Clear the _cset_fast_test bitmap in anticipation of adding
+  // regions to the incremental collection set for the first
+  // evacuation pause.
+  clear_cset_fast_test();
+
   // Create the ConcurrentMark data structure and thread.
   // (Must do this late, so that "max_regions" is defined.)
   _cm       = new ConcurrentMark(heap_rs, (int) max_regions());
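
The biasing comment deserves a worked example: subtract the heap start's
region index once, and every query then indexes straight off the address. A
self-contained toy version (region size and addresses invented):

    #include <cstdlib>

    int main() {
      const size_t kLogRegionBytes = 20;          // pretend 1 MB regions
      const size_t kHeapStart      = 0x40000000;  // pretend reserved base
      const size_t kNumRegions     = 16;

      bool* base = (bool*) calloc(kNumRegions, sizeof(bool));
      // One-time bias, the same trick as the card table (the biased
      // pointer itself is out of range and never dereferenced directly).
      bool* biased = base - (kHeapStart >> kLogRegionBytes);

      // An address inside region #3 of the toy heap:
      size_t addr = kHeapStart + 3 * (1u << kLogRegionBytes) + 0x123;
      bool in_cset = biased[addr >> kLogRegionBytes];   // same as base[3]

      free(base);
      return in_cset ? 1 : 0;
    }
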
@@ -2102,18 +2051,21 @@
 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
   // Return the remaining space in the cur alloc region, but not less than
   // the min TLAB size.
-  // Also, no more than half the region size, since we can't allow tlabs to
-  // grow big enough to accomodate humongous objects.
-
-  // We need to story it locally, since it might change between when we
-  // test for NULL and when we use it later.
+
+  // Also, this value can be at most the humongous object threshold,
+  // since we can't allow TLABs to grow big enough to accommodate
+  // humongous objects.
+
+  // We need to store the cur alloc region locally, since it might change
+  // between when we test for NULL and when we use it later.
   ContiguousSpace* cur_alloc_space = _cur_alloc_region;
+  size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize;
+
   if (cur_alloc_space == NULL) {
-    return HeapRegion::GrainBytes/2;
+    return max_tlab_size;
   } else {
-    return MAX2(MIN2(cur_alloc_space->free(),
-                     (size_t)(HeapRegion::GrainBytes/2)),
-                (size_t)MinTLABSize);
+    return MIN2(MAX2(cur_alloc_space->free(), (size_t)MinTLABSize),
+                max_tlab_size);
   }
 }
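
Sanity-checking the new bound: the returned value is clamped into
[MinTLABSize, max_tlab_size]. Assuming hypothetical 1 MB regions and the
usual half-region humongous threshold, max_tlab_size would be 512 KB:

    // free = 300 KB   -> MIN2(MAX2(300 KB, MinTLABSize), 512 KB) = 300 KB
    // free = 900 KB   -> capped at 512 KB: a TLAB never reaches humongous size
    // no alloc region -> max_tlab_size, i.e. 512 KB

The rewrite also replaces the flat GrainBytes/2 cap with the humongous
threshold itself, so the bound tracks the region size by definition.
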
 
@@ -2182,8 +2134,10 @@
     assert(o != NULL, "Huh?");
     if (!_g1h->is_obj_dead_cond(o, _use_prev_marking)) {
       o->oop_iterate(&isLive);
-      if (!_hr->obj_allocated_since_prev_marking(o))
-        _live_bytes += (o->size() * HeapWordSize);
+      if (!_hr->obj_allocated_since_prev_marking(o)) {
+        size_t obj_size = o->size();    // Make sure we don't overflow
+        _live_bytes += (obj_size * HeapWordSize);
+      }
     }
   }
   size_t live_bytes() { return _live_bytes; }
@@ -2385,8 +2339,8 @@
       print_on(gclog_or_tty, true /* extended */);
       gclog_or_tty->print_cr("");
       if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
-        concurrent_mark()->print_reachable(use_prev_marking,
-                                           "failed-verification");
+        concurrent_mark()->print_reachable("at-verification-failure",
+                                           use_prev_marking, false /* all */);
       }
       gclog_or_tty->flush();
     }
@@ -2477,7 +2431,7 @@
   if (G1SummarizeRSetStats) {
     g1_rem_set()->print_summary_info();
   }
-  if (G1SummarizeConcurrentMark) {
+  if (G1SummarizeConcMark) {
     concurrent_mark()->print_summary_info();
   }
   if (G1SummarizeZFStats) {
@@ -2655,6 +2609,10 @@
 
 void
 G1CollectedHeap::do_collection_pause_at_safepoint() {
+  if (GC_locker::check_active_before_gc()) {
+    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
+  }
+
   if (PrintHeapAtGC) {
     Universe::print_heap_before_gc();
   }
@@ -2662,6 +2620,11 @@
   {
     ResourceMark rm;
 
+    // This call will decide whether this pause is an initial-mark
+    // pause. If it is, during_initial_mark_pause() will return true
+    // for the duration of this pause.
+    g1_policy()->decide_on_conc_mark_initiation();
+
     char verbose_str[128];
     sprintf(verbose_str, "GC pause ");
     if (g1_policy()->in_young_gc_mode()) {
@@ -2670,7 +2633,7 @@
       else
         strcat(verbose_str, "(partial)");
     }
-    if (g1_policy()->should_initiate_conc_mark())
+    if (g1_policy()->during_initial_mark_pause())
       strcat(verbose_str, " (initial-mark)");
 
     // if PrintGCDetails is on, we'll print long statistics information
@@ -2694,10 +2657,6 @@
              "young list should be well formed");
     }
 
-    if (GC_locker::is_active()) {
-      return; // GC is disabled (e.g. JNI GetXXXCritical operation)
-    }
-
     bool abandoned = false;
     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
       IsGCActiveMark x;
@@ -2733,27 +2692,21 @@
       double start_time_sec = os::elapsedTime();
       size_t start_used_bytes = used();
 
+#if YOUNG_LIST_VERBOSE
+      gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
+      _young_list->print();
+      g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
+#endif // YOUNG_LIST_VERBOSE
+
       g1_policy()->record_collection_pause_start(start_time_sec,
                                                  start_used_bytes);
 
-      guarantee(_in_cset_fast_test == NULL, "invariant");
-      guarantee(_in_cset_fast_test_base == NULL, "invariant");
-      _in_cset_fast_test_length = max_regions();
-      _in_cset_fast_test_base =
-                             NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
-      memset(_in_cset_fast_test_base, false,
-                                     _in_cset_fast_test_length * sizeof(bool));
-      // We're biasing _in_cset_fast_test to avoid subtracting the
-      // beginning of the heap every time we want to index; basically
-      // it's the same with what we do with the card table.
-      _in_cset_fast_test = _in_cset_fast_test_base -
-              ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
-
-#if SCAN_ONLY_VERBOSE
+#if YOUNG_LIST_VERBOSE
+      gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
       _young_list->print();
-#endif // SCAN_ONLY_VERBOSE
-
-      if (g1_policy()->should_initiate_conc_mark()) {
+#endif // YOUNG_LIST_VERBOSE
+
+      if (g1_policy()->during_initial_mark_pause()) {
         concurrent_mark()->checkpointRootsInitialPre();
       }
       save_marks();
@@ -2778,12 +2731,15 @@
       if (mark_in_progress())
         concurrent_mark()->newCSet();
 
-      // Now choose the CS.
-      g1_policy()->choose_collection_set();
-
-      // We may abandon a pause if we find no region that will fit in the MMU
-      // pause.
-      bool abandoned = (g1_policy()->collection_set() == NULL);
+#if YOUNG_LIST_VERBOSE
+      gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
+      _young_list->print();
+      g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
+#endif // YOUNG_LIST_VERBOSE
+
+      // Now choose the CS. We may abandon a pause if we find no
+      // region that will fit in the MMU pause.
+      bool abandoned = g1_policy()->choose_collection_set();
 
       // Nothing to do if we were unable to choose a collection set.
       if (!abandoned) {
@@ -2801,40 +2757,64 @@
 
         // Actually do the work...
         evacuate_collection_set();
+
         free_collection_set(g1_policy()->collection_set());
         g1_policy()->clear_collection_set();
 
-        FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base);
-        // this is more for peace of mind; we're nulling them here and
-        // we're expecting them to be null at the beginning of the next GC
-        _in_cset_fast_test = NULL;
-        _in_cset_fast_test_base = NULL;
-
         cleanup_surviving_young_words();
 
+        // Start a new incremental collection set for the next pause.
+        g1_policy()->start_incremental_cset_building();
+
+        // Clear the _cset_fast_test bitmap in anticipation of adding
+        // regions to the incremental collection set for the next
+        // evacuation pause.
+        clear_cset_fast_test();
+
         if (g1_policy()->in_young_gc_mode()) {
           _young_list->reset_sampled_info();
-          assert(check_young_list_empty(true),
-                 "young list should be empty");
-
-#if SCAN_ONLY_VERBOSE
+
+          // Don't check the whole heap at this point as the
+          // GC alloc regions from this pause have been tagged
+          // as survivors and moved on to the survivor list.
+          // Survivor regions will fail the !is_young() check.
+          assert(check_young_list_empty(false /* check_heap */),
+              "young list should be empty");
+
+#if YOUNG_LIST_VERBOSE
+          gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
           _young_list->print();
-#endif // SCAN_ONLY_VERBOSE
+#endif // YOUNG_LIST_VERBOSE
 
           g1_policy()->record_survivor_regions(_young_list->survivor_length(),
                                           _young_list->first_survivor_region(),
                                           _young_list->last_survivor_region());
+
           _young_list->reset_auxilary_lists();
         }
       } else {
-        if (_in_cset_fast_test != NULL) {
-          assert(_in_cset_fast_test_base != NULL, "Since _in_cset_fast_test isn't");
-          FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base);
-          //  this is more for peace of mind; we're nulling them here and
-          // we're expecting them to be null at the beginning of the next GC
-          _in_cset_fast_test = NULL;
-          _in_cset_fast_test_base = NULL;
-        }
+        // We have abandoned the current collection. This can only happen
+        // if we're not doing young or partially young collections, and
+        // we didn't find an old region that we're able to collect within
+        // the allowed time.
+
+        assert(g1_policy()->collection_set() == NULL, "should be");
+        assert(_young_list->length() == 0, "young list should be empty");
+
+        // This should be a no-op.
+        abandon_collection_set(g1_policy()->inc_cset_head());
+
+        g1_policy()->clear_incremental_cset();
+        g1_policy()->stop_incremental_cset_building();
+
+        // Start a new incremental collection set for the next pause.
+        g1_policy()->start_incremental_cset_building();
+
+        // Clear the _cset_fast_test bitmap in anticipation of adding
+        // regions to the incremental collection set for the next
+        // evacuation pause.
+        clear_cset_fast_test();
+
         // This looks confusing, because the DPT should really be empty
         // at this point -- since we have not done any collection work,
         // there should not be any derived pointers in the table to update;
@@ -2855,7 +2835,7 @@
       }
 
       if (g1_policy()->in_young_gc_mode() &&
-          g1_policy()->should_initiate_conc_mark()) {
+          g1_policy()->during_initial_mark_pause()) {
         concurrent_mark()->checkpointRootsInitialPost();
         set_marking_started();
         // CAUTION: after the doConcurrentMark() call below,
@@ -2868,9 +2848,11 @@
         doConcurrentMark();
       }
 
-#if SCAN_ONLY_VERBOSE
+#if YOUNG_LIST_VERBOSE
+      gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
       _young_list->print();
-#endif // SCAN_ONLY_VERBOSE
+      g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
+#endif // YOUNG_LIST_VERBOSE
 
       double end_time_sec = os::elapsedTime();
       double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
@@ -2928,12 +2910,34 @@
   }
 }
 
+size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
+{
+  size_t gclab_word_size;
+  switch (purpose) {
+    case GCAllocForSurvived:
+      gclab_word_size = YoungPLABSize;
+      break;
+    case GCAllocForTenured:
+      gclab_word_size = OldPLABSize;
+      break;
+    default:
+      assert(false, "unknown GCAllocPurpose");
+      gclab_word_size = OldPLABSize;
+      break;
+  }
+  return gclab_word_size;
+}
+
+
 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) {
   assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose");
   // make sure we don't call set_gc_alloc_region() multiple times on
   // the same region
   assert(r == NULL || !r->is_gc_alloc_region(),
          "shouldn't already be a GC alloc region");
+  assert(r == NULL || !r->isHumongous(),
+         "humongous regions shouldn't be used as GC alloc regions");
+
   HeapWord* original_top = NULL;
   if (r != NULL)
     original_top = r->top();
@@ -3076,12 +3080,17 @@
 
       if (alloc_region->in_collection_set() ||
           alloc_region->top() == alloc_region->end() ||
-          alloc_region->top() == alloc_region->bottom()) {
-        // we will discard the current GC alloc region if it's in the
-        // collection set (it can happen!), if it's already full (no
-        // point in using it), or if it's empty (this means that it
-        // was emptied during a cleanup and it should be on the free
-        // list now).
+          alloc_region->top() == alloc_region->bottom() ||
+          alloc_region->isHumongous()) {
+        // we will discard the current GC alloc region if
+        // * it's in the collection set (it can happen!),
+        // * it's already full (no point in using it),
+        // * it's empty (this means that it was emptied during
+        // a cleanup and it should be on the free list now), or
+        // * it's humongous (this means that it was emptied
+        // during a cleanup and was added to the free list, but
+        // has been subsequently used to allocate a humongous
+        // object that may be less than the region size).
 
         alloc_region = NULL;
       }
@@ -3093,6 +3102,11 @@
     } else {
       // the region was retained from the last collection
       ++_gc_alloc_region_counts[ap];
+      if (G1PrintHeapRegions) {
+        gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
+                               "top "PTR_FORMAT,
+                               alloc_region->hrs_index(), alloc_region->bottom(),
+                               alloc_region->end(), alloc_region->top());
+      }
     }
 
     if (alloc_region != NULL) {
@@ -3480,7 +3494,7 @@
   HeapRegion* r = heap_region_containing(old);
   if (!r->evacuation_failed()) {
     r->set_evacuation_failed(true);
-    if (G1PrintRegions) {
+    if (G1PrintHeapRegions) {
       gclog_or_tty->print("evacuation failed in heap region "PTR_FORMAT" "
                           "["PTR_FORMAT","PTR_FORMAT")\n",
                           r, r->bottom(), r->end());
@@ -3649,6 +3663,8 @@
     _g1_rem(g1h->g1_rem_set()),
     _hash_seed(17), _queue_num(queue_num),
     _term_attempts(0),
+    _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
+    _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
     _age_table(false),
 #if G1_DETAILED_STATS
     _pushes(0), _pops(0), _steals(0),
@@ -3675,6 +3691,9 @@
 
   _overflowed_refs = new OverflowQueue(10);
 
+  _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
+  _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;
+
   _start = os::elapsedTime();
 }
 
@@ -3972,16 +3991,13 @@
 
     OopsInHeapRegionClosure        *scan_root_cl;
     OopsInHeapRegionClosure        *scan_perm_cl;
-    OopsInHeapRegionClosure        *scan_so_cl;
-
-    if (_g1h->g1_policy()->should_initiate_conc_mark()) {
+
+    if (_g1h->g1_policy()->during_initial_mark_pause()) {
       scan_root_cl = &scan_mark_root_cl;
       scan_perm_cl = &scan_mark_perm_cl;
-      scan_so_cl   = &scan_mark_heap_rs_cl;
     } else {
       scan_root_cl = &only_scan_root_cl;
       scan_perm_cl = &only_scan_perm_cl;
-      scan_so_cl   = &only_scan_heap_rs_cl;
     }
 
     pss.start_strong_roots();
@@ -3989,7 +4005,6 @@
                                   SharedHeap::SO_AllClasses,
                                   scan_root_cl,
                                   &push_heap_rs_cl,
-                                  scan_so_cl,
                                   scan_perm_cl,
                                   i);
     pss.end_strong_roots();
@@ -4002,9 +4017,7 @@
       _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
       _g1h->g1_policy()->record_termination_time(i, term_ms);
     }
-    if (G1UseSurvivorSpaces) {
-      _g1h->g1_policy()->record_thread_age_table(pss.age_table());
-    }
+    _g1h->g1_policy()->record_thread_age_table(pss.age_table());
     _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
 
     // Clean up any par-expanded rem sets.
@@ -4053,7 +4066,6 @@
                         SharedHeap::ScanningOption so,
                         OopClosure* scan_non_heap_roots,
                         OopsInHeapRegionClosure* scan_rs,
-                        OopsInHeapRegionClosure* scan_so,
                         OopsInGenClosure* scan_perm,
                         int worker_i) {
   // First scan the strong roots, including the perm gen.
@@ -4073,6 +4085,7 @@
                        &buf_scan_non_heap_roots,
                        &eager_scan_code_roots,
                        &buf_scan_perm);
+
   // Finish up any enqueued closure apps.
   buf_scan_non_heap_roots.done();
   buf_scan_perm.done();
@@ -4095,9 +4108,6 @@
 
   // XXX What should this be doing in the parallel case?
   g1_policy()->record_collection_pause_end_CH_strong_roots();
-  if (scan_so != NULL) {
-    scan_scan_only_set(scan_so, worker_i);
-  }
   // Now scan the complement of the collection set.
   if (scan_rs != NULL) {
     g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
@@ -4111,54 +4121,6 @@
 }
 
 void
-G1CollectedHeap::scan_scan_only_region(HeapRegion* r,
-                                       OopsInHeapRegionClosure* oc,
-                                       int worker_i) {
-  HeapWord* startAddr = r->bottom();
-  HeapWord* endAddr = r->used_region().end();
-
-  oc->set_region(r);
-
-  HeapWord* p = r->bottom();
-  HeapWord* t = r->top();
-  guarantee( p == r->next_top_at_mark_start(), "invariant" );
-  while (p < t) {
-    oop obj = oop(p);
-    p += obj->oop_iterate(oc);
-  }
-}
-
-void
-G1CollectedHeap::scan_scan_only_set(OopsInHeapRegionClosure* oc,
-                                    int worker_i) {
-  double start = os::elapsedTime();
-
-  BufferingOopsInHeapRegionClosure boc(oc);
-
-  FilterInHeapRegionAndIntoCSClosure scan_only(this, &boc);
-  FilterAndMarkInHeapRegionAndIntoCSClosure scan_and_mark(this, &boc, concurrent_mark());
-
-  OopsInHeapRegionClosure *foc;
-  if (g1_policy()->should_initiate_conc_mark())
-    foc = &scan_and_mark;
-  else
-    foc = &scan_only;
-
-  HeapRegion* hr;
-  int n = 0;
-  while ((hr = _young_list->par_get_next_scan_only_region()) != NULL) {
-    scan_scan_only_region(hr, foc, worker_i);
-    ++n;
-  }
-  boc.done();
-
-  double closure_app_s = boc.closure_app_seconds();
-  g1_policy()->record_obj_copy_time(worker_i, closure_app_s * 1000.0);
-  double ms = (os::elapsedTime() - start - closure_app_s)*1000.0;
-  g1_policy()->record_scan_only_time(worker_i, ms, n);
-}
-
-void
 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
                                        OopClosure* non_root_closure) {
   CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
@@ -4356,17 +4318,14 @@
 class G1ParCleanupCTTask : public AbstractGangTask {
   CardTableModRefBS* _ct_bs;
   G1CollectedHeap* _g1h;
-  HeapRegion* volatile _so_head;
   HeapRegion* volatile _su_head;
 public:
   G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
                      G1CollectedHeap* g1h,
-                     HeapRegion* scan_only_list,
                      HeapRegion* survivor_list) :
     AbstractGangTask("G1 Par Cleanup CT Task"),
     _ct_bs(ct_bs),
     _g1h(g1h),
-    _so_head(scan_only_list),
     _su_head(survivor_list)
   { }
 
@@ -4375,14 +4334,13 @@
     while (r = _g1h->pop_dirty_cards_region()) {
       clear_cards(r);
     }
-    // Redirty the cards of the scan-only and survivor regions.
-    dirty_list(&this->_so_head);
+    // Redirty the cards of the survivor regions.
     dirty_list(&this->_su_head);
   }
 
   void clear_cards(HeapRegion* r) {
-    // Cards for Survivor and Scan-Only regions will be dirtied later.
-    if (!r->is_scan_only() && !r->is_survivor()) {
+    // Cards for Survivor regions will be dirtied later.
+    if (!r->is_survivor()) {
       _ct_bs->clear(MemRegion(r->bottom(), r->end()));
     }
   }
@@ -4415,7 +4373,7 @@
   virtual bool doHeapRegion(HeapRegion* r)
   {
     MemRegion mr(r->bottom(), r->end());
-    if (r->is_scan_only() || r->is_survivor()) {
+    if (r->is_survivor()) {
       _ct_bs->verify_dirty_region(mr);
     } else {
       _ct_bs->verify_clean_region(mr);
@@ -4431,8 +4389,8 @@
 
   // Iterate over the dirty cards region list.
   G1ParCleanupCTTask cleanup_task(ct_bs, this,
-                                  _young_list->first_scan_only_region(),
                                   _young_list->first_survivor_region());
+
   if (ParallelGCThreads > 0) {
     set_par_threads(workers()->total_workers());
     workers()->run_task(&cleanup_task);
@@ -4448,12 +4406,12 @@
       }
       r->set_next_dirty_cards_region(NULL);
     }
-    // now, redirty the cards of the scan-only and survivor regions
+    // now, redirty the cards of the survivor regions
     // (it seemed faster to do it this way, instead of iterating over
     // all regions and then clearing / dirtying as appropriate)
-    dirtyCardsForYoungRegions(ct_bs, _young_list->first_scan_only_region());
     dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region());
   }
+
   double elapsed = os::elapsedTime() - start;
   g1_policy()->record_clear_ct_time( elapsed * 1000.0);
 #ifndef PRODUCT
@@ -4474,6 +4432,11 @@
   double young_time_ms     = 0.0;
   double non_young_time_ms = 0.0;
 
+  // Since the collection set is a superset of the young list,
+  // all we need to do to clear the young list is clear its
+  // head and length, and unlink any young regions in the code below
+  _young_list->clear();
+
   G1CollectorPolicy* policy = g1_policy();
 
   double start_sec = os::elapsedTime();
@@ -4517,6 +4480,12 @@
       guarantee( (size_t)index < policy->young_cset_length(), "invariant" );
       size_t words_survived = _surviving_young_words[index];
       cur->record_surv_words_in_group(words_survived);
+
+      // At this point we have 'popped' cur from the collection set
+      // (linked via next_in_collection_set()) but it is still in the
+      // young list (linked via next_young_region()). Clear the
+      // _next_young_region field.
+      cur->set_next_young_region(NULL);
     } else {
       int index = cur->young_index_in_cset();
       guarantee( index == -1, "invariant" );
@@ -4532,7 +4501,6 @@
              "Should not have empty regions in a CS.");
       free_region(cur);
     } else {
-      guarantee( !cur->is_scan_only(), "should not be scan only" );
       cur->uninstall_surv_rate_group();
       if (cur->is_young())
         cur->set_young_index_in_cset(-1);
@@ -4556,6 +4524,27 @@
   policy->record_non_young_free_cset_time_ms(non_young_time_ms);
 }
 
+// This routine is similar to the above but does not record
+// any policy statistics or update free lists; we are abandoning
+// the current incremental collection set in preparation for a
+// full collection. After the full GC we will start to build up
+// the incremental collection set again.
+// This is only called when we're doing a full collection
+// and is immediately followed by the tearing down of the young list.
+
+void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
+  HeapRegion* cur = cs_head;
+
+  while (cur != NULL) {
+    HeapRegion* next = cur->next_in_collection_set();
+    assert(cur->in_collection_set(), "bad CS");
+    cur->set_next_in_collection_set(NULL);
+    cur->set_in_collection_set(false);
+    cur->set_young_index_in_cset(-1);
+    cur = next;
+  }
+}
+
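
A minimal illustration of the unlink walk that abandon_collection_set() performs above, over a singly linked collection set (Node is an illustrative stand-in, not HeapRegion):

    #include <cstddef>

    // Illustrative node type, not the VM's HeapRegion.
    struct Node {
      Node* next_in_cset;
      bool  in_cset;
      int   young_index;
    };

    // Same walk as the hunk above: unlink each region and clear its
    // collection-set state, touching no policy statistics or free lists.
    static void abandon(Node* head) {
      for (Node* cur = head; cur != NULL; ) {
        Node* next = cur->next_in_cset;
        cur->next_in_cset = NULL;
        cur->in_cset      = false;
        cur->young_index  = -1;   // mirrors set_young_index_in_cset(-1)
        cur = next;
      }
    }
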
 HeapRegion*
 G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) {
   assert(ZF_mon->owned_by_self(), "Precondition");
@@ -4922,12 +4911,10 @@
   bool success() { return _success; }
 };
 
-bool G1CollectedHeap::check_young_list_empty(bool ignore_scan_only_list,
-                                             bool check_sample) {
-  bool ret = true;
-
-  ret = _young_list->check_list_empty(ignore_scan_only_list, check_sample);
-  if (!ignore_scan_only_list) {
+bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
+  bool ret = _young_list->check_list_empty(check_sample);
+
+  if (check_heap) {
     NoYoungRegionsClosure closure;
     heap_region_iterate(&closure);
     ret = ret && closure.success();
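
The reworked check_young_list_empty(check_heap, ...) above pairs the list-local check with an optional whole-heap sweep through a closure. A stand-alone sketch of that closure-over-regions pattern, with illustrative types that are not the VM's:

    #include <cstddef>
    #include <vector>

    struct Region { bool is_young; };

    struct RegionClosure {
      virtual ~RegionClosure() {}
      virtual void do_region(Region* r) = 0;
    };

    // Analogue of NoYoungRegionsClosure: succeeds only if no region still
    // claims to be young once the young list has been torn down.
    struct NoYoungRegions : RegionClosure {
      bool success;
      NoYoungRegions() : success(true) {}
      void do_region(Region* r) {
        if (r->is_young) success = false;  // keep scanning every region
      }
    };

    static bool heap_has_no_young(std::vector<Region>& heap) {
      NoYoungRegions cl;
      for (size_t i = 0; i < heap.size(); ++i) cl.do_region(&heap[i]);
      return cl.success;
    }
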
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -56,8 +56,8 @@
 #  define IF_G1_DETAILED_STATS(code)
 #endif
 
-typedef GenericTaskQueue<StarTask>    RefToScanQueue;
-typedef GenericTaskQueueSet<StarTask> RefToScanQueueSet;
+typedef GenericTaskQueue<StarTask>          RefToScanQueue;
+typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet;
 
 typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
 typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
@@ -81,33 +81,29 @@
 
   HeapRegion* _head;
 
-  HeapRegion* _scan_only_head;
-  HeapRegion* _scan_only_tail;
+  HeapRegion* _survivor_head;
+  HeapRegion* _survivor_tail;
+
+  HeapRegion* _curr;
+
   size_t      _length;
-  size_t      _scan_only_length;
+  size_t      _survivor_length;
 
   size_t      _last_sampled_rs_lengths;
   size_t      _sampled_rs_lengths;
-  HeapRegion* _curr;
-  HeapRegion* _curr_scan_only;
 
-  HeapRegion* _survivor_head;
-  HeapRegion* _survivor_tail;
-  size_t      _survivor_length;
-
-  void          empty_list(HeapRegion* list);
+  void         empty_list(HeapRegion* list);
 
 public:
   YoungList(G1CollectedHeap* g1h);
 
-  void          push_region(HeapRegion* hr);
-  void          add_survivor_region(HeapRegion* hr);
-  HeapRegion*   pop_region();
-  void          empty_list();
-  bool          is_empty() { return _length == 0; }
-  size_t        length() { return _length; }
-  size_t        scan_only_length() { return _scan_only_length; }
-  size_t        survivor_length() { return _survivor_length; }
+  void         push_region(HeapRegion* hr);
+  void         add_survivor_region(HeapRegion* hr);
+
+  void         empty_list();
+  bool         is_empty() { return _length == 0; }
+  size_t       length() { return _length; }
+  size_t       survivor_length() { return _survivor_length; }
 
   void rs_length_sampling_init();
   bool rs_length_sampling_more();
@@ -120,22 +116,21 @@
 
   // for development purposes
   void reset_auxilary_lists();
+  void clear() { _head = NULL; _length = 0; }
+
+  void clear_survivors() {
+    _survivor_head    = NULL;
+    _survivor_tail    = NULL;
+    _survivor_length  = 0;
+  }
+
   HeapRegion* first_region() { return _head; }
-  HeapRegion* first_scan_only_region() { return _scan_only_head; }
   HeapRegion* first_survivor_region() { return _survivor_head; }
   HeapRegion* last_survivor_region() { return _survivor_tail; }
-  HeapRegion* par_get_next_scan_only_region() {
-    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
-    HeapRegion* ret = _curr_scan_only;
-    if (ret != NULL)
-      _curr_scan_only = ret->get_next_young_region();
-    return ret;
-  }
 
   // debugging
   bool          check_list_well_formed();
-  bool          check_list_empty(bool ignore_scan_only_list,
-                                 bool check_sample = true);
+  bool          check_list_empty(bool check_sample = true);
   void          print();
 };
 
@@ -232,6 +227,9 @@
   // current collection.
   HeapRegion* _gc_alloc_region_list;
 
+  // Determines PLAB size for a particular allocation purpose.
+  static size_t desired_plab_sz(GCAllocPurpose purpose);
+
   // When called by par thread, require par_alloc_during_gc_lock() to be held.
   void push_gc_alloc_region(HeapRegion* hr);
 
@@ -402,8 +400,7 @@
     assert(_in_cset_fast_test_base != NULL, "sanity");
     assert(r->in_collection_set(), "invariant");
     int index = r->hrs_index();
-    assert(0 <= (size_t) index && (size_t) index < _in_cset_fast_test_length,
-           "invariant");
+    assert(0 <= index && (size_t) index < _in_cset_fast_test_length, "invariant");
     assert(!_in_cset_fast_test_base[index], "invariant");
     _in_cset_fast_test_base[index] = true;
   }
@@ -428,6 +425,12 @@
     }
   }
 
+  void clear_cset_fast_test() {
+    assert(_in_cset_fast_test_base != NULL, "sanity");
+    memset(_in_cset_fast_test_base, false,
+        _in_cset_fast_test_length * sizeof(bool));
+  }
+
 protected:
 
   // Shrink the garbage-first heap by at most the given size (in bytes!).
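
The clear_cset_fast_test() addition above is a single memset over a flat bool table. A minimal stand-alone sketch of the fast-test idea (names illustrative, not HotSpot's): collection-set membership is mirrored per region index so the hot-path query is one array load instead of a pointer chase.

    #include <cassert>
    #include <cstring>

    class InCSetFastTest {
      bool*  _base;    // one flag per heap region, indexed by region number
      size_t _length;
    public:
      explicit InCSetFastTest(size_t max_regions)
          : _base(new bool[max_regions]), _length(max_regions) { clear(); }
      ~InCSetFastTest() { delete[] _base; }

      void set_in_cset(size_t index) {
        assert(index < _length);
        _base[index] = true;
      }
      bool in_cset(size_t index) const {
        assert(index < _length);
        return _base[index];
      }
      // Counterpart of clear_cset_fast_test(): one memset wipes the table.
      void clear() { memset(_base, false, _length * sizeof(bool)); }
    };
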
@@ -473,6 +476,10 @@
   // regions.
   void free_collection_set(HeapRegion* cs_head);
 
+  // Abandon the current collection set without recording policy
+  // statistics or updating free lists.
+  void abandon_collection_set(HeapRegion* cs_head);
+
   // Applies "scan_non_heap_roots" to roots outside the heap,
   // "scan_rs" to roots inside the heap (having done "set_region" to
   // indicate the region in which the root resides), and does "scan_perm"
@@ -485,16 +492,9 @@
                                SharedHeap::ScanningOption so,
                                OopClosure* scan_non_heap_roots,
                                OopsInHeapRegionClosure* scan_rs,
-                               OopsInHeapRegionClosure* scan_so,
                                OopsInGenClosure* scan_perm,
                                int worker_i);
 
-  void scan_scan_only_set(OopsInHeapRegionClosure* oc,
-                          int worker_i);
-  void scan_scan_only_region(HeapRegion* hr,
-                             OopsInHeapRegionClosure* oc,
-                             int worker_i);
-
   // Apply "blk" to all the weak roots of the system.  These include
   // JNI weak roots, the code cache, system dictionary, symbol table,
   // string table, and referents of reachable weak refs.
@@ -1055,7 +1055,12 @@
 
   // Returns "true" iff the given word_size is "very large".
   static bool isHumongous(size_t word_size) {
-    return word_size >= _humongous_object_threshold_in_words;
+    // Note that this has to be strictly greater-than, as the TLABs
+    // are capped at the humongous threshold and we want to
+    // ensure that we don't try to allocate a TLAB as
+    // humongous and that we don't allocate a humongous
+    // object in a TLAB.
+    return word_size > _humongous_object_threshold_in_words;
   }
 
   // Update mod union table with the set of dirty cards.
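
The strictness of the comparison above is load-bearing. A stand-alone check of the boundary, with a made-up threshold value standing in for _humongous_object_threshold_in_words:

    #include <cassert>
    #include <cstddef>

    static const size_t threshold_in_words = 65536;  // illustrative only

    static bool is_humongous(size_t word_size) {
      return word_size > threshold_in_words;  // strictly greater-than
    }

    int main() {
      // A TLAB capped at exactly the threshold is not humongous, so a
      // request can never be treated as both a TLAB and a humongous object.
      assert(!is_humongous(threshold_in_words));
      assert(is_humongous(threshold_in_words + 1));
      return 0;
    }
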
@@ -1128,36 +1133,14 @@
   void set_region_short_lived_locked(HeapRegion* hr);
   // add appropriate methods for any other surv rate groups
 
-  void young_list_rs_length_sampling_init() {
-    _young_list->rs_length_sampling_init();
-  }
-  bool young_list_rs_length_sampling_more() {
-    return _young_list->rs_length_sampling_more();
-  }
-  void young_list_rs_length_sampling_next() {
-    _young_list->rs_length_sampling_next();
-  }
-  size_t young_list_sampled_rs_lengths() {
-    return _young_list->sampled_rs_lengths();
-  }
-
-  size_t young_list_length()   { return _young_list->length(); }
-  size_t young_list_scan_only_length() {
-                                      return _young_list->scan_only_length(); }
-
-  HeapRegion* pop_region_from_young_list() {
-    return _young_list->pop_region();
-  }
-
-  HeapRegion* young_list_first_region() {
-    return _young_list->first_region();
-  }
+  YoungList* young_list() { return _young_list; }
 
   // debugging
   bool check_young_list_well_formed() {
     return _young_list->check_list_well_formed();
   }
-  bool check_young_list_empty(bool ignore_scan_only_list,
+
+  bool check_young_list_empty(bool check_heap,
                               bool check_sample = true);
 
   // *** Stuff related to concurrent marking.  It's not clear to me that so
@@ -1362,12 +1345,18 @@
     return BitsPerWord << shifter();
   }
 
-  static size_t gclab_word_size() {
-    return G1ParallelGCAllocBufferSize / HeapWordSize;
+  size_t gclab_word_size() const {
+    return _gclab_word_size;
   }
 
-  static size_t bitmap_size_in_bits() {
-    size_t bits_in_bitmap = gclab_word_size() >> shifter();
+  // Calculates actual GCLab size in words
+  size_t gclab_real_word_size() const {
+    return bitmap_size_in_bits(pointer_delta(_real_end_word, _start_word))
+           / BitsPerWord;
+  }
+
+  static size_t bitmap_size_in_bits(size_t gclab_word_size) {
+    size_t bits_in_bitmap = gclab_word_size >> shifter();
     // We are going to ensure that the beginning of a word in this
     // bitmap also corresponds to the beginning of a word in the
     // global marking bitmap. To handle the case where a GCLab
@@ -1377,13 +1366,13 @@
     return bits_in_bitmap + BitsPerWord - 1;
   }
 public:
-  GCLabBitMap(HeapWord* heap_start)
-    : BitMap(bitmap_size_in_bits()),
+  GCLabBitMap(HeapWord* heap_start, size_t gclab_word_size)
+    : BitMap(bitmap_size_in_bits(gclab_word_size)),
       _cm(G1CollectedHeap::heap()->concurrent_mark()),
       _shifter(shifter()),
       _bitmap_word_covers_words(bitmap_word_covers_words()),
       _heap_start(heap_start),
-      _gclab_word_size(gclab_word_size()),
+      _gclab_word_size(gclab_word_size),
       _real_start_word(NULL),
       _real_end_word(NULL),
       _start_word(NULL)
@@ -1478,7 +1467,7 @@
       mark_bitmap->mostly_disjoint_range_union(this,
                                 0, // always start from the start of the bitmap
                                 _start_word,
-                                size_in_words());
+                                gclab_real_word_size());
       _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word));
 
 #ifndef PRODUCT
@@ -1490,9 +1479,10 @@
     }
   }
 
-  static size_t bitmap_size_in_words() {
-    return (bitmap_size_in_bits() + BitsPerWord - 1) / BitsPerWord;
+  size_t bitmap_size_in_words() const {
+    return (bitmap_size_in_bits(gclab_word_size()) + BitsPerWord - 1) / BitsPerWord;
   }
+
 };
 
 class G1ParGCAllocBuffer: public ParGCAllocBuffer {
@@ -1502,10 +1492,10 @@
   GCLabBitMap _bitmap;
 
 public:
-  G1ParGCAllocBuffer() :
-    ParGCAllocBuffer(G1ParallelGCAllocBufferSize / HeapWordSize),
+  G1ParGCAllocBuffer(size_t gclab_word_size) :
+    ParGCAllocBuffer(gclab_word_size),
     _during_marking(G1CollectedHeap::heap()->mark_in_progress()),
-    _bitmap(G1CollectedHeap::heap()->reserved_region().start()),
+    _bitmap(G1CollectedHeap::heap()->reserved_region().start(), gclab_word_size),
     _retired(false)
   { }
 
@@ -1544,8 +1534,10 @@
   typedef GrowableArray<StarTask> OverflowQueue;
   OverflowQueue* _overflowed_refs;
 
-  G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount];
-  ageTable           _age_table;
+  G1ParGCAllocBuffer  _surviving_alloc_buffer;
+  G1ParGCAllocBuffer  _tenured_alloc_buffer;
+  G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
+  ageTable            _age_table;
 
   size_t           _alloc_buffer_waste;
   size_t           _undo_waste;
@@ -1614,7 +1606,7 @@
   ageTable*         age_table()       { return &_age_table;       }
 
   G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
-    return &_alloc_buffers[purpose];
+    return _alloc_buffers[purpose];
   }
 
   size_t alloc_buffer_waste()                    { return _alloc_buffer_waste; }
@@ -1679,15 +1671,15 @@
   HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
 
     HeapWord* obj = NULL;
-    if (word_sz * 100 <
-        (size_t)(G1ParallelGCAllocBufferSize / HeapWordSize) *
-                                                  ParallelGCBufferWastePct) {
+    size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
+    if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
       G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
+      assert(gclab_word_size == alloc_buf->word_sz(),
+             "dynamic resizing is not supported");
       add_to_alloc_buffer_waste(alloc_buf->words_remaining());
       alloc_buf->retire(false, false);
 
-      HeapWord* buf =
-        _g1h->par_allocate_during_gc(purpose, G1ParallelGCAllocBufferSize / HeapWordSize);
+      HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
       if (buf == NULL) return NULL; // Let caller handle allocation failure.
       // Otherwise.
       alloc_buf->set_buf(buf);
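
The branch above is the PLAB refill policy in one inequality. A stand-alone rendering, where waste_pct is an assumed stand-in for ParallelGCBufferWastePct:

    #include <cstddef>

    static const size_t waste_pct = 10;  // stand-in for ParallelGCBufferWastePct

    // Refill only when the request is small relative to a PLAB: retiring the
    // current buffer for it then wastes at most waste_pct percent of a PLAB.
    // Written as word_sz * 100 < plab_sz * waste_pct to stay in integer
    // arithmetic, exactly like the hunk above.
    static bool should_refill_plab(size_t word_sz, size_t plab_word_sz) {
      return word_sz * 100 < plab_word_sz * waste_pct;
    }

    // e.g. with a 4096-word PLAB, requests under 410 words trigger a refill;
    // a 4000-word request is allocated directly so it cannot flush a
    // mostly-empty buffer.
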
@@ -1781,9 +1773,9 @@
 
   void retire_alloc_buffers() {
     for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
-      size_t waste = _alloc_buffers[ap].words_remaining();
+      size_t waste = _alloc_buffers[ap]->words_remaining();
       add_to_alloc_buffer_waste(waste);
-      _alloc_buffers[ap].retire(true, false);
+      _alloc_buffers[ap]->retire(true, false);
     }
   }
 
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,10 +42,6 @@
   0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
 };
 
-static double cost_per_scan_only_region_ms_defaults[] = {
-  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
-};
-
 // all the same
 static double fully_young_cards_per_entry_ratio_defaults[] = {
   1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
@@ -125,7 +121,6 @@
   _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
   _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
   _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _cost_per_scan_only_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
   _fully_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
   _partially_young_cards_per_entry_ratio_seq(
                                          new TruncatedSeq(TruncatedSeqLength)),
@@ -133,7 +128,6 @@
   _partially_young_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
   _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
   _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _cost_per_scan_only_region_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
   _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
   _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
   _non_young_other_cost_per_region_ms_seq(
@@ -178,14 +172,30 @@
   // so the hack is to do the cast  QQQ FIXME
   _pauses_btwn_concurrent_mark((size_t)G1PausesBtwnConcMark),
   _n_marks_since_last_pause(0),
-  _conc_mark_initiated(false),
-  _should_initiate_conc_mark(false),
+  _initiate_conc_mark_if_possible(false),
+  _during_initial_mark_pause(false),
   _should_revert_to_full_young_gcs(false),
   _last_full_young_gc(false),
 
   _prev_collection_pause_used_at_end_bytes(0),
 
   _collection_set(NULL),
+  _collection_set_size(0),
+  _collection_set_bytes_used_before(0),
+
+  // Incremental CSet attributes
+  _inc_cset_build_state(Inactive),
+  _inc_cset_head(NULL),
+  _inc_cset_tail(NULL),
+  _inc_cset_size(0),
+  _inc_cset_young_index(0),
+  _inc_cset_bytes_used_before(0),
+  _inc_cset_max_finger(NULL),
+  _inc_cset_recorded_young_bytes(0),
+  _inc_cset_recorded_rs_lengths(0),
+  _inc_cset_predicted_elapsed_time_ms(0.0),
+  _inc_cset_predicted_bytes_to_copy(0),
+
 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 #endif // _MSC_VER
@@ -198,7 +208,9 @@
   _recorded_survivor_regions(0),
   _recorded_survivor_head(NULL),
   _recorded_survivor_tail(NULL),
-  _survivors_age_table(true)
+  _survivors_age_table(true),
+
+  _gc_overhead_perc(0.0)
 
 {
   // Set up the region size and associated fields. Given that the
@@ -207,13 +219,20 @@
   HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
   HeapRegionRemSet::setup_remset_size();
 
+  // Verify PLAB sizes
+  const uint region_size = HeapRegion::GrainWords;
+  if (YoungPLABSize > region_size || OldPLABSize > region_size) {
+    char buffer[128];
+    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most %u",
+                 OldPLABSize > region_size ? "Old" : "Young", region_size);
+    vm_exit_during_initialization(buffer);
+  }
+
   _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
   _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
 
   _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
   _par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads];
-  _par_last_scan_only_times_ms = new double[_parallel_gc_threads];
-  _par_last_scan_only_regions_scanned = new double[_parallel_gc_threads];
 
   _par_last_update_rs_start_times_ms = new double[_parallel_gc_threads];
   _par_last_update_rs_times_ms = new double[_parallel_gc_threads];
@@ -243,8 +262,6 @@
   _pending_card_diff_seq->add(0.0);
   _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
   _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
-  _cost_per_scan_only_region_ms_seq->add(
-                                 cost_per_scan_only_region_ms_defaults[index]);
   _fully_young_cards_per_entry_ratio_seq->add(
                             fully_young_cards_per_entry_ratio_defaults[index]);
   _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
@@ -270,14 +287,15 @@
   _concurrent_mark_cleanup_times_ms->add(0.20);
   _tenuring_threshold = MaxTenuringThreshold;
 
-  if (G1UseSurvivorSpaces) {
-    // if G1FixedSurvivorSpaceSize is 0 which means the size is not
-    // fixed, then _max_survivor_regions will be calculated at
-    // calculate_young_list_target_config during initialization
-    _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;
-  } else {
-    _max_survivor_regions = 0;
-  }
+  // if G1FixedSurvivorSpaceSize is 0 which means the size is not
+  // fixed, then _max_survivor_regions will be calculated at
+  // calculate_young_list_target_length during initialization
+  _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;
+
+  assert(GCTimeRatio > 0,
+         "we should have set it to a default value set_g1_gc_flags() "
+         "if a user set it to 0");
+  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
 
   initialize_all();
 }
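
The _gc_overhead_perc formula above converts GCTimeRatio into a percentage budget. A quick check, assuming a GCTimeRatio of 9 (one part GC time per nine parts application time):

    #include <cassert>
    #include <cmath>

    int main() {
      double gc_time_ratio = 9.0;  // assumed value, set in set_g1_gc_flags()
      double gc_overhead_perc = 100.0 * (1.0 / (1.0 + gc_time_ratio));
      assert(fabs(gc_overhead_perc - 10.0) < 1e-9);  // i.e. a 10% GC budget
      return 0;
    }
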
@@ -296,38 +314,67 @@
   CollectorPolicy::initialize_flags();
 }
 
+// The easiest way to deal with the parsing of the NewSize /
+// MaxNewSize / etc. parameters is to re-use the code in the
+// TwoGenerationCollectorPolicy class. This is similar to what
+// ParallelScavenge does with its GenerationSizer class (see
+// ParallelScavengeHeap::initialize()). We might change this in the
+// future, but it's a good start.
+class G1YoungGenSizer : public TwoGenerationCollectorPolicy {
+  size_t size_to_region_num(size_t byte_size) {
+    return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
+  }
+
+public:
+  G1YoungGenSizer() {
+    initialize_flags();
+    initialize_size_info();
+  }
+
+  size_t min_young_region_num() {
+    return size_to_region_num(_min_gen0_size);
+  }
+  size_t initial_young_region_num() {
+    return size_to_region_num(_initial_gen0_size);
+  }
+  size_t max_young_region_num() {
+    return size_to_region_num(_max_gen0_size);
+  }
+};
+
 void G1CollectorPolicy::init() {
   // Set aside an initial future to_space.
   _g1 = G1CollectedHeap::heap();
-  size_t regions = Universe::heap()->capacity() / HeapRegion::GrainBytes;
 
   assert(Heap_lock->owned_by_self(), "Locking discipline.");
 
-  if (G1SteadyStateUsed < 50) {
-    vm_exit_during_initialization("G1SteadyStateUsed must be at least 50%.");
-  }
-
   initialize_gc_policy_counters();
 
   if (G1Gen) {
     _in_young_gc_mode = true;
 
-    if (G1YoungGenSize == 0) {
+    G1YoungGenSizer sizer;
+    size_t initial_region_num = sizer.initial_young_region_num();
+
+    if (UseAdaptiveSizePolicy) {
       set_adaptive_young_list_length(true);
       _young_list_fixed_length = 0;
     } else {
       set_adaptive_young_list_length(false);
-      _young_list_fixed_length = (G1YoungGenSize / HeapRegion::GrainBytes);
+      _young_list_fixed_length = initial_region_num;
     }
-     _free_regions_at_end_of_collection = _g1->free_regions();
-     _scan_only_regions_at_end_of_collection = 0;
-     calculate_young_list_min_length();
-     guarantee( _young_list_min_length == 0, "invariant, not enough info" );
-     calculate_young_list_target_config();
-   } else {
+    _free_regions_at_end_of_collection = _g1->free_regions();
+    calculate_young_list_min_length();
+    guarantee( _young_list_min_length == 0, "invariant, not enough info" );
+    calculate_young_list_target_length();
+  } else {
      _young_list_fixed_length = 0;
     _in_young_gc_mode = false;
   }
+
+  // We may immediately start allocating regions and placing them on the
+  // collection set list. Initialize the per-collection set info.
+  start_incremental_cset_building();
 }
 
 // Create the jstat counters for the policy.
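
size_to_region_num() in the new G1YoungGenSizer is a rounding rule worth seeing in isolation. A stand-alone version, with a fixed 1 MB region size standing in for HeapRegion::GrainBytes:

    #include <cassert>
    #include <cstddef>

    static const size_t grain_bytes = 1024 * 1024;  // stand-in for GrainBytes

    static size_t size_to_region_num(size_t byte_size) {
      size_t n = byte_size / grain_bytes;  // truncating division
      return n > 1 ? n : 1;                // but never fewer than one region
    }

    int main() {
      assert(size_to_region_num(0) == 1);                   // floor of one region
      assert(size_to_region_num(8 * grain_bytes) == 8);
      assert(size_to_region_num(8 * grain_bytes + 1) == 8); // remainder truncates
      return 0;
    }
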
@@ -347,115 +394,32 @@
     double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
     double alloc_rate_ms = predict_alloc_rate_ms();
     int min_regions = (int) ceil(alloc_rate_ms * when_ms);
-    int current_region_num = (int) _g1->young_list_length();
+    int current_region_num = (int) _g1->young_list()->length();
     _young_list_min_length = min_regions + current_region_num;
   }
 }
 
-void G1CollectorPolicy::calculate_young_list_target_config() {
+void G1CollectorPolicy::calculate_young_list_target_length() {
   if (adaptive_young_list_length()) {
     size_t rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
-    calculate_young_list_target_config(rs_lengths);
+    calculate_young_list_target_length(rs_lengths);
   } else {
     if (full_young_gcs())
       _young_list_target_length = _young_list_fixed_length;
     else
       _young_list_target_length = _young_list_fixed_length / 2;
+
     _young_list_target_length = MAX2(_young_list_target_length, (size_t)1);
-    size_t so_length = calculate_optimal_so_length(_young_list_target_length);
-    guarantee( so_length < _young_list_target_length, "invariant" );
-    _young_list_so_prefix_length = so_length;
   }
   calculate_survivors_policy();
 }
 
-// This method calculate the optimal scan-only set for a fixed young
-// gen size. I couldn't work out how to reuse the more elaborate one,
-// i.e. calculate_young_list_target_config(rs_length), as the loops are
-// fundamentally different (the other one finds a config for different
-// S-O lengths, whereas here we need to do the opposite).
-size_t G1CollectorPolicy::calculate_optimal_so_length(
-                                                    size_t young_list_length) {
-  if (!G1UseScanOnlyPrefix)
-    return 0;
-
-  if (_all_pause_times_ms->num() < 3) {
-    // we won't use a scan-only set at the beginning to allow the rest
-    // of the predictors to warm up
-    return 0;
-  }
-
-  if (_cost_per_scan_only_region_ms_seq->num() < 3) {
-    // then, we'll only set the S-O set to 1 for a little bit of time,
-    // to get enough information on the scanning cost
-    return 1;
-  }
-
-  size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
-  size_t rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
-  size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
-  size_t scanned_cards;
-  if (full_young_gcs())
-    scanned_cards = predict_young_card_num(adj_rs_lengths);
-  else
-    scanned_cards = predict_non_young_card_num(adj_rs_lengths);
-  double base_time_ms = predict_base_elapsed_time_ms(pending_cards,
-                                                     scanned_cards);
-
-  size_t so_length = 0;
-  double max_gc_eff = 0.0;
-  for (size_t i = 0; i < young_list_length; ++i) {
-    double gc_eff = 0.0;
-    double pause_time_ms = 0.0;
-    predict_gc_eff(young_list_length, i, base_time_ms,
-                   &gc_eff, &pause_time_ms);
-    if (gc_eff > max_gc_eff) {
-      max_gc_eff = gc_eff;
-      so_length = i;
-    }
-  }
-
-  // set it to 95% of the optimal to make sure we sample the "area"
-  // around the optimal length to get up-to-date survival rate data
-  return so_length * 950 / 1000;
-}
-
-// This is a really cool piece of code! It finds the best
-// target configuration (young length / scan-only prefix length) so
-// that GC efficiency is maximized and that we also meet a pause
-// time. It's a triple nested loop. These loops are explained below
-// from the inside-out :-)
-//
-// (a) The innermost loop will try to find the optimal young length
-// for a fixed S-O length. It uses a binary search to speed up the
-// process. We assume that, for a fixed S-O length, as we add more
-// young regions to the CSet, the GC efficiency will only go up (I'll
-// skip the proof). So, using a binary search to optimize this process
-// makes perfect sense.
-//
-// (b) The middle loop will fix the S-O length before calling the
-// innermost one. It will vary it between two parameters, increasing
-// it by a given increment.
-//
-// (c) The outermost loop will call the middle loop three times.
-//   (1) The first time it will explore all possible S-O length values
-//   from 0 to as large as it can get, using a coarse increment (to
-//   quickly "home in" to where the optimal seems to be).
-//   (2) The second time it will explore the values around the optimal
-//   that was found by the first iteration using a fine increment.
-//   (3) Once the optimal config has been determined by the second
-//   iteration, we'll redo the calculation, but setting the S-O length
-//   to 95% of the optimal to make sure we sample the "area"
-//   around the optimal length to get up-to-date survival rate data
-//
-// Termination conditions for the iterations are several: the pause
-// time is over the limit, we do not have enough to-space, etc.
-
-void G1CollectorPolicy::calculate_young_list_target_config(size_t rs_lengths) {
+void G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths) {
   guarantee( adaptive_young_list_length(), "pre-condition" );
+  guarantee( !_in_marking_window || !_last_full_young_gc, "invariant" );
 
   double start_time_sec = os::elapsedTime();
-  size_t min_reserve_perc = MAX2((size_t)2, (size_t)G1MinReservePercent);
+  size_t min_reserve_perc = MAX2((size_t)2, (size_t)G1ReservePercent);
   min_reserve_perc = MIN2((size_t) 50, min_reserve_perc);
   size_t reserve_regions =
     (size_t) ((double) min_reserve_perc * (double) _g1->n_regions() / 100.0);
@@ -466,285 +430,80 @@
     double survivor_regions_evac_time =
         predict_survivor_regions_evac_time();
 
-    size_t min_so_length = 0;
-    size_t max_so_length = 0;
-
-    if (G1UseScanOnlyPrefix) {
-      if (_all_pause_times_ms->num() < 3) {
-        // we won't use a scan-only set at the beginning to allow the rest
-        // of the predictors to warm up
-        min_so_length = 0;
-        max_so_length = 0;
-      } else if (_cost_per_scan_only_region_ms_seq->num() < 3) {
-        // then, we'll only set the S-O set to 1 for a little bit of time,
-        // to get enough information on the scanning cost
-        min_so_length = 1;
-        max_so_length = 1;
-      } else if (_in_marking_window || _last_full_young_gc) {
-        // no S-O prefix during a marking phase either, as at the end
-        // of the marking phase we'll have to use a very small young
-        // length target to fill up the rest of the CSet with
-        // non-young regions and, if we have lots of scan-only regions
-        // left-over, we will not be able to add any more non-young
-        // regions.
-        min_so_length = 0;
-        max_so_length = 0;
-      } else {
-        // this is the common case; we'll never reach the maximum, we
-        // one of the end conditions will fire well before that
-        // (hopefully!)
-        min_so_length = 0;
-        max_so_length = _free_regions_at_end_of_collection - 1;
-      }
-    } else {
-      // no S-O prefix, as the switch is not set, but we still need to
-      // do one iteration to calculate the best young target that
-      // meets the pause time; this way we reuse the same code instead
-      // of replicating it
-      min_so_length = 0;
-      max_so_length = 0;
-    }
-
     double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
     size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
     size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
-    size_t scanned_cards;
-    if (full_young_gcs())
-      scanned_cards = predict_young_card_num(adj_rs_lengths);
-    else
-      scanned_cards = predict_non_young_card_num(adj_rs_lengths);
-    // calculate this once, so that we don't have to recalculate it in
-    // the innermost loop
+    size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
     double base_time_ms = predict_base_elapsed_time_ms(pending_cards, scanned_cards)
                           + survivor_regions_evac_time;
+
     // the result
     size_t final_young_length = 0;
-    size_t final_so_length = 0;
-    double final_gc_eff = 0.0;
-    // we'll also keep track of how many times we go into the inner loop
-    // this is for profiling reasons
-    size_t calculations = 0;
-
-    // this determines which of the three iterations the outer loop is in
-    typedef enum {
-      pass_type_coarse,
-      pass_type_fine,
-      pass_type_final
-    } pass_type_t;
-
-    // range of the outer loop's iteration
-    size_t from_so_length   = min_so_length;
-    size_t to_so_length     = max_so_length;
-    guarantee( from_so_length <= to_so_length, "invariant" );
-
-    // this will keep the S-O length that's found by the second
-    // iteration of the outer loop; we'll keep it just in case the third
-    // iteration fails to find something
-    size_t fine_so_length   = 0;
-
-    // the increment step for the coarse (first) iteration
-    size_t so_coarse_increments = 5;
-
-    // the common case, we'll start with the coarse iteration
-    pass_type_t pass = pass_type_coarse;
-    size_t so_length_incr = so_coarse_increments;
-
-    if (from_so_length == to_so_length) {
-      // not point in doing the coarse iteration, we'll go directly into
-      // the fine one (we essentially trying to find the optimal young
-      // length for a fixed S-O length).
-      so_length_incr = 1;
-      pass = pass_type_final;
-    } else if (to_so_length - from_so_length < 3 * so_coarse_increments) {
-      // again, the range is too short so no point in foind the coarse
-      // iteration either
-      so_length_incr = 1;
-      pass = pass_type_fine;
-    }
-
-    bool done = false;
-    // this is the outermost loop
-    while (!done) {
-#ifdef TRACE_CALC_YOUNG_CONFIG
-      // leave this in for debugging, just in case
-      gclog_or_tty->print_cr("searching between " SIZE_FORMAT " and " SIZE_FORMAT
-                             ", incr " SIZE_FORMAT ", pass %s",
-                             from_so_length, to_so_length, so_length_incr,
-                             (pass == pass_type_coarse) ? "coarse" :
-                             (pass == pass_type_fine) ? "fine" : "final");
-#endif // TRACE_CALC_YOUNG_CONFIG
-
-      size_t so_length = from_so_length;
-      size_t init_free_regions =
-        MAX2((size_t)0,
-             _free_regions_at_end_of_collection +
-             _scan_only_regions_at_end_of_collection - reserve_regions);
-
-      // this determines whether a configuration was found
-      bool gc_eff_set = false;
-      // this is the middle loop
-      while (so_length <= to_so_length) {
-        // base time, which excludes region-related time; again we
-        // calculate it once to avoid recalculating it in the
-        // innermost loop
-        double base_time_with_so_ms =
-                           base_time_ms + predict_scan_only_time_ms(so_length);
-        // it's already over the pause target, go around
-        if (base_time_with_so_ms > target_pause_time_ms)
-          break;
-
-        size_t starting_young_length = so_length+1;
-
-        // we make sure that the short young length that makes sense
-        // (one more than the S-O length) is feasible
-        size_t min_young_length = starting_young_length;
-        double min_gc_eff;
-        bool min_ok;
-        ++calculations;
-        min_ok = predict_gc_eff(min_young_length, so_length,
-                                base_time_with_so_ms,
-                                init_free_regions, target_pause_time_ms,
-                                &min_gc_eff);
-
-        if (min_ok) {
-          // the shortest young length is indeed feasible; we'll know
-          // set up the max young length and we'll do a binary search
-          // between min_young_length and max_young_length
-          size_t max_young_length = _free_regions_at_end_of_collection - 1;
-          double max_gc_eff = 0.0;
-          bool max_ok = false;
-
-          // the innermost loop! (finally!)
-          while (max_young_length > min_young_length) {
-            // we'll make sure that min_young_length is always at a
-            // feasible config
-            guarantee( min_ok, "invariant" );
-
-            ++calculations;
-            max_ok = predict_gc_eff(max_young_length, so_length,
-                                    base_time_with_so_ms,
-                                    init_free_regions, target_pause_time_ms,
-                                    &max_gc_eff);
+
+    size_t init_free_regions =
+      MAX2((size_t)0, _free_regions_at_end_of_collection - reserve_regions);
+
+    // if we're still under the pause target...
+    if (base_time_ms <= target_pause_time_ms) {
+      // We make sure that the shortest young length that makes sense
+      // fits within the target pause time.
+      size_t min_young_length = 1;
+
+      if (predict_will_fit(min_young_length, base_time_ms,
+                                     init_free_regions, target_pause_time_ms)) {
+        // The shortest young length will fit within the target pause time;
+        // we'll now check whether the absolute maximum number of young
+        // regions will fit in the target pause time. If not, we'll do
+        // a binary search between min_young_length and max_young_length
+        size_t abs_max_young_length = _free_regions_at_end_of_collection - 1;
+        size_t max_young_length = abs_max_young_length;
+
+        if (max_young_length > min_young_length) {
+          // Let's check if the initial max young length will fit within the
+          // target pause. If so then there is no need to search for a maximal
+          // young length - we'll return the initial maximum
+
+          if (predict_will_fit(max_young_length, base_time_ms,
+                                init_free_regions, target_pause_time_ms)) {
+            // The maximum young length will satisfy the target pause time.
+            // We are done so set min young length to this maximum length.
+            // The code after the loop will then set final_young_length using
+            // the value cached in the minimum length.
+            min_young_length = max_young_length;
+          } else {
+            // The maximum possible number of young regions will not fit within
+            // the target pause time so let's search....
 
             size_t diff = (max_young_length - min_young_length) / 2;
-            if (max_ok) {
-              min_young_length = max_young_length;
-              min_gc_eff = max_gc_eff;
-              min_ok = true;
+            max_young_length = min_young_length + diff;
+
+            while (max_young_length > min_young_length) {
+              if (predict_will_fit(max_young_length, base_time_ms,
+                                        init_free_regions, target_pause_time_ms)) {
+
+                // The current max young length will fit within the target
+                // pause time. Note we do not exit the loop here. By setting
+                // min = max, and then increasing the max below means that
+                // we will continue searching for an upper bound in the
+                // range [max..max+diff]
+                min_young_length = max_young_length;
+              }
+              diff = (max_young_length - min_young_length) / 2;
+              max_young_length = min_young_length + diff;
             }
-            max_young_length = min_young_length + diff;
+            // the above loop found a maximal young length that will fit
+            // within the target pause time.
           }
-
-          // the innermost loop found a config
-          guarantee( min_ok, "invariant" );
-          if (min_gc_eff > final_gc_eff) {
-            // it's the best config so far, so we'll keep it
-            final_gc_eff = min_gc_eff;
-            final_young_length = min_young_length;
-            final_so_length = so_length;
-            gc_eff_set = true;
-          }
+          assert(min_young_length <= abs_max_young_length, "just checking");
         }
-
-        // incremental the fixed S-O length and go around
-        so_length += so_length_incr;
+        final_young_length = min_young_length;
       }
-
-      // this is the end of the outermost loop and we need to decide
-      // what to do during the next iteration
-      if (pass == pass_type_coarse) {
-        // we just did the coarse pass (first iteration)
-
-        if (!gc_eff_set)
-          // we didn't find a feasible config so we'll just bail out; of
-          // course, it might be the case that we missed it; but I'd say
-          // it's a bit unlikely
-          done = true;
-        else {
-          // We did find a feasible config with optimal GC eff during
-          // the first pass. So the second pass we'll only consider the
-          // S-O lengths around that config with a fine increment.
-
-          guarantee( so_length_incr == so_coarse_increments, "invariant" );
-          guarantee( final_so_length >= min_so_length, "invariant" );
-
-#ifdef TRACE_CALC_YOUNG_CONFIG
-          // leave this in for debugging, just in case
-          gclog_or_tty->print_cr("  coarse pass: SO length " SIZE_FORMAT,
-                                 final_so_length);
-#endif // TRACE_CALC_YOUNG_CONFIG
-
-          from_so_length =
-            (final_so_length - min_so_length > so_coarse_increments) ?
-            final_so_length - so_coarse_increments + 1 : min_so_length;
-          to_so_length =
-            (max_so_length - final_so_length > so_coarse_increments) ?
-            final_so_length + so_coarse_increments - 1 : max_so_length;
-
-          pass = pass_type_fine;
-          so_length_incr = 1;
-        }
-      } else if (pass == pass_type_fine) {
-        // we just finished the second pass
-
-        if (!gc_eff_set) {
-          // we didn't find a feasible config (yes, it's possible;
-          // notice that, sometimes, we go directly into the fine
-          // iteration and skip the coarse one) so we bail out
-          done = true;
-        } else {
-          // We did find a feasible config with optimal GC eff
-          guarantee( so_length_incr == 1, "invariant" );
-
-          if (final_so_length == 0) {
-            // The config is of an empty S-O set, so we'll just bail out
-            done = true;
-          } else {
-            // we'll go around once more, setting the S-O length to 95%
-            // of the optimal
-            size_t new_so_length = 950 * final_so_length / 1000;
-
-#ifdef TRACE_CALC_YOUNG_CONFIG
-            // leave this in for debugging, just in case
-            gclog_or_tty->print_cr("  fine pass: SO length " SIZE_FORMAT
-                                   ", setting it to " SIZE_FORMAT,
-                                    final_so_length, new_so_length);
-#endif // TRACE_CALC_YOUNG_CONFIG
-
-            from_so_length = new_so_length;
-            to_so_length = new_so_length;
-            fine_so_length = final_so_length;
-
-            pass = pass_type_final;
-          }
-        }
-      } else if (pass == pass_type_final) {
-        // we just finished the final (third) pass
-
-        if (!gc_eff_set)
-          // we didn't find a feasible config, so we'll just use the one
-          // we found during the second pass, which we saved
-          final_so_length = fine_so_length;
-
-        // and we're done!
-        done = true;
-      } else {
-        guarantee( false, "should never reach here" );
-      }
-
-      // we now go around the outermost loop
     }
+    // and we're done!
 
     // we should have at least one region in the target young length
     _young_list_target_length =
         MAX2((size_t) 1, final_young_length + _recorded_survivor_regions);
-    if (final_so_length >= final_young_length)
-      // and we need to ensure that the S-O length is not greater than
-      // the target young length (this is being a bit careful)
-      final_so_length = 0;
-    _young_list_so_prefix_length = final_so_length;
-    guarantee( !_in_marking_window || !_last_full_young_gc ||
-               _young_list_so_prefix_length == 0, "invariant" );
 
     // let's keep an eye of how long we spend on this calculation
     // right now, I assume that we'll print it when we need it; we
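
The rewrite above replaces the old triple-nested GC-efficiency search with a simple fit test plus bisection. As a stand-alone reference, here is an exact upper-bound bisection over a monotone will-fit predicate, with a toy linear cost model standing in for predict_will_fit() (all numbers made up):

    #include <cassert>
    #include <cstddef>

    // Toy cost model: each young region adds 0.5 ms to a 20 ms base,
    // against a 200 ms pause target. Monotone in young_length, as the
    // real predictor is assumed to be.
    static bool will_fit(size_t young_length) {
      return 20.0 + 0.5 * (double) young_length <= 200.0;
    }

    // Invariant: will_fit(lo) && !will_fit(hi). Narrow to one region.
    static size_t max_feasible_young_length(size_t lo, size_t hi) {
      assert(will_fit(lo) && !will_fit(hi));
      while (hi - lo > 1) {
        size_t mid = lo + (hi - lo) / 2;
        if (will_fit(mid)) {
          lo = mid;  // mid fits: the answer lies in [mid, hi)
        } else {
          hi = mid;  // mid overruns the target: the answer lies in [lo, mid)
        }
      }
      return lo;
    }

    int main() {
      // (200 - 20) / 0.5 = 360 regions is the exact break-even length.
      assert(max_feasible_young_length(1, 1000) == 360);
      return 0;
    }
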
@@ -752,142 +511,91 @@
     double end_time_sec = os::elapsedTime();
     double elapsed_time_ms = (end_time_sec - start_time_sec) * 1000.0;
 
-#ifdef TRACE_CALC_YOUNG_CONFIG
+#ifdef TRACE_CALC_YOUNG_LENGTH
     // leave this in for debugging, just in case
-    gclog_or_tty->print_cr("target = %1.1lf ms, young = " SIZE_FORMAT
-                           ", SO = " SIZE_FORMAT ", "
-                           "elapsed %1.2lf ms, calcs: " SIZE_FORMAT " (%s%s) "
-                           SIZE_FORMAT SIZE_FORMAT,
+    gclog_or_tty->print_cr("target = %1.1lf ms, young = " SIZE_FORMAT ", "
+                           "elapsed %1.2lf ms, (%s%s) " SIZE_FORMAT SIZE_FORMAT,
                            target_pause_time_ms,
-                           _young_list_target_length - _young_list_so_prefix_length,
-                           _young_list_so_prefix_length,
+                           _young_list_target_length,
                            elapsed_time_ms,
-                           calculations,
                            full_young_gcs() ? "full" : "partial",
-                           should_initiate_conc_mark() ? " i-m" : "",
+                           during_initial_mark_pause() ? " i-m" : "",
                            _in_marking_window,
                            _in_marking_window_im);
-#endif // TRACE_CALC_YOUNG_CONFIG
+#endif // TRACE_CALC_YOUNG_LENGTH
 
     if (_young_list_target_length < _young_list_min_length) {
-      // bummer; this means that, if we do a pause when the optimal
-      // config dictates, we'll violate the pause spacing target (the
+      // bummer; this means that, if we do a pause when the maximal
+      // length dictates, we'll violate the pause spacing target (the
       // min length was calculate based on the application's current
       // alloc rate);
 
       // so, we have to bite the bullet, and allocate the minimum
       // number. We'll violate our target, but we just can't meet it.
 
-      size_t so_length = 0;
-      // a note further up explains why we do not want an S-O length
-      // during marking
-      if (!_in_marking_window && !_last_full_young_gc)
-        // but we can still try to see whether we can find an optimal
-        // S-O length
-        so_length = calculate_optimal_so_length(_young_list_min_length);
-
-#ifdef TRACE_CALC_YOUNG_CONFIG
+#ifdef TRACE_CALC_YOUNG_LENGTH
       // leave this in for debugging, just in case
       gclog_or_tty->print_cr("adjusted target length from "
-                             SIZE_FORMAT " to " SIZE_FORMAT
-                             ", SO " SIZE_FORMAT,
-                             _young_list_target_length, _young_list_min_length,
-                             so_length);
-#endif // TRACE_CALC_YOUNG_CONFIG
-
-      _young_list_target_length =
-        MAX2(_young_list_min_length, (size_t)1);
-      _young_list_so_prefix_length = so_length;
+                             SIZE_FORMAT " to " SIZE_FORMAT,
+                             _young_list_target_length, _young_list_min_length);
+#endif // TRACE_CALC_YOUNG_LENGTH
+
+      _young_list_target_length = _young_list_min_length;
     }
   } else {
     // we are in a partially-young mode or we've run out of regions (due
     // to evacuation failure)
 
-#ifdef TRACE_CALC_YOUNG_CONFIG
+#ifdef TRACE_CALC_YOUNG_LENGTH
     // leave this in for debugging, just in case
     gclog_or_tty->print_cr("(partial) setting target to " SIZE_FORMAT
-                           ", SO " SIZE_FORMAT,
-                           _young_list_min_length, 0);
-#endif // TRACE_CALC_YOUNG_CONFIG
-
-    // we'll do the pause as soon as possible and with no S-O prefix
-    // (see above for the reasons behind the latter)
+                           , _young_list_min_length);
+#endif // TRACE_CALC_YOUNG_LENGTH
+    // we'll do the pause as soon as possible by choosing the minimum
     _young_list_target_length =
       MAX2(_young_list_min_length, (size_t) 1);
-    _young_list_so_prefix_length = 0;
   }
 
   _rs_lengths_prediction = rs_lengths;
 }
 
-// This is used by: calculate_optimal_so_length(length). It returns
-// the GC eff and predicted pause time for a particular config
-void
-G1CollectorPolicy::predict_gc_eff(size_t young_length,
-                                  size_t so_length,
-                                  double base_time_ms,
-                                  double* ret_gc_eff,
-                                  double* ret_pause_time_ms) {
-  double so_time_ms = predict_scan_only_time_ms(so_length);
-  double accum_surv_rate_adj = 0.0;
-  if (so_length > 0)
-    accum_surv_rate_adj = accum_yg_surv_rate_pred((int)(so_length - 1));
-  double accum_surv_rate =
-    accum_yg_surv_rate_pred((int)(young_length - 1)) - accum_surv_rate_adj;
-  size_t bytes_to_copy =
-    (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
-  double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
-  double young_other_time_ms =
-                       predict_young_other_time_ms(young_length - so_length);
-  double pause_time_ms =
-                base_time_ms + so_time_ms + copy_time_ms + young_other_time_ms;
-  size_t reclaimed_bytes =
-    (young_length - so_length) * HeapRegion::GrainBytes - bytes_to_copy;
-  double gc_eff = (double) reclaimed_bytes / pause_time_ms;
-
-  *ret_gc_eff = gc_eff;
-  *ret_pause_time_ms = pause_time_ms;
-}
-
-// This is used by: calculate_young_list_target_config(rs_length). It
-// returns the GC eff of a particular config. It returns false if that
-// config violates any of the end conditions of the search in the
-// calling method, or true upon success. The end conditions were put
-// here since it's called twice and it was best not to replicate them
-// in the caller. Also, passing the parameteres avoids having to
-// recalculate them in the innermost loop.
+// This is used by: calculate_young_list_target_length(rs_length). It
+// returns true iff:
+//   the predicted pause time for the given young list will not exceed
+//   the target pause time
+// and:
+//   the predicted amount of surviving data will not exceed
+//   the amount of free space available for survivor regions.
+//
 bool
-G1CollectorPolicy::predict_gc_eff(size_t young_length,
-                                  size_t so_length,
-                                  double base_time_with_so_ms,
-                                  size_t init_free_regions,
-                                  double target_pause_time_ms,
-                                  double* ret_gc_eff) {
-  *ret_gc_eff = 0.0;
+G1CollectorPolicy::predict_will_fit(size_t young_length,
+                                    double base_time_ms,
+                                    size_t init_free_regions,
+                                    double target_pause_time_ms) {
 
   if (young_length >= init_free_regions)
     // end condition 1: not enough space for the young regions
     return false;
 
   double accum_surv_rate_adj = 0.0;
-  if (so_length > 0)
-    accum_surv_rate_adj = accum_yg_surv_rate_pred((int)(so_length - 1));
   double accum_surv_rate =
     accum_yg_surv_rate_pred((int)(young_length - 1)) - accum_surv_rate_adj;
+
   size_t bytes_to_copy =
     (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
+
   double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
+
   double young_other_time_ms =
-                       predict_young_other_time_ms(young_length - so_length);
+                       predict_young_other_time_ms(young_length);
+
   double pause_time_ms =
-                   base_time_with_so_ms + copy_time_ms + young_other_time_ms;
+                   base_time_ms + copy_time_ms + young_other_time_ms;
 
   if (pause_time_ms > target_pause_time_ms)
     // end condition 2: over the target pause time
     return false;
 
-  size_t reclaimed_bytes =
-    (young_length - so_length) * HeapRegion::GrainBytes - bytes_to_copy;
   size_t free_bytes =
                  (init_free_regions - young_length) * HeapRegion::GrainBytes;
 
@@ -896,9 +604,6 @@
     return false;
 
   // success!
-  double gc_eff = (double) reclaimed_bytes / pause_time_ms;
-  *ret_gc_eff = gc_eff;
-
   return true;
 }
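
To make the end conditions concrete, here is the pause-time arithmetic that predict_will_fit() performs, run once with made-up inputs (every constant below is illustrative, none is a measured G1 value):

    #include <cassert>
    #include <cmath>

    int main() {
      double grain_bytes   = 1024.0 * 1024.0;  // 1 MB regions, for illustration
      double accum_rate    = 2.5;              // predicted surviving region-equivalents
      double bytes_to_copy = accum_rate * grain_bytes;
      double copy_ms       = 0.02 * bytes_to_copy / 1024.0;  // 0.02 ms per KB copied
      double other_ms      = 0.1 * 20.0;       // 0.1 ms bookkeeping x 20 regions
      double pause_ms      = 40.0 + copy_ms + other_ms;  // base + copy + young-other
      assert(fabs(pause_ms - 93.2) < 1e-6);
      assert(pause_ms <= 100.0);  // end condition 2: under the target, so it fits
      return 0;
    }
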
 
@@ -915,11 +620,11 @@
 void G1CollectorPolicy::check_prediction_validity() {
   guarantee( adaptive_young_list_length(), "should not call this otherwise" );
 
-  size_t rs_lengths = _g1->young_list_sampled_rs_lengths();
+  size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
   if (rs_lengths > _rs_lengths_prediction) {
     // add 10% to avoid having to recalculate often
     size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
-    calculate_young_list_target_config(rs_lengths_prediction);
+    calculate_young_list_target_length(rs_lengths_prediction);
   }
 }
 
@@ -941,7 +646,7 @@
 
 #ifndef PRODUCT
 bool G1CollectorPolicy::verify_young_ages() {
-  HeapRegion* head = _g1->young_list_first_region();
+  HeapRegion* head = _g1->young_list()->first_region();
   return
     verify_young_ages(head, _short_lived_surv_rate_group);
   // also call verify_young_ages on any additional surv rate groups
@@ -1011,13 +716,13 @@
   set_full_young_gcs(true);
   _last_full_young_gc = false;
   _should_revert_to_full_young_gcs = false;
-  _should_initiate_conc_mark = false;
+  clear_initiate_conc_mark_if_possible();
+  clear_during_initial_mark_pause();
   _known_garbage_bytes = 0;
   _known_garbage_ratio = 0.0;
   _in_marking_window = false;
   _in_marking_window_im = false;
 
-  _short_lived_surv_rate_group->record_scan_only_prefix(0);
   _short_lived_surv_rate_group->start_adding_regions();
   // also call this on any additional surv rate groups
 
@@ -1027,11 +732,10 @@
   _prev_region_num_tenured = _region_num_tenured;
 
   _free_regions_at_end_of_collection = _g1->free_regions();
-  _scan_only_regions_at_end_of_collection = 0;
   // Reset survivors SurvRateGroup.
   _survivor_surv_rate_group->reset();
   calculate_young_list_min_length();
-  calculate_young_list_target_config();
+  calculate_young_list_target_length();
  }
 
 void G1CollectorPolicy::record_before_bytes(size_t bytes) {
@@ -1080,8 +784,6 @@
   for (int i = 0; i < _parallel_gc_threads; ++i) {
     _par_last_ext_root_scan_times_ms[i] = -666.0;
     _par_last_mark_stack_scan_times_ms[i] = -666.0;
-    _par_last_scan_only_times_ms[i] = -666.0;
-    _par_last_scan_only_regions_scanned[i] = -666.0;
     _par_last_update_rs_start_times_ms[i] = -666.0;
     _par_last_update_rs_times_ms[i] = -666.0;
     _par_last_update_rs_processed_buffers[i] = -666.0;
@@ -1104,50 +806,13 @@
   if (in_young_gc_mode())
     _last_young_gc_full = false;
 
-
   // do that for any other surv rate groups
   _short_lived_surv_rate_group->stop_adding_regions();
-  size_t short_lived_so_length = _young_list_so_prefix_length;
-  _short_lived_surv_rate_group->record_scan_only_prefix(short_lived_so_length);
-  tag_scan_only(short_lived_so_length);
-
-  if (G1UseSurvivorSpaces) {
-    _survivors_age_table.clear();
-  }
+  _survivors_age_table.clear();
 
   assert( verify_young_ages(), "region age verification" );
 }
 
-void G1CollectorPolicy::tag_scan_only(size_t short_lived_scan_only_length) {
-  // done in a way that it can be extended for other surv rate groups too...
-
-  HeapRegion* head = _g1->young_list_first_region();
-  bool finished_short_lived = (short_lived_scan_only_length == 0);
-
-  if (finished_short_lived)
-    return;
-
-  for (HeapRegion* curr = head;
-       curr != NULL;
-       curr = curr->get_next_young_region()) {
-    SurvRateGroup* surv_rate_group = curr->surv_rate_group();
-    int age = curr->age_in_surv_rate_group();
-
-    if (surv_rate_group == _short_lived_surv_rate_group) {
-      if ((size_t)age < short_lived_scan_only_length)
-        curr->set_scan_only();
-      else
-        finished_short_lived = true;
-    }
-
-
-    if (finished_short_lived)
-      return;
-  }
-
-  guarantee( false, "we should never reach here" );
-}
-
 void G1CollectorPolicy::record_mark_closure_time(double mark_closure_time_ms) {
   _mark_closure_time_ms = mark_closure_time_ms;
 }
@@ -1160,7 +825,8 @@
 void G1CollectorPolicy::record_concurrent_mark_init_end_pre(double
                                                    mark_init_elapsed_time_ms) {
   _during_marking = true;
-  _should_initiate_conc_mark = false;
+  assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
+  clear_during_initial_mark_pause();
   _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
 }
 
@@ -1231,7 +897,6 @@
   }
   _n_pauses_at_mark_end = _n_pauses;
   _n_marks_since_last_pause++;
-  _conc_mark_initiated = false;
 }
 
 void
@@ -1241,7 +906,7 @@
     _last_full_young_gc = true;
     _in_marking_window = false;
     if (adaptive_young_list_length())
-      calculate_young_list_target_config();
+      calculate_young_list_target_length();
   }
 }
 
@@ -1427,17 +1092,24 @@
 #endif // PRODUCT
 
   if (in_young_gc_mode()) {
-    last_pause_included_initial_mark = _should_initiate_conc_mark;
+    last_pause_included_initial_mark = during_initial_mark_pause();
     if (last_pause_included_initial_mark)
       record_concurrent_mark_init_end_pre(0.0);
 
     size_t min_used_targ =
-      (_g1->capacity() / 100) * (G1SteadyStateUsed - G1SteadyStateUsedDelta);
-
-    if (cur_used_bytes > min_used_targ) {
-      if (cur_used_bytes <= _prev_collection_pause_used_at_end_bytes) {
-      } else if (!_g1->mark_in_progress() && !_last_full_young_gc) {
-        _should_initiate_conc_mark = true;
+      (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
+
+
+    if (!_g1->mark_in_progress() && !_last_full_young_gc) {
+      assert(!last_pause_included_initial_mark, "invariant");
+      if (cur_used_bytes > min_used_targ &&
+          cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
+        assert(!during_initial_mark_pause(), "we should not see this here");
+
+        // Note: this might have already been set, if during the last
+        // pause we decided to start a cycle but at the beginning of
+        // this pause we decided to postpone it. That's OK.
+        set_initiate_conc_mark_if_possible();
       }
     }
 
@@ -1469,6 +1141,7 @@
   size_t freed_bytes =
     _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
   size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;
+
   double survival_fraction =
     (double)surviving_bytes/
     (double)_collection_set_bytes_used_before;
@@ -1556,9 +1229,6 @@
 
   double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
   double mark_stack_scan_time = avg_value(_par_last_mark_stack_scan_times_ms);
-  double scan_only_time = avg_value(_par_last_scan_only_times_ms);
-  double scan_only_regions_scanned =
-    sum_of_values(_par_last_scan_only_regions_scanned);
   double update_rs_time = avg_value(_par_last_update_rs_times_ms);
   double update_rs_processed_buffers =
     sum_of_values(_par_last_update_rs_processed_buffers);
@@ -1568,7 +1238,7 @@
 
   double parallel_other_time = _cur_collection_par_time_ms -
     (update_rs_time + ext_root_scan_time + mark_stack_scan_time +
-     scan_only_time + scan_rs_time + obj_copy_time + termination_time);
+     scan_rs_time + obj_copy_time + termination_time);
   if (update_stats) {
     MainBodySummary* body_summary = summary->main_body_summary();
     guarantee(body_summary != NULL, "should not be null!");
@@ -1579,7 +1249,6 @@
       body_summary->record_satb_drain_time_ms(0.0);
     body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
     body_summary->record_mark_stack_scan_time_ms(mark_stack_scan_time);
-    body_summary->record_scan_only_time_ms(scan_only_time);
     body_summary->record_update_rs_time_ms(update_rs_time);
     body_summary->record_scan_rs_time_ms(scan_rs_time);
     body_summary->record_obj_copy_time_ms(obj_copy_time);
@@ -1633,7 +1302,7 @@
     else
       other_time_ms -=
         update_rs_time +
-        ext_root_scan_time + mark_stack_scan_time + scan_only_time +
+        ext_root_scan_time + mark_stack_scan_time +
         scan_rs_time + obj_copy_time;
   }
 
@@ -1658,9 +1327,6 @@
                           _par_last_update_rs_processed_buffers, true);
         print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
         print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms);
-        print_par_stats(2, "Scan-Only Scanning", _par_last_scan_only_times_ms);
-        print_par_buffers(3, "Scan-Only Regions",
-                          _par_last_scan_only_regions_scanned, true);
         print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
         print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
         print_par_stats(2, "Termination", _par_last_termination_times_ms);
@@ -1672,7 +1338,6 @@
                     (int)update_rs_processed_buffers);
         print_stats(1, "Ext Root Scanning", ext_root_scan_time);
         print_stats(1, "Mark Stack Scanning", mark_stack_scan_time);
-        print_stats(1, "Scan-Only Scanning", scan_only_time);
         print_stats(1, "Scan RS", scan_rs_time);
         print_stats(1, "Object Copying", obj_copy_time);
       }
@@ -1687,6 +1352,8 @@
     }
 #endif
     print_stats(1, "Other", other_time_ms);
+    print_stats(2, "Choose CSet", _recorded_young_cset_choice_time_ms);
+
     for (int i = 0; i < _aux_num; ++i) {
       if (_cur_aux_times_set[i]) {
         char buffer[96];
@@ -1728,7 +1395,7 @@
 
   bool new_in_marking_window = _in_marking_window;
   bool new_in_marking_window_im = false;
-  if (_should_initiate_conc_mark) {
+  if (during_initial_mark_pause()) {
     new_in_marking_window = true;
     new_in_marking_window_im = true;
   }
@@ -1772,16 +1439,6 @@
       _cost_per_card_ms_seq->add(cost_per_card_ms);
     }
 
-    double cost_per_scan_only_region_ms = 0.0;
-    if (scan_only_regions_scanned > 0.0) {
-      cost_per_scan_only_region_ms =
-        scan_only_time / scan_only_regions_scanned;
-      if (_in_marking_window_im)
-        _cost_per_scan_only_region_ms_during_cm_seq->add(cost_per_scan_only_region_ms);
-      else
-        _cost_per_scan_only_region_ms_seq->add(cost_per_scan_only_region_ms);
-    }
-
     size_t cards_scanned = _g1->cards_scanned();
 
     double cost_per_entry_ms = 0.0;
@@ -1817,7 +1474,7 @@
     }
 
     double all_other_time_ms = pause_time_ms -
-      (update_rs_time + scan_only_time + scan_rs_time + obj_copy_time +
+      (update_rs_time + scan_rs_time + obj_copy_time +
        _mark_closure_time_ms + termination_time);
 
     double young_other_time_ms = 0.0;
@@ -1864,11 +1521,10 @@
     if (PREDICTIONS_VERBOSE) {
       gclog_or_tty->print_cr("");
       gclog_or_tty->print_cr("PREDICTIONS %1.4lf %d "
-                    "REGIONS %d %d %d %d "
+                    "REGIONS %d %d %d "
                     "PENDING_CARDS %d %d "
                     "CARDS_SCANNED %d %d "
                     "RS_LENGTHS %d %d "
-                    "SCAN_ONLY_SCAN %1.6lf %1.6lf "
                     "RS_UPDATE %1.6lf %1.6lf RS_SCAN %1.6lf %1.6lf "
                     "SURVIVAL_RATIO %1.6lf %1.6lf "
                     "OBJECT_COPY %1.6lf %1.6lf OTHER_CONSTANT %1.6lf %1.6lf "
@@ -1881,12 +1537,10 @@
                     (last_pause_included_initial_mark) ? 1 : 0,
                     _recorded_region_num,
                     _recorded_young_regions,
-                    _recorded_scan_only_regions,
                     _recorded_non_young_regions,
                     _predicted_pending_cards, _pending_cards,
                     _predicted_cards_scanned, cards_scanned,
                     _predicted_rs_lengths, _max_rs_lengths,
-                    _predicted_scan_only_scan_time_ms, scan_only_time,
                     _predicted_rs_update_time_ms, update_rs_time,
                     _predicted_rs_scan_time_ms, scan_rs_time,
                     _predicted_survival_ratio, survival_ratio,
@@ -1911,14 +1565,12 @@
   _in_marking_window = new_in_marking_window;
   _in_marking_window_im = new_in_marking_window_im;
   _free_regions_at_end_of_collection = _g1->free_regions();
-  _scan_only_regions_at_end_of_collection = _g1->young_list_length();
   calculate_young_list_min_length();
-  calculate_young_list_target_config();
+  calculate_young_list_target_length();
 
   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
-  double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSUpdatePauseFractionPercent / 100.0;
+  double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
   adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
-
   // </NEW PREDICTION>
 
   _target_pause_time_ms = -1.0;
@@ -1932,7 +1584,7 @@
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
 
-  if (G1AdaptiveConcRefine) {
+  if (G1UseAdaptiveConcRefinement) {
     const int k_gy = 3, k_gr = 6;
     const double inc_k = 1.1, dec_k = 0.9;
 
@@ -1973,13 +1625,13 @@
   guarantee( adjustment == 0 || adjustment == 1, "invariant" );
 
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  size_t young_num = g1h->young_list_length();
+  size_t young_num = g1h->young_list()->length();
   if (young_num == 0)
     return 0.0;
 
   young_num += adjustment;
   size_t pending_cards = predict_pending_cards();
-  size_t rs_lengths = g1h->young_list_sampled_rs_lengths() +
+  size_t rs_lengths = g1h->young_list()->sampled_rs_lengths() +
                       predict_rs_length_diff();
   size_t card_num;
   if (full_young_gcs())
@@ -2063,31 +1715,22 @@
 void
 G1CollectorPolicy::start_recording_regions() {
   _recorded_rs_lengths            = 0;
-  _recorded_scan_only_regions     = 0;
   _recorded_young_regions         = 0;
   _recorded_non_young_regions     = 0;
 
 #if PREDICTIONS_VERBOSE
-  _predicted_rs_lengths           = 0;
-  _predicted_cards_scanned        = 0;
-
   _recorded_marked_bytes          = 0;
   _recorded_young_bytes           = 0;
   _predicted_bytes_to_copy        = 0;
+  _predicted_rs_lengths           = 0;
+  _predicted_cards_scanned        = 0;
 #endif // PREDICTIONS_VERBOSE
 }
 
 void
-G1CollectorPolicy::record_cset_region(HeapRegion* hr, bool young) {
-  if (young) {
-    ++_recorded_young_regions;
-  } else {
-    ++_recorded_non_young_regions;
-  }
+G1CollectorPolicy::record_cset_region_info(HeapRegion* hr, bool young) {
 #if PREDICTIONS_VERBOSE
-  if (young) {
-    _recorded_young_bytes += hr->used();
-  } else {
+  if (!young) {
     _recorded_marked_bytes += hr->max_live_bytes();
   }
   _predicted_bytes_to_copy += predict_bytes_to_copy(hr);
@@ -2098,12 +1741,37 @@
 }
 
 void
-G1CollectorPolicy::record_scan_only_regions(size_t scan_only_length) {
-  _recorded_scan_only_regions = scan_only_length;
+G1CollectorPolicy::record_non_young_cset_region(HeapRegion* hr) {
+  assert(!hr->is_young(), "should not call this");
+  ++_recorded_non_young_regions;
+  record_cset_region_info(hr, false);
+}
+
+void
+G1CollectorPolicy::set_recorded_young_regions(size_t n_regions) {
+  _recorded_young_regions = n_regions;
+}
+
+void G1CollectorPolicy::set_recorded_young_bytes(size_t bytes) {
+#if PREDICTIONS_VERBOSE
+  _recorded_young_bytes = bytes;
+#endif // PREDICTIONS_VERBOSE
+}
+
+void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
+  _recorded_rs_lengths = rs_lengths;
+}
+
+void G1CollectorPolicy::set_predicted_bytes_to_copy(size_t bytes) {
+  _predicted_bytes_to_copy = bytes;
 }
 
 void
 G1CollectorPolicy::end_recording_regions() {
+  // The _predicted_pause_time_ms field is referenced in code
+  // not under PREDICTIONS_VERBOSE. Let's initialize it.
+  _predicted_pause_time_ms = -1.0;
+
 #if PREDICTIONS_VERBOSE
   _predicted_pending_cards = predict_pending_cards();
   _predicted_rs_lengths = _recorded_rs_lengths + predict_rs_length_diff();
@@ -2114,8 +1782,6 @@
       predict_non_young_card_num(_predicted_rs_lengths);
   _recorded_region_num = _recorded_young_regions + _recorded_non_young_regions;
 
-  _predicted_scan_only_scan_time_ms =
-    predict_scan_only_time_ms(_recorded_scan_only_regions);
   _predicted_rs_update_time_ms =
     predict_rs_update_time_ms(_g1->pending_card_num());
   _predicted_rs_scan_time_ms =
@@ -2130,7 +1796,6 @@
     predict_non_young_other_time_ms(_recorded_non_young_regions);
 
   _predicted_pause_time_ms =
-    _predicted_scan_only_scan_time_ms +
     _predicted_rs_update_time_ms +
     _predicted_rs_scan_time_ms +
     _predicted_object_copy_time_ms +
@@ -2147,7 +1812,13 @@
   if (predicted_time_ms > _expensive_region_limit_ms) {
     if (!in_young_gc_mode()) {
         set_full_young_gcs(true);
-      _should_initiate_conc_mark = true;
+        // We might want to do something different here. However,
+        // right now we don't support the non-generational G1 mode
+        // (and in fact we are planning to remove the associated code,
+        // see CR 6814390). So, let's leave it as is; this will be
+        // removed some time in the future.
+        ShouldNotReachHere();
+        set_during_initial_mark_pause();
     } else
       // no point in doing another partial one
       _should_revert_to_full_young_gcs = true;
@@ -2269,7 +1940,7 @@
 }
 
 size_t G1CollectorPolicy::expansion_amount() {
-  if ((int)(recent_avg_pause_time_ratio() * 100.0) > G1GCPercent) {
+  if ((recent_avg_pause_time_ratio() * 100.0) > _gc_overhead_perc) {
     // We will double the existing space, or take
     // G1ExpandByPercentOfAvailable % of the available expansion
     // space, whichever is smaller, bounded below by a minimum
@@ -2414,8 +2085,6 @@
                       body_summary->get_ext_root_scan_seq());
         print_summary(2, "Mark Stack Scanning",
                       body_summary->get_mark_stack_scan_seq());
-        print_summary(2, "Scan-Only Scanning",
-                      body_summary->get_scan_only_seq());
         print_summary(2, "Scan RS", body_summary->get_scan_rs_seq());
         print_summary(2, "Object Copy", body_summary->get_obj_copy_seq());
         print_summary(2, "Termination", body_summary->get_termination_seq());
@@ -2425,7 +2094,6 @@
             body_summary->get_update_rs_seq(),
             body_summary->get_ext_root_scan_seq(),
             body_summary->get_mark_stack_scan_seq(),
-            body_summary->get_scan_only_seq(),
             body_summary->get_scan_rs_seq(),
             body_summary->get_obj_copy_seq(),
             body_summary->get_termination_seq()
@@ -2443,8 +2111,6 @@
                       body_summary->get_ext_root_scan_seq());
         print_summary(1, "Mark Stack Scanning",
                       body_summary->get_mark_stack_scan_seq());
-        print_summary(1, "Scan-Only Scanning",
-                      body_summary->get_scan_only_seq());
         print_summary(1, "Scan RS", body_summary->get_scan_rs_seq());
         print_summary(1, "Object Copy", body_summary->get_obj_copy_seq());
       }
@@ -2470,7 +2136,6 @@
             body_summary->get_update_rs_seq(),
             body_summary->get_ext_root_scan_seq(),
             body_summary->get_mark_stack_scan_seq(),
-            body_summary->get_scan_only_seq(),
             body_summary->get_scan_rs_seq(),
             body_summary->get_obj_copy_seq()
           };
@@ -2564,7 +2229,7 @@
 G1CollectorPolicy::should_add_next_region_to_young_list() {
   assert(in_young_gc_mode(), "should be in young GC mode");
   bool ret;
-  size_t young_list_length = _g1->young_list_length();
+  size_t young_list_length = _g1->young_list()->length();
   size_t young_list_max_length = _young_list_target_length;
   if (G1FixedEdenSize) {
     young_list_max_length -= _max_survivor_regions;
@@ -2607,9 +2272,6 @@
 // Calculates survivor space parameters.
 void G1CollectorPolicy::calculate_survivors_policy()
 {
-  if (!G1UseSurvivorSpaces) {
-    return;
-  }
   if (G1FixedSurvivorSpaceSize == 0) {
     _max_survivor_regions = _young_list_target_length / SurvivorRatio;
   } else {
@@ -2628,16 +2290,9 @@
 G1CollectorPolicy_BestRegionsFirst::should_do_collection_pause(size_t
                                                                word_size) {
   assert(_g1->regions_accounted_for(), "Region leakage!");
-  // Initiate a pause when we reach the steady-state "used" target.
-  size_t used_hard = (_g1->capacity() / 100) * G1SteadyStateUsed;
-  size_t used_soft =
-   MAX2((_g1->capacity() / 100) * (G1SteadyStateUsed - G1SteadyStateUsedDelta),
-        used_hard/2);
-  size_t used = _g1->used();
-
   double max_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
 
-  size_t young_list_length = _g1->young_list_length();
+  size_t young_list_length = _g1->young_list()->length();
   size_t young_list_max_length = _young_list_target_length;
   if (G1FixedEdenSize) {
     young_list_max_length -= _max_survivor_regions;
@@ -2646,7 +2301,7 @@
 
   if (in_young_gc_mode()) {
     if (reached_target_length) {
-      assert( young_list_length > 0 && _g1->young_list_length() > 0,
+      assert( young_list_length > 0 && _g1->young_list()->length() > 0,
               "invariant" );
       _target_pause_time_ms = max_pause_time_ms;
       return true;
@@ -2681,6 +2336,50 @@
 #endif
 
 void
+G1CollectorPolicy::decide_on_conc_mark_initiation() {
+  // We are about to decide on whether this pause will be an
+  // initial-mark pause.
+
+  // First, during_initial_mark_pause() should not already be set. We
+  // will set it here if we have to. However, it should be cleared by
+  // the end of the pause (it's only set for the duration of an
+  // initial-mark pause).
+  assert(!during_initial_mark_pause(), "pre-condition");
+
+  if (initiate_conc_mark_if_possible()) {
+    // We had noticed on a previous pause that the heap occupancy has
+    // gone over the initiating threshold and we should start a
+    // concurrent marking cycle. So we might initiate one.
+
+    bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
+    if (!during_cycle) {
+      // The concurrent marking thread is not "during a cycle", i.e.,
+      // it has completed the last one. So we can go ahead and
+      // initiate a new cycle.
+
+      set_during_initial_mark_pause();
+
+      // And we can now clear initiate_conc_mark_if_possible() as
+      // we've already acted on it.
+      clear_initiate_conc_mark_if_possible();
+    } else {
+      // The concurrent marking thread is still finishing up the
+      // previous cycle. If we started one right now, the two cycles
+      // would overlap. In particular, the concurrent marking thread might
+      // be in the process of clearing the next marking bitmap (which
+      // we will use for the next cycle if we start one). Starting a
+      // cycle now will be bad given that parts of the marking
+      // information might get cleared by the marking thread. And we
+      // cannot wait for the marking thread to finish the cycle as it
+      // periodically yields while clearing the next marking bitmap
+      // and, if it's in a yield point, it's waiting for us to
+      // finish. So, at this point we will not start a cycle and we'll
+      // let the concurrent marking thread complete the last one.
+    }
+  }
+}
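
decide_on_conc_mark_initiation() arbitrates between the two flags that replace the old _should_initiate_conc_mark. A self-contained model of that two-flag protocol; the struct below is a simplified stand-in for G1CollectorPolicy, and marking_thread_busy stands in for cmThread()->during_cycle():

    struct ConcMarkFlagsModel {
      volatile bool _initiate_conc_mark_if_possible;  // request noticed at the end of a pause
      volatile bool _during_initial_mark_pause;       // this pause is doing the initial-mark work
      bool          marking_thread_busy;              // stand-in for cmThread()->during_cycle()

      ConcMarkFlagsModel()
        : _initiate_conc_mark_if_possible(false),
          _during_initial_mark_pause(false),
          marking_thread_busy(false) {}

      // Mirrors decide_on_conc_mark_initiation(): act on a pending request
      // only if the previous marking cycle has fully wound down; otherwise
      // leave the request set so that a later pause can honour it.
      void decide() {
        if (_initiate_conc_mark_if_possible && !marking_thread_busy) {
          _during_initial_mark_pause      = true;
          _initiate_conc_mark_if_possible = false;
        }
      }
    };
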
+
+void
 G1CollectorPolicy_BestRegionsFirst::
 record_collection_pause_start(double start_time_sec, size_t start_used) {
   G1CollectorPolicy::record_collection_pause_start(start_time_sec, start_used);
@@ -2863,22 +2562,24 @@
   }
 }
 
-// Add the heap region to the collection set and return the conservative
-// estimate of the number of live bytes.
+// Add the heap region at the head of the non-incremental collection set
 void G1CollectorPolicy::
 add_to_collection_set(HeapRegion* hr) {
-  if (G1PrintRegions) {
-    gclog_or_tty->print_cr("added region to cset %d:["PTR_FORMAT", "PTR_FORMAT"], "
-                  "top "PTR_FORMAT", young %s",
-                  hr->hrs_index(), hr->bottom(), hr->end(),
-                  hr->top(), (hr->is_young()) ? "YES" : "NO");
+  assert(_inc_cset_build_state == Active, "Precondition");
+  assert(!hr->is_young(), "non-incremental add of young region");
+
+  if (G1PrintHeapRegions) {
+    gclog_or_tty->print_cr("added region to cset "
+                           "%d:["PTR_FORMAT", "PTR_FORMAT"], "
+                           "top "PTR_FORMAT", %s",
+                           hr->hrs_index(), hr->bottom(), hr->end(),
+                           hr->top(), hr->is_young() ? "YOUNG" : "NOT_YOUNG");
   }
 
   if (_g1->mark_in_progress())
     _g1->concurrent_mark()->registerCSetRegion(hr);
 
-  assert(!hr->in_collection_set(),
-              "should not already be in the CSet");
+  assert(!hr->in_collection_set(), "should not already be in the CSet");
   hr->set_in_collection_set(true);
   hr->set_next_in_collection_set(_collection_set);
   _collection_set = hr;
@@ -2887,10 +2588,230 @@
   _g1->register_region_with_in_cset_fast_test(hr);
 }
 
-void
-G1CollectorPolicy_BestRegionsFirst::
-choose_collection_set() {
-  double non_young_start_time_sec;
+// Initialize the per-collection-set information
+void G1CollectorPolicy::start_incremental_cset_building() {
+  assert(_inc_cset_build_state == Inactive, "Precondition");
+
+  _inc_cset_head = NULL;
+  _inc_cset_tail = NULL;
+  _inc_cset_size = 0;
+  _inc_cset_bytes_used_before = 0;
+
+  if (in_young_gc_mode()) {
+    _inc_cset_young_index = 0;
+  }
+
+  _inc_cset_max_finger = 0;
+  _inc_cset_recorded_young_bytes = 0;
+  _inc_cset_recorded_rs_lengths = 0;
+  _inc_cset_predicted_elapsed_time_ms = 0;
+  _inc_cset_predicted_bytes_to_copy = 0;
+  _inc_cset_build_state = Active;
+}
+
+void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
+  // This routine is used when:
+  // * adding survivor regions to the incremental cset at the end of an
+  //   evacuation pause,
+  // * adding the current allocation region to the incremental cset
+  //   when it is retired, and
+  // * updating existing policy information for a region in the
+  //   incremental cset via young list RSet sampling.
+  // Therefore this routine may be called at a safepoint by the
+  // VM thread, or in-between safepoints by mutator threads (when
+  // retiring the current allocation region) or a concurrent
+  // refine thread (RSet sampling).
+
+  double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
+  size_t used_bytes = hr->used();
+
+  _inc_cset_recorded_rs_lengths += rs_length;
+  _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
+
+  _inc_cset_bytes_used_before += used_bytes;
+
+  // Cache the values we have added to the aggregated information
+  // in the heap region in case we have to remove this region from
+  // the incremental collection set, or it is updated by the
+  // rset sampling code
+  hr->set_recorded_rs_length(rs_length);
+  hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
+
+#if PREDICTIONS_VERBOSE
+  size_t bytes_to_copy = predict_bytes_to_copy(hr);
+  _inc_cset_predicted_bytes_to_copy += bytes_to_copy;
+
+  // Record the number of bytes used in this region
+  _inc_cset_recorded_young_bytes += used_bytes;
+
+  // Cache the values we have added to the aggregated information
+  // in the heap region in case we have to remove this region from
+  // the incremental collection set, or it is updated by the
+  // rset sampling code
+  hr->set_predicted_bytes_to_copy(bytes_to_copy);
+#endif // PREDICTIONS_VERBOSE
+}
+
+void G1CollectorPolicy::remove_from_incremental_cset_info(HeapRegion* hr) {
+  // This routine is currently only called as part of the updating of
+  // existing policy information for regions in the incremental cset that
+  // is performed by the concurrent refine thread(s) as part of young list
+  // RSet sampling. Therefore we should not be at a safepoint.
+
+  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
+  assert(hr->is_young(), "it should be");
+
+  size_t used_bytes = hr->used();
+  size_t old_rs_length = hr->recorded_rs_length();
+  double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
+
+  // Subtract the old recorded/predicted policy information for
+  // the given heap region from the collection set info.
+  _inc_cset_recorded_rs_lengths -= old_rs_length;
+  _inc_cset_predicted_elapsed_time_ms -= old_elapsed_time_ms;
+
+  _inc_cset_bytes_used_before -= used_bytes;
+
+  // Clear the values cached in the heap region
+  hr->set_recorded_rs_length(0);
+  hr->set_predicted_elapsed_time_ms(0);
+
+#if PREDICTIONS_VERBOSE
+  size_t old_predicted_bytes_to_copy = hr->predicted_bytes_to_copy();
+  _inc_cset_predicted_bytes_to_copy -= old_predicted_bytes_to_copy;
+
+  // Subtract the number of bytes used in this region
+  _inc_cset_recorded_young_bytes -= used_bytes;
+
+  // Clear the values cached in the heap region
+  hr->set_predicted_bytes_to_copy(0);
+#endif // PREDICTIONS_VERBOSE
+}
+
+void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length) {
+  // Update the collection set information that is dependent on the new RS length
+  assert(hr->is_young(), "Precondition");
+
+  remove_from_incremental_cset_info(hr);
+  add_to_incremental_cset_info(hr, new_rs_length);
+}
+
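
update_incremental_cset_info() above is the subtract-then-add composition of the two routines before it. A hedged sketch of the sampling pass that might drive it; the loop only uses calls visible elsewhere in this changeset (get_next_young_region(), rem_set()->occupied()), but the real sampling loop lives in the young list / concurrent refinement code, outside these hunks:

    // Illustrative only: re-read each young region's RSet length and fold
    // the delta into the incremental cset totals.
    void sample_young_list_sketch(G1CollectorPolicy* policy, HeapRegion* head) {
      for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
        size_t new_rs_length = hr->rem_set()->occupied();
        policy->update_incremental_cset_info(hr, new_rs_length);
      }
    }
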
+void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
+  assert( hr->is_young(), "invariant");
+  assert( hr->young_index_in_cset() == -1, "invariant" );
+  assert(_inc_cset_build_state == Active, "Precondition");
+
+  // We need to clear and set the cached recorded collection set
+  // information in the heap region here (before the region gets added
+  // to the collection set). An individual heap region's cached values
+  // are calculated, aggregated with the policy collection set info,
+  // and cached in the heap region here (initially) and (subsequently)
+  // by the Young List sampling code.
+
+  size_t rs_length = hr->rem_set()->occupied();
+  add_to_incremental_cset_info(hr, rs_length);
+
+  HeapWord* hr_end = hr->end();
+  _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
+
+  assert(!hr->in_collection_set(), "invariant");
+  hr->set_in_collection_set(true);
+  assert( hr->next_in_collection_set() == NULL, "invariant");
+
+  _inc_cset_size++;
+  _g1->register_region_with_in_cset_fast_test(hr);
+
+  hr->set_young_index_in_cset((int) _inc_cset_young_index);
+  ++_inc_cset_young_index;
+}
+
+// Add the region to the RHS of the incremental cset
+void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
+  // We should only ever be appending survivors at the end of a pause
+  assert( hr->is_survivor(), "Logic");
+
+  // Do the 'common' stuff
+  add_region_to_incremental_cset_common(hr);
+
+  // Now add the region at the right hand side
+  if (_inc_cset_tail == NULL) {
+    assert(_inc_cset_head == NULL, "invariant");
+    _inc_cset_head = hr;
+  } else {
+    _inc_cset_tail->set_next_in_collection_set(hr);
+  }
+  _inc_cset_tail = hr;
+
+  if (G1PrintHeapRegions) {
+    gclog_or_tty->print_cr(" added region to incremental cset (RHS) "
+                  "%d:["PTR_FORMAT", "PTR_FORMAT"], "
+                  "top "PTR_FORMAT", young %s",
+                  hr->hrs_index(), hr->bottom(), hr->end(),
+                  hr->top(), (hr->is_young()) ? "YES" : "NO");
+  }
+}
+
+// Add the region to the LHS of the incremental cset
+void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
+  // Survivors should be added to the RHS at the end of a pause
+  assert(!hr->is_survivor(), "Logic");
+
+  // Do the 'common' stuff
+  add_region_to_incremental_cset_common(hr);
+
+  // Add the region at the left hand side
+  hr->set_next_in_collection_set(_inc_cset_head);
+  if (_inc_cset_head == NULL) {
+    assert(_inc_cset_tail == NULL, "Invariant");
+    _inc_cset_tail = hr;
+  }
+  _inc_cset_head = hr;
+
+  if (G1PrintHeapRegions) {
+    gclog_or_tty->print_cr(" added region to incremental cset (LHS) "
+                  "%d:["PTR_FORMAT", "PTR_FORMAT"], "
+                  "top "PTR_FORMAT", young %s",
+                  hr->hrs_index(), hr->bottom(), hr->end(),
+                  hr->top(), (hr->is_young()) ? "YES" : "NO");
+  }
+}
+
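
Taken together, the RHS and LHS variants keep survivors at the tail of the incremental cset and freshly retired eden regions at the head. A small worked example of the resulting order (the region names E1/E2/S1/S2 are made up):

    // At the end of pause N, survivors are appended on the right:
    add_region_to_incremental_cset_rhs(S1);   // cset: [S1]
    add_region_to_incremental_cset_rhs(S2);   // cset: [S1, S2]
    // Between pauses, retired allocation (eden) regions go on the left:
    add_region_to_incremental_cset_lhs(E1);   // cset: [E1, S1, S2]
    add_region_to_incremental_cset_lhs(E2);   // cset: [E2, E1, S1, S2]
    // i.e. [Newly Young Regions ++ Survivors from last pause], matching the
    // young-list layout described in choose_collection_set() below.
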
+#ifndef PRODUCT
+void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
+  assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
+
+  st->print_cr("\nCollection_set:");
+  HeapRegion* csr = list_head;
+  while (csr != NULL) {
+    HeapRegion* next = csr->next_in_collection_set();
+    assert(csr->in_collection_set(), "bad CS");
+    st->print_cr("  [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
+                 "age: %4d, y: %d, surv: %d",
+                        csr->bottom(), csr->end(),
+                        csr->top(),
+                        csr->prev_top_at_mark_start(),
+                        csr->next_top_at_mark_start(),
+                        csr->top_at_conc_mark_count(),
+                        csr->age_in_surv_rate_group_cond(),
+                        csr->is_young(),
+                        csr->is_survivor());
+    csr = next;
+  }
+}
+#endif // !PRODUCT
+
+bool
+G1CollectorPolicy_BestRegionsFirst::choose_collection_set() {
+  // Set this here - in case we're not doing young collections.
+  double non_young_start_time_sec = os::elapsedTime();
+
+  // The result that this routine will return. This will be set to
+  // false if:
+  // * we're doing a young or partially young collection and we
+  //   have added the young regions to the collection set, or
+  // * we add old regions to the collection set.
+  bool abandon_collection = true;
+
   start_recording_regions();
 
   guarantee(_target_pause_time_ms > -1.0
@@ -2943,47 +2864,79 @@
 
     if (G1PolicyVerbose > 0) {
       gclog_or_tty->print_cr("Adding %d young regions to the CSet",
-                    _g1->young_list_length());
+                    _g1->young_list()->length());
     }
+
     _young_cset_length  = 0;
     _last_young_gc_full = full_young_gcs() ? true : false;
+
     if (_last_young_gc_full)
       ++_full_young_pause_num;
     else
       ++_partial_young_pause_num;
-    hr = _g1->pop_region_from_young_list();
+
+    // The young list is laid out with the survivor regions from the
+    // previous pause appended to its RHS, i.e.
+    //   [Newly Young Regions ++ Survivors from last pause].
+
+    hr = _g1->young_list()->first_survivor_region();
     while (hr != NULL) {
-
-      assert( hr->young_index_in_cset() == -1, "invariant" );
-      assert( hr->age_in_surv_rate_group() != -1, "invariant" );
-      hr->set_young_index_in_cset((int) _young_cset_length);
-
-      ++_young_cset_length;
-      double predicted_time_ms = predict_region_elapsed_time_ms(hr, true);
-      time_remaining_ms -= predicted_time_ms;
-      predicted_pause_time_ms += predicted_time_ms;
-      assert(!hr->in_collection_set(), "invariant");
-      add_to_collection_set(hr);
-      record_cset_region(hr, true);
-      max_live_bytes -= MIN2(hr->max_live_bytes(), max_live_bytes);
-      if (G1PolicyVerbose > 0) {
-        gclog_or_tty->print_cr("  Added [" PTR_FORMAT ", " PTR_FORMAT") to CS.",
-                      hr->bottom(), hr->end());
-        gclog_or_tty->print_cr("    (" SIZE_FORMAT " KB left in heap.)",
-                      max_live_bytes/K);
-      }
-      hr = _g1->pop_region_from_young_list();
+      assert(hr->is_survivor(), "badly formed young list");
+      hr->set_young();
+      hr = hr->get_next_young_region();
     }
 
-    record_scan_only_regions(_g1->young_list_scan_only_length());
+    // Clear the fields that point to the survivor list - they are
+    // all young now.
+    _g1->young_list()->clear_survivors();
+
+    if (_g1->mark_in_progress())
+      _g1->concurrent_mark()->register_collection_set_finger(_inc_cset_max_finger);
+
+    _young_cset_length = _inc_cset_young_index;
+    _collection_set = _inc_cset_head;
+    _collection_set_size = _inc_cset_size;
+    _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
+
+    // For young regions in the collection set, we assume the worst
+    // case of complete survival
+    max_live_bytes -= _inc_cset_size * HeapRegion::GrainBytes;
+
+    time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
+    predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
+
+    // The number of recorded young regions is the incremental
+    // collection set's current size
+    set_recorded_young_regions(_inc_cset_size);
+    set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
+    set_recorded_young_bytes(_inc_cset_recorded_young_bytes);
+#if PREDICTIONS_VERBOSE
+    set_predicted_bytes_to_copy(_inc_cset_predicted_bytes_to_copy);
+#endif // PREDICTIONS_VERBOSE
+
+    if (G1PolicyVerbose > 0) {
+      gclog_or_tty->print_cr("  Added " SIZE_FORMAT " Young Regions to CS.",
+                             _inc_cset_size);
+      gclog_or_tty->print_cr("    (" SIZE_FORMAT " KB left in heap.)",
+                            max_live_bytes/K);
+    }
+
+    assert(_inc_cset_size == _g1->young_list()->length(), "Invariant");
+    if (_inc_cset_size > 0) {
+      assert(_collection_set != NULL, "Invariant");
+      abandon_collection = false;
+    }
 
     double young_end_time_sec = os::elapsedTime();
     _recorded_young_cset_choice_time_ms =
       (young_end_time_sec - young_start_time_sec) * 1000.0;
 
-    non_young_start_time_sec = os::elapsedTime();
-
-    if (_young_cset_length > 0 && _last_young_gc_full) {
+    // We are doing young collections so reset this.
+    non_young_start_time_sec = young_end_time_sec;
+
+    // Note: we can use either _collection_set_size or
+    // _young_cset_length here.
+    if (_collection_set_size > 0 && _last_young_gc_full) {
       // don't bother adding more regions...
       goto choose_collection_set_end;
     }
@@ -2993,6 +2946,11 @@
     bool should_continue = true;
     NumberSeq seq;
     double avg_prediction = 100000000000000000.0; // something very large
+
+    // Save the current size of the collection set to detect
+    // if we actually added any old regions.
+    size_t n_young_regions = _collection_set_size;
+
     do {
       hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
                                                       avg_prediction);
@@ -3001,7 +2959,7 @@
         time_remaining_ms -= predicted_time_ms;
         predicted_pause_time_ms += predicted_time_ms;
         add_to_collection_set(hr);
-        record_cset_region(hr, false);
+        record_non_young_cset_region(hr);
         max_live_bytes -= MIN2(hr->max_live_bytes(), max_live_bytes);
         if (G1PolicyVerbose > 0) {
           gclog_or_tty->print_cr("    (" SIZE_FORMAT " KB left in heap.)",
@@ -3019,9 +2977,17 @@
     if (!adaptive_young_list_length() &&
         _collection_set_size < _young_list_fixed_length)
       _should_revert_to_full_young_gcs  = true;
+
+    if (_collection_set_size > n_young_regions) {
+      // We actually added old regions to the collection set
+      // so we are not abandoning this collection.
+      abandon_collection = false;
+    }
   }
 
 choose_collection_set_end:
+  stop_incremental_cset_building();
+
   count_CS_bytes_used();
 
   end_recording_regions();
@@ -3029,6 +2995,8 @@
   double non_young_end_time_sec = os::elapsedTime();
   _recorded_non_young_cset_choice_time_ms =
     (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
+
+  return abandon_collection;
 }
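
choose_collection_set() now reports whether the pause should be abandoned instead of leaving that to the caller's own bookkeeping. The caller lives in G1CollectedHeap, outside these hunks, so the shape below is an assumption rather than the actual call site:

    // Assumed caller shape: skip the evacuation work entirely when the
    // policy added no regions (young or old) to the collection set.
    bool abandoned = g1_policy()->choose_collection_set();
    if (abandoned) {
      // release the pause without evacuating anything
    } else {
      // evacuate the regions linked via next_in_collection_set()
    }
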
 
 void G1CollectorPolicy_BestRegionsFirst::record_full_collection_end() {
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -61,7 +61,6 @@
   define_num_seq(parallel) // parallel only
     define_num_seq(ext_root_scan)
     define_num_seq(mark_stack_scan)
-    define_num_seq(scan_only)
     define_num_seq(update_rs)
     define_num_seq(scan_rs)
     define_num_seq(scan_new_refs) // Only for temp use; added to
@@ -174,8 +173,6 @@
 
   double* _par_last_ext_root_scan_times_ms;
   double* _par_last_mark_stack_scan_times_ms;
-  double* _par_last_scan_only_times_ms;
-  double* _par_last_scan_only_regions_scanned;
   double* _par_last_update_rs_start_times_ms;
   double* _par_last_update_rs_times_ms;
   double* _par_last_update_rs_processed_buffers;
@@ -196,7 +193,6 @@
   bool _adaptive_young_list_length;
   size_t _young_list_min_length;
   size_t _young_list_target_length;
-  size_t _young_list_so_prefix_length;
   size_t _young_list_fixed_length;
 
   size_t _young_cset_length;
@@ -215,6 +211,8 @@
   SurvRateGroup*        _survivor_surv_rate_group;
   // add here any more surv rate groups
 
+  double                _gc_overhead_perc;
+
   bool during_marking() {
     return _during_marking;
   }
@@ -232,7 +230,6 @@
   TruncatedSeq* _pending_card_diff_seq;
   TruncatedSeq* _rs_length_diff_seq;
   TruncatedSeq* _cost_per_card_ms_seq;
-  TruncatedSeq* _cost_per_scan_only_region_ms_seq;
   TruncatedSeq* _fully_young_cards_per_entry_ratio_seq;
   TruncatedSeq* _partially_young_cards_per_entry_ratio_seq;
   TruncatedSeq* _cost_per_entry_ms_seq;
@@ -247,19 +244,16 @@
   TruncatedSeq* _rs_lengths_seq;
 
   TruncatedSeq* _cost_per_byte_ms_during_cm_seq;
-  TruncatedSeq* _cost_per_scan_only_region_ms_during_cm_seq;
 
   TruncatedSeq* _young_gc_eff_seq;
 
   TruncatedSeq* _max_conc_overhead_seq;
 
   size_t _recorded_young_regions;
-  size_t _recorded_scan_only_regions;
   size_t _recorded_non_young_regions;
   size_t _recorded_region_num;
 
   size_t _free_regions_at_end_of_collection;
-  size_t _scan_only_regions_at_end_of_collection;
 
   size_t _recorded_rs_lengths;
   size_t _max_rs_lengths;
@@ -275,7 +269,6 @@
   double _predicted_survival_ratio;
   double _predicted_rs_update_time_ms;
   double _predicted_rs_scan_time_ms;
-  double _predicted_scan_only_scan_time_ms;
   double _predicted_object_copy_time_ms;
   double _predicted_constant_other_time_ms;
   double _predicted_young_other_time_ms;
@@ -342,8 +335,6 @@
   bool verify_young_ages();
 #endif // PRODUCT
 
-  void tag_scan_only(size_t short_lived_scan_only_length);
-
   double get_new_prediction(TruncatedSeq* seq) {
     return MAX2(seq->davg() + sigma() * seq->dsd(),
                 seq->davg() * confidence_factor(seq->num()));
@@ -429,23 +420,6 @@
         get_new_prediction(_partially_young_cost_per_entry_ms_seq);
   }
 
-  double predict_scan_only_time_ms_during_cm(size_t scan_only_region_num) {
-    if (_cost_per_scan_only_region_ms_during_cm_seq->num() < 3)
-      return 1.5 * (double) scan_only_region_num *
-        get_new_prediction(_cost_per_scan_only_region_ms_seq);
-    else
-      return (double) scan_only_region_num *
-        get_new_prediction(_cost_per_scan_only_region_ms_during_cm_seq);
-  }
-
-  double predict_scan_only_time_ms(size_t scan_only_region_num) {
-    if (_in_marking_window_im)
-      return predict_scan_only_time_ms_during_cm(scan_only_region_num);
-    else
-      return (double) scan_only_region_num *
-        get_new_prediction(_cost_per_scan_only_region_ms_seq);
-  }
-
   double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
     if (_cost_per_byte_ms_during_cm_seq->num() < 3)
       return 1.1 * (double) bytes_to_copy *
@@ -488,24 +462,21 @@
   size_t predict_bytes_to_copy(HeapRegion* hr);
   double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
 
-  // for use by: calculate_optimal_so_length(length)
-  void predict_gc_eff(size_t young_region_num,
-                      size_t so_length,
-                      double base_time_ms,
-                      double *gc_eff,
-                      double *pause_time_ms);
-
-  // for use by: calculate_young_list_target_config(rs_length)
-  bool predict_gc_eff(size_t young_region_num,
-                      size_t so_length,
-                      double base_time_with_so_ms,
-                      size_t init_free_regions,
-                      double target_pause_time_ms,
-                      double* gc_eff);
+  // for use by: calculate_young_list_target_length(rs_length)
+  bool predict_will_fit(size_t young_region_num,
+                        double base_time_ms,
+                        size_t init_free_regions,
+                        double target_pause_time_ms);
 
   void start_recording_regions();
-  void record_cset_region(HeapRegion* hr, bool young);
-  void record_scan_only_regions(size_t scan_only_length);
+  void record_cset_region_info(HeapRegion* hr, bool young);
+  void record_non_young_cset_region(HeapRegion* hr);
+
+  void set_recorded_young_regions(size_t n_regions);
+  void set_recorded_young_bytes(size_t bytes);
+  void set_recorded_rs_lengths(size_t rs_lengths);
+  void set_predicted_bytes_to_copy(size_t bytes);
+
   void end_recording_regions();
 
   void record_vtime_diff_ms(double vtime_diff_ms) {
@@ -636,11 +607,74 @@
   void update_recent_gc_times(double end_time_sec, double elapsed_ms);
 
   // The head of the list (via "next_in_collection_set()") representing the
-  // current collection set.
+  // current collection set. Set from the incrementally built collection
+  // set at the start of the pause.
   HeapRegion* _collection_set;
+
+  // The number of regions in the collection set. Set from the incrementally
+  // built collection set at the start of an evacuation pause.
   size_t _collection_set_size;
+
+  // The number of bytes in the collection set before the pause. Set from
+  // the incrementally built collection set at the start of an evacuation
+  // pause.
   size_t _collection_set_bytes_used_before;
 
+  // The associated information that is maintained while the incremental
+  // collection set is being built with young regions. Used to populate
+  // the recorded info for the evacuation pause.
+
+  enum CSetBuildType {
+    Active,             // We are actively building the collection set
+    Inactive            // We are not actively building the collection set
+  };
+
+  CSetBuildType _inc_cset_build_state;
+
+  // The head of the incrementally built collection set.
+  HeapRegion* _inc_cset_head;
+
+  // The tail of the incrementally built collection set.
+  HeapRegion* _inc_cset_tail;
+
+  // The number of regions in the incrementally built collection set.
+  // Used to set _collection_set_size at the start of an evacuation
+  // pause.
+  size_t _inc_cset_size;
+
+  // Used as the index in the surviving young words structure
+  // which tracks the amount of space, for each young region,
+  // that survives the pause.
+  size_t _inc_cset_young_index;
+
+  // The number of bytes in the incrementally built collection set.
+  // Used to set _collection_set_bytes_used_before at the start of
+  // an evacuation pause.
+  size_t _inc_cset_bytes_used_before;
+
+  // Used to record the highest end of heap region in collection set
+  HeapWord* _inc_cset_max_finger;
+
+  // The number of recorded used bytes in the young regions
+  // of the collection set. This is the sum of the used() bytes
+  // of retired young regions in the collection set.
+  size_t _inc_cset_recorded_young_bytes;
+
+  // The RSet lengths recorded for regions in the collection set
+  // (updated by the periodic sampling of the regions in the
+  // young list/collection set).
+  size_t _inc_cset_recorded_rs_lengths;
+
+  // The predicted elapsed time it will take to collect the regions
+  // in the collection set (updated by the periodic sampling of the
+  // regions in the young list/collection set).
+  double _inc_cset_predicted_elapsed_time_ms;
+
+  // The predicted bytes to copy for the regions in the collection
+  // set (updated by the periodic sampling of the regions in the
+  // young list/collection set).
+  size_t _inc_cset_predicted_bytes_to_copy;
+
   // Info about marking.
   int _n_marks; // Sticky at 2, so we know when we've done at least 2.
 
@@ -722,11 +756,31 @@
 
   size_t _n_marks_since_last_pause;
 
-  // True iff CM has been initiated.
-  bool _conc_mark_initiated;
+  // At the end of a pause we check the heap occupancy and we decide
+  // whether we will start a marking cycle during the next pause. If
+  // we decide that we want to do that, we will set this parameter to
+  // true. So, this parameter will stay true between the end of a
+  // pause and the beginning of a subsequent pause (not necessarily
+  // the next one, see the comments on the next field) when we decide
+  // that we will indeed start a marking cycle and do the initial-mark
+  // work.
+  volatile bool _initiate_conc_mark_if_possible;
 
-  // True iff CM should be initiated
-  bool _should_initiate_conc_mark;
+  // If initiate_conc_mark_if_possible() is set at the beginning of a
+  // pause, it is a suggestion that the pause should start a marking
+  // cycle by doing the initial-mark work. However, it is possible
+  // that the concurrent marking thread is still finishing up the
+  // previous marking cycle (e.g., clearing the next marking
+  // bitmap). If that is the case we cannot start a new cycle and
+  // we'll have to wait for the concurrent marking thread to finish
+  // what it is doing. In this case we will postpone the marking cycle
+  // initiation decision for the next pause. When we eventually decide
+  // to start a cycle, we will set _during_initial_mark_pause which
+  // will stay true until the end of the initial-mark pause and it's
+  // the condition that indicates that a pause is doing the
+  // initial-mark work.
+  volatile bool _during_initial_mark_pause;
+
   bool _should_revert_to_full_young_gcs;
   bool _last_full_young_gc;
 
@@ -739,9 +793,8 @@
   double _mark_closure_time_ms;
 
   void   calculate_young_list_min_length();
-  void   calculate_young_list_target_config();
-  void   calculate_young_list_target_config(size_t rs_lengths);
-  size_t calculate_optimal_so_length(size_t young_list_length);
+  void   calculate_young_list_target_length();
+  void   calculate_young_list_target_length(size_t rs_lengths);
 
 public:
 
@@ -846,11 +899,6 @@
     _par_last_mark_stack_scan_times_ms[worker_i] = ms;
   }
 
-  void record_scan_only_time(int worker_i, double ms, int n) {
-    _par_last_scan_only_times_ms[worker_i] = ms;
-    _par_last_scan_only_regions_scanned[worker_i] = (double) n;
-  }
-
   void record_satb_drain_time(double ms) {
     _cur_satb_drain_time_ms = ms;
     _satb_drain_time_set    = true;
@@ -965,23 +1013,82 @@
   // Choose a new collection set.  Marks the chosen regions as being
   // "in_collection_set", and links them together.  The head and number of
   // the collection set are available via access methods.
-  virtual void choose_collection_set() = 0;
-
-  void clear_collection_set() { _collection_set = NULL; }
+  virtual bool choose_collection_set() = 0;
 
   // The head of the list (via "next_in_collection_set()") representing the
   // current collection set.
   HeapRegion* collection_set() { return _collection_set; }
 
+  void clear_collection_set() { _collection_set = NULL; }
+
   // The number of elements in the current collection set.
   size_t collection_set_size() { return _collection_set_size; }
 
   // Add "hr" to the CS.
   void add_to_collection_set(HeapRegion* hr);
 
-  bool should_initiate_conc_mark()      { return _should_initiate_conc_mark; }
-  void set_should_initiate_conc_mark()  { _should_initiate_conc_mark = true; }
-  void unset_should_initiate_conc_mark(){ _should_initiate_conc_mark = false; }
+  // Incremental CSet Support
+
+  // The head of the incrementally built collection set.
+  HeapRegion* inc_cset_head() { return _inc_cset_head; }
+
+  // The tail of the incrementally built collection set.
+  HeapRegion* inc_cset_tail() { return _inc_cset_tail; }
+
+  // The number of elements in the incrementally built collection set.
+  size_t inc_cset_size() { return _inc_cset_size; }
+
+  // Initialize incremental collection set info.
+  void start_incremental_cset_building();
+
+  void clear_incremental_cset() {
+    _inc_cset_head = NULL;
+    _inc_cset_tail = NULL;
+  }
+
+  // Stop adding regions to the incremental collection set
+  void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }
+
+  // Add/remove information about hr to the aggregated information
+  // for the incrementally built collection set.
+  void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);
+  void remove_from_incremental_cset_info(HeapRegion* hr);
+
+  // Update information about hr in the aggregated information for
+  // the incrementally built collection set.
+  void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);
+
+private:
+  // Update the incremental cset information when adding a region
+  // (should not be called directly).
+  void add_region_to_incremental_cset_common(HeapRegion* hr);
+
+public:
+  // Add hr to the LHS of the incremental collection set.
+  void add_region_to_incremental_cset_lhs(HeapRegion* hr);
+
+  // Add hr to the RHS of the incremental collection set.
+  void add_region_to_incremental_cset_rhs(HeapRegion* hr);
+
+#ifndef PRODUCT
+  void print_collection_set(HeapRegion* list_head, outputStream* st);
+#endif // !PRODUCT
+
+  bool initiate_conc_mark_if_possible()       { return _initiate_conc_mark_if_possible;  }
+  void set_initiate_conc_mark_if_possible()   { _initiate_conc_mark_if_possible = true;  }
+  void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }
+
+  bool during_initial_mark_pause()      { return _during_initial_mark_pause;  }
+  void set_during_initial_mark_pause()  { _during_initial_mark_pause = true;  }
+  void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; }
+
+  // This is called at the very beginning of an evacuation pause (it
+  // has to be the first thing that the pause does). If
+  // initiate_conc_mark_if_possible() is true, and the concurrent
+  // marking thread has completed its work during the previous cycle,
+  // it will set during_initial_mark_pause() so that the pause does
+  // the initial-mark work and starts a marking cycle.
+  void decide_on_conc_mark_initiation();
 
   // If an expansion would be appropriate, because recent GC overhead had
   // exceeded the desired limit, return an amount to expand by.
@@ -1157,7 +1264,7 @@
   // If the estimated is less then desirable, resize if possible.
   void expand_if_possible(size_t numRegions);
 
-  virtual void choose_collection_set();
+  virtual bool choose_collection_set();
   virtual void record_collection_pause_start(double start_time_sec,
                                              size_t start_used);
   virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
--- a/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp	Thu May 13 18:26:35 2010 -0700
@@ -88,13 +88,13 @@
     //     the time slice than what's allowed)
     //   consolidate the two entries with the minimum gap between them
     //     (this might allow less GC time than what's allowed)
-    guarantee(NOT_PRODUCT(ScavengeALot ||) G1ForgetfulMMUTracker,
-              "array full, currently we can't recover unless +G1ForgetfulMMUTracker");
+    guarantee(NOT_PRODUCT(ScavengeALot ||) G1UseFixedWindowMMUTracker,
+              "array full, currently we can't recover unless +G1UseFixedWindowMMUTracker");
     // In the case where ScavengeALot is true, such overflow is not
     // uncommon; in such cases, we can, without much loss of precision
     // or performance (we are GC'ing most of the time anyway!),
     // simply overwrite the oldest entry in the tracker: this
-    // is also the behaviour when G1ForgetfulMMUTracker is enabled.
+    // is also the behaviour when G1UseFixedWindowMMUTracker is enabled.
     _head_index = trim_index(_head_index + 1);
     assert(_head_index == _tail_index, "Because we have a full circular buffer");
     _tail_index = trim_index(_tail_index + 1);
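
trim_index() itself is not shown in this hunk; assuming it is the usual circular-buffer normalization, the overwrite-on-overflow behaviour amounts to the following sketch (not HotSpot's actual code):

    // Assumed circular-buffer arithmetic: normalizes an index into
    // [0, queue_length), so trim_index_model(i + 1, N) wraps N-1 back to 0.
    static int trim_index_model(int index, int queue_length) {
      return (index + queue_length) % queue_length;
    }
    // On overflow, head and tail both advance by one: the buffer stays full
    // and the oldest pause record is overwritten by the newest.
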
--- a/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp	Thu May 13 18:26:35 2010 -0700
@@ -101,7 +101,7 @@
   // If the array is full, an easy fix is to look for the pauses with
   // the shortest gap between them and consolidate them.
   // For now, we have taken the expedient alternative of forgetting
-  // the oldest entry in the event that +G1ForgetfulMMUTracker, thus
+  // the oldest entry when +G1UseFixedWindowMMUTracker is set, thus
   // potentially violating MMU specs for some time thereafter.
 
   G1MMUTrackerQueueElem _array[QueueLength];
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,12 @@
                                       bool clear_all_softrefs) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
 
+  SharedHeap* sh = SharedHeap::heap();
+#ifdef ASSERT
+  if (sh->collector_policy()->should_clear_all_soft_refs()) {
+    assert(clear_all_softrefs, "Policy should have been checked earlier");
+  }
+#endif
   // hook up weak ref data so it can be used during Mark-Sweep
   assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
   assert(rp != NULL, "should be non-NULL");
@@ -44,7 +50,6 @@
 
   // Increment the invocation count for the permanent generation, since it is
   // implicitly collected whenever we do a full mark sweep collection.
-  SharedHeap* sh = SharedHeap::heap();
   sh->perm_gen()->stat_record()->invocations++;
 
   bool marked_for_unloading = false;
@@ -101,6 +106,8 @@
 
   GenMarkSweep::_marking_stack =
     new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);
+  GenMarkSweep::_objarray_stack =
+    new (ResourceObj::C_HEAP) GrowableArray<ObjArrayTask>(50, true);
 
   int size = SystemDictionary::number_of_classes() * 2;
   GenMarkSweep::_revisit_klass_stack =
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Thu May 13 18:26:35 2010 -0700
@@ -467,7 +467,7 @@
     // and they are causing failures. When we resolve said race
     // conditions, we'll revert back to parallel remembered set
     // updating and scanning. See CRs 6677707 and 6677708.
-    if (G1ParallelRSetUpdatingEnabled || (worker_i == 0)) {
+    if (G1UseParallelRSetUpdating || (worker_i == 0)) {
       updateRS(worker_i);
       scanNewRefsRS(oc, worker_i);
     } else {
@@ -476,7 +476,7 @@
       _g1p->record_update_rs_time(worker_i, 0.0);
       _g1p->record_scan_new_refs_time(worker_i, 0.0);
     }
-    if (G1ParallelRSetScanningEnabled || (worker_i == 0)) {
+    if (G1UseParallelRSetScanning || (worker_i == 0)) {
       scanRS(oc, worker_i);
     } else {
       _g1p->record_scan_rs_start_time(worker_i, os::elapsedTime() * 1000.0);
--- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Thu May 13 18:26:35 2010 -0700
@@ -35,7 +35,7 @@
 
 void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
   assert(pre_val->is_oop_or_null(true), "Error");
-  if (!JavaThread::satb_mark_queue_set().active()) return;
+  if (!JavaThread::satb_mark_queue_set().is_active()) return;
   Thread* thr = Thread::current();
   if (thr->is_Java_thread()) {
     JavaThread* jt = (JavaThread*)thr;
@@ -51,7 +51,7 @@
 G1SATBCardTableModRefBS::write_ref_field_pre_static(T* field,
                                                     oop new_val,
                                                     JavaThread* jt) {
-  if (!JavaThread::satb_mark_queue_set().active()) return;
+  if (!JavaThread::satb_mark_queue_set().is_active()) return;
   T heap_oop = oopDesc::load_heap_oop(field);
   if (!oopDesc::is_null(heap_oop)) {
     oop pre_val = oopDesc::decode_heap_oop_not_null(heap_oop);
@@ -62,7 +62,7 @@
 
 template <class T> void
 G1SATBCardTableModRefBS::write_ref_array_pre_work(T* dst, int count) {
-  if (!JavaThread::satb_mark_queue_set().active()) return;
+  if (!JavaThread::satb_mark_queue_set().is_active()) return;
   T* elem_ptr = dst;
   for (int i = 0; i < count; i++, elem_ptr++) {
     T heap_oop = oopDesc::load_heap_oop(elem_ptr);
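
All three call sites above follow the SATB pre-write barrier pattern: while marking is active, the value about to be overwritten is enqueued so the marker still sees the snapshot-at-the-beginning object graph. A condensed sketch of the pattern using stand-in types rather than HotSpot's:

#include <cstddef>
#include <vector>

struct Obj;

struct SATBQueueSetSketch {
  bool _all_active;
  std::vector<Obj*> _buf;
  SATBQueueSetSketch() : _all_active(false) {}
  bool is_active() const { return _all_active; }
  void enqueue(Obj* pre_val) { _buf.push_back(pre_val); }
};

void write_ref_field_pre(SATBQueueSetSketch& qs, Obj** field) {
  if (!qs.is_active()) return;   // barrier is a no-op outside marking
  Obj* pre_val = *field;
  if (pre_val != NULL) {
    qs.enqueue(pre_val);         // record the edge being overwritten
  }
}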
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,24 +28,15 @@
 
 #define G1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw) \
                                                                             \
-  product(intx, G1ParallelGCAllocBufferSize, 8*K,                           \
-          "Size of parallel G1 allocation buffers in to-space.")            \
-                                                                            \
   product(intx, G1ConfidencePercent, 50,                                    \
           "Confidence level for MMU/pause predictions")                     \
                                                                             \
   develop(intx, G1MarkingOverheadPercent, 0,                                \
           "Overhead of concurrent marking")                                 \
                                                                             \
-  product(uintx, G1YoungGenSize, 0,                                         \
-          "Size of the G1 young generation, 0 is the adaptive policy")      \
-                                                                            \
   develop(bool, G1Gen, true,                                                \
           "If true, it will enable the generational G1")                    \
                                                                             \
-  develop(intx, G1GCPercent, 10,                                            \
-          "The desired percent time spent on GC")                           \
-                                                                            \
   develop(intx, G1PolicyVerbose, 0,                                         \
           "The verbosity level on G1 policy decisions")                     \
                                                                             \
@@ -70,7 +61,7 @@
   develop(intx, G1PausesBtwnConcMark, -1,                                   \
           "If positive, fixed number of pauses between conc markings")      \
                                                                             \
-  diagnostic(bool, G1SummarizeConcurrentMark, false,                        \
+  diagnostic(bool, G1SummarizeConcMark, false,                              \
           "Summarize concurrent mark info")                                 \
                                                                             \
   diagnostic(bool, G1SummarizeRSetStats, false,                             \
@@ -85,12 +76,9 @@
   diagnostic(bool, G1SummarizeZFStats, false,                               \
           "Summarize zero-filling info")                                    \
                                                                             \
-  diagnostic(bool, G1TraceConcurrentRefinement, false,                      \
+  diagnostic(bool, G1TraceConcRefinement, false,                            \
           "Trace G1 concurrent refinement")                                 \
                                                                             \
-  product(intx, G1MarkStackSize, 2 * 1024 * 1024,                           \
-          "Size of the mark stack for concurrent marking.")                 \
-                                                                            \
   product(intx, G1MarkRegionStackSize, 1024 * 1024,                         \
           "Size of the region stack for concurrent marking.")               \
                                                                             \
@@ -100,20 +88,13 @@
   develop(intx, G1ConcZFMaxRegions, 1,                                      \
           "Stop zero-filling when # of zf'd regions reaches")               \
                                                                             \
-  product(intx, G1SteadyStateUsed, 90,                                      \
-          "If non-0, try to maintain 'used' at this pct (of max)")          \
-                                                                            \
-  product(intx, G1SteadyStateUsedDelta, 30,                                 \
-          "If G1SteadyStateUsed is non-0, then do pause this number of "    \
-          "of percentage points earlier if no marking is in progress.")     \
-                                                                            \
   develop(bool, G1SATBBarrierPrintNullPreVals, false,                       \
           "If true, count frac of ptr writes with null pre-vals.")          \
                                                                             \
-  product(intx, G1SATBLogBufferSize, 1*K,                                   \
+  product(intx, G1SATBBufferSize, 1*K,                                      \
           "Number of entries in an SATB log buffer.")                       \
                                                                             \
-  product(intx, G1SATBProcessCompletedThreshold, 20,                        \
+  develop(intx, G1SATBProcessCompletedThreshold, 20,                        \
           "Number of completed buffers that triggers log processing.")      \
                                                                             \
   develop(intx, G1ExtraRegionSurvRate, 33,                                  \
@@ -127,7 +108,7 @@
   develop(bool, G1SATBPrintStubs, false,                                    \
           "If true, print generated stubs for the SATB barrier")            \
                                                                             \
-  product(intx, G1ExpandByPercentOfAvailable, 20,                           \
+  experimental(intx, G1ExpandByPercentOfAvailable, 20,                      \
           "When expanding, % of uncommitted space to claim.")               \
                                                                             \
   develop(bool, G1RSBarrierRegionFilter, true,                              \
@@ -165,36 +146,36 @@
   product(intx, G1UpdateBufferSize, 256,                                    \
           "Size of an update buffer")                                       \
                                                                             \
-  product(intx, G1ConcRefineYellowZone, 0,                                  \
+  product(intx, G1ConcRefinementYellowZone, 0,                              \
           "Number of enqueued update buffers that will "                    \
           "trigger concurrent processing. Will be selected ergonomically "  \
           "by default.")                                                    \
                                                                             \
-  product(intx, G1ConcRefineRedZone, 0,                                     \
+  product(intx, G1ConcRefinementRedZone, 0,                                 \
           "Maximum number of enqueued update buffers before mutator "       \
           "threads start processing new ones instead of enqueueing them. "  \
           "Will be selected ergonomically by default. Zero will disable "   \
           "concurrent processing.")                                         \
                                                                             \
-  product(intx, G1ConcRefineGreenZone, 0,                                   \
+  product(intx, G1ConcRefinementGreenZone, 0,                               \
           "The number of update buffers that are left in the queue by the " \
           "concurrent processing threads. Will be selected ergonomically "  \
           "by default.")                                                    \
                                                                             \
-  product(intx, G1ConcRefineServiceInterval, 300,                           \
+  product(intx, G1ConcRefinementServiceIntervalMillis, 300,                 \
           "The last concurrent refinement thread wakes up every "           \
           "specified number of milliseconds to do miscellaneous work.")     \
                                                                             \
-  product(intx, G1ConcRefineThresholdStep, 0,                               \
+  product(intx, G1ConcRefinementThresholdStep, 0,                           \
           "Each time the rset update queue increases by this amount "       \
           "activate the next refinement thread if available. "              \
           "Will be selected ergonomically by default.")                     \
                                                                             \
-  product(intx, G1RSUpdatePauseFractionPercent, 10,                         \
+  product(intx, G1RSetUpdatingPauseTimePercent, 10,                         \
           "A target percentage of time that is allowed to be spend on "     \
           "process RS update buffers during the collection pause.")         \
                                                                             \
-  product(bool, G1AdaptiveConcRefine, true,                                 \
+  product(bool, G1UseAdaptiveConcRefinement, true,                          \
           "Select green, yellow and red zones adaptively to meet the "      \
           "the pause requirements.")                                        \
                                                                             \
@@ -245,15 +226,11 @@
           "the number of regions for which we'll print a surv rate "        \
           "summary.")                                                       \
                                                                             \
-  product(bool, G1UseScanOnlyPrefix, false,                                 \
-          "It determines whether the system will calculate an optimum "     \
-          "scan-only set.")                                                 \
-                                                                            \
-  product(intx, G1MinReservePercent, 10,                                    \
+  product(intx, G1ReservePercent, 10,                                       \
           "It determines the minimum reserve we should have in the heap "   \
           "to minimize the probability of promotion failure.")              \
                                                                             \
-  diagnostic(bool, G1PrintRegions, false,                                   \
+  diagnostic(bool, G1PrintHeapRegions, false,                               \
           "If set G1 will print information on which regions are being "    \
           "allocated and which are reclaimed.")                             \
                                                                             \
@@ -263,9 +240,6 @@
   develop(bool, G1HRRSFlushLogBuffersOnVerify, false,                       \
           "Forces flushing of log buffers before verification.")            \
                                                                             \
-  product(bool, G1UseSurvivorSpaces, true,                                  \
-          "When true, use survivor space.")                                 \
-                                                                            \
   develop(bool, G1FailOnFPError, false,                                     \
           "When set, G1 will fail when it encounters an FP 'error', "       \
           "so as to allow debugging")                                       \
@@ -280,21 +254,21 @@
           "If non-0 is the size of the G1 survivor space, "                 \
           "otherwise SurvivorRatio is used to determine the size")          \
                                                                             \
-  product(bool, G1ForgetfulMMUTracker, false,                               \
+  product(bool, G1UseFixedWindowMMUTracker, false,                          \
           "If the MMU tracker's memory is full, forget the oldest entry")   \
                                                                             \
   product(uintx, G1HeapRegionSize, 0,                                       \
           "Size of the G1 regions.")                                        \
                                                                             \
-  experimental(bool, G1ParallelRSetUpdatingEnabled, false,                  \
+  experimental(bool, G1UseParallelRSetUpdating, true,                       \
           "Enables the parallelization of remembered set updating "         \
           "during evacuation pauses")                                       \
                                                                             \
-  experimental(bool, G1ParallelRSetScanningEnabled, false,                  \
+  experimental(bool, G1UseParallelRSetScanning, true,                       \
           "Enables the parallelization of remembered set scanning "         \
           "during evacuation pauses")                                       \
                                                                             \
-  product(uintx, G1ParallelRSetThreads, 0,                                  \
+  product(uintx, G1ConcRefinementThreads, 0,                                \
           "If non-0 is the number of parallel rem set update threads, "     \
           "otherwise the value is determined ergonomically.")               \
                                                                             \
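
The develop/product/diagnostic/experimental wrappers in this table are parameters of an X-macro, so reclassifying a flag, as happens above to G1ExpandByPercentOfAvailable and G1SATBProcessCompletedThreshold, only changes which wrapper processes its entry when the table is expanded. A simplified sketch of the pattern; the real expansion in globals.hpp carries more attributes and different macro names:

#define MY_FLAGS(product, develop)                                     \
  product(int,  G1UpdateBufferSize, 256, "Size of an update buffer")   \
  develop(bool, G1Gen, true, "Enable generational G1")

// One expansion declares the globals...
#define DECLARE_PRODUCT(type, name, value, doc) extern type name;
#define DECLARE_DEVELOP(type, name, value, doc) extern type name;
MY_FLAGS(DECLARE_PRODUCT, DECLARE_DEVELOP)

// ...another defines them with their defaults, in exactly one .cpp:
#define MATERIALIZE_PRODUCT(type, name, value, doc) type name = value;
#define MATERIALIZE_DEVELOP(type, name, value, doc) type name = value;
// MY_FLAGS(MATERIALIZE_PRODUCT, MATERIALIZE_DEVELOP)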
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -75,6 +75,16 @@
   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
   virtual void do_oop(      oop* p) { do_oop_work(p); }
 
+  void print_object(outputStream* out, oop obj) {
+#ifdef PRODUCT
+    klassOop k = obj->klass();
+    const char* class_name = instanceKlass::cast(k)->external_name();
+    out->print_cr("class name %s", class_name);
+#else // PRODUCT
+    obj->print_on(out);
+#endif // PRODUCT
+  }
+
   template <class T> void do_oop_work(T* p) {
     assert(_containing_obj != NULL, "Precondition");
     assert(!_g1h->is_obj_dead_cond(_containing_obj, _use_prev_marking),
@@ -90,21 +100,29 @@
           gclog_or_tty->print_cr("----------");
         }
         if (!_g1h->is_in_closed_subset(obj)) {
-          gclog_or_tty->print_cr("Field "PTR_FORMAT
-                        " of live obj "PTR_FORMAT
-                        " points to obj "PTR_FORMAT
-                        " not in the heap.",
-                        p, (void*) _containing_obj, (void*) obj);
-        } else {
+          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
           gclog_or_tty->print_cr("Field "PTR_FORMAT
-                        " of live obj "PTR_FORMAT
-                        " points to dead obj "PTR_FORMAT".",
-                        p, (void*) _containing_obj, (void*) obj);
+                                 " of live obj "PTR_FORMAT" in region "
+                                 "["PTR_FORMAT", "PTR_FORMAT")",
+                                 p, (void*) _containing_obj,
+                                 from->bottom(), from->end());
+          print_object(gclog_or_tty, _containing_obj);
+          gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
+                                 (void*) obj);
+        } else {
+          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
+          HeapRegion* to   = _g1h->heap_region_containing((HeapWord*)obj);
+          gclog_or_tty->print_cr("Field "PTR_FORMAT
+                                 " of live obj "PTR_FORMAT" in region "
+                                 "["PTR_FORMAT", "PTR_FORMAT")",
+                                 p, (void*) _containing_obj,
+                                 from->bottom(), from->end());
+          print_object(gclog_or_tty, _containing_obj);
+          gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
+                                 "["PTR_FORMAT", "PTR_FORMAT")",
+                                 (void*) obj, to->bottom(), to->end());
+          print_object(gclog_or_tty, obj);
         }
-        gclog_or_tty->print_cr("Live obj:");
-        _containing_obj->print_on(gclog_or_tty);
-        gclog_or_tty->print_cr("Bad referent:");
-        obj->print_on(gclog_or_tty);
         gclog_or_tty->print_cr("----------");
         _failures = true;
         failed = true;
@@ -432,7 +450,9 @@
     _young_type(NotYoung), _next_young_region(NULL),
     _next_dirty_cards_region(NULL),
     _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
-    _rem_set(NULL), _zfs(NotZeroFilled)
+    _rem_set(NULL), _zfs(NotZeroFilled),
+    _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
+    _predicted_bytes_to_copy(0)
 {
   _orig_end = mr.end();
   // Note that initialize() will set the start of the unmarked area of the
@@ -715,7 +735,7 @@
   else
     st->print("   ");
   if (is_young())
-    st->print(is_scan_only() ? " SO" : (is_survivor() ? " SU" : " Y "));
+    st->print(is_survivor() ? " SU" : " Y ");
   else
     st->print("   ");
   if (is_empty())
@@ -723,6 +743,8 @@
   else
     st->print("  ");
   st->print(" %5d", _gc_time_stamp);
+  st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
+            prev_top_at_mark_start(), next_top_at_mark_start());
   G1OffsetTableContigSpace::print_on(st);
 }
 
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -247,7 +247,6 @@
 
   enum YoungType {
     NotYoung,                   // a region is not young
-    ScanOnly,                   // a region is young and scan-only
     Young,                      // a region is young
     Survivor                    // a region is young and it contains
                                 // survivor
@@ -292,6 +291,20 @@
     _young_type = new_type;
   }
 
+  // Cached attributes used in the collection set policy information
+
+  // The RSet length that was added to the total value
+  // for the collection set.
+  size_t _recorded_rs_length;
+
+  // The predicted elapsed time that was added to total value
+  // for the collection set.
+  double _predicted_elapsed_time_ms;
+
+  // The predicted number of bytes to copy that was added to
+  // the total value for the collection set.
+  size_t _predicted_bytes_to_copy;
+
  public:
   // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
   HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
@@ -614,7 +627,6 @@
   // </PREDICTION>
 
   bool is_young() const     { return _young_type != NotYoung; }
-  bool is_scan_only() const { return _young_type == ScanOnly; }
   bool is_survivor() const  { return _young_type == Survivor; }
 
   int  young_index_in_cset() const { return _young_index_in_cset; }
@@ -629,12 +641,6 @@
     return _surv_rate_group->age_in_group(_age_index);
   }
 
-  void recalculate_age_in_surv_rate_group() {
-    assert( _surv_rate_group != NULL, "pre-condition" );
-    assert( _age_index > -1, "pre-condition" );
-    _age_index = _surv_rate_group->recalculate_age_index(_age_index);
-  }
-
   void record_surv_words_in_group(size_t words_survived) {
     assert( _surv_rate_group != NULL, "pre-condition" );
     assert( _age_index > -1, "pre-condition" );
@@ -676,8 +682,6 @@
 
   void set_young() { set_young_type(Young); }
 
-  void set_scan_only() { set_young_type(ScanOnly); }
-
   void set_survivor() { set_young_type(Survivor); }
 
   void set_not_young() { set_young_type(NotYoung); }
@@ -775,6 +779,22 @@
     _zero_filler = NULL;
   }
 
+  size_t recorded_rs_length() const        { return _recorded_rs_length; }
+  double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
+  size_t predicted_bytes_to_copy() const   { return _predicted_bytes_to_copy; }
+
+  void set_recorded_rs_length(size_t rs_length) {
+    _recorded_rs_length = rs_length;
+  }
+
+  void set_predicted_elapsed_time_ms(double ms) {
+    _predicted_elapsed_time_ms = ms;
+  }
+
+  void set_predicted_bytes_to_copy(size_t bytes) {
+    _predicted_bytes_to_copy = bytes;
+  }
+
 #define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
   virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
   SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL)
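
The comments above describe each cached value as the amount "that was added to the total" for the collection set; the point is incremental bookkeeping. When a region leaves the collection set, exactly the recorded amounts are subtracted, even if a fresh prediction would now differ. A small sketch of that invariant with illustrative names:

#include <cstddef>

struct RegionSketch {
  size_t rs_length;
  double predicted_ms;
  size_t predicted_bytes;
};

struct CSetTotals {
  size_t rs_length_total;
  double predicted_ms_total;
  size_t predicted_bytes_total;
  CSetTotals()
    : rs_length_total(0), predicted_ms_total(0.0), predicted_bytes_total(0) {}

  void add(const RegionSketch& r) {
    rs_length_total       += r.rs_length;
    predicted_ms_total    += r.predicted_ms;
    predicted_bytes_total += r.predicted_bytes;
  }
  void remove(const RegionSketch& r) {
    // Subtract exactly what was added, keeping the totals consistent
    // even when newer predictions for the region would differ.
    rs_length_total       -= r.rs_length;
    predicted_ms_total    -= r.predicted_ms;
    predicted_bytes_total -= r.predicted_bytes;
  }
};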
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Thu May 13 18:26:35 2010 -0700
@@ -662,8 +662,6 @@
         prt = PosParPRT::alloc(from_hr);
       }
       prt->init(from_hr);
-      // Record the outgoing pointer in the from_region's outgoing bitmap.
-      from_hr->rem_set()->add_outgoing_reference(hr());
 
       PosParPRT* first_prt = _fine_grain_regions[ind];
       prt->set_next(first_prt);  // XXX Maybe move to init?
@@ -1073,11 +1071,7 @@
 
 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
                                    HeapRegion* hr)
-    : _bosa(bosa), _other_regions(hr),
-      _outgoing_region_map(G1CollectedHeap::heap()->max_regions(),
-                           false /* in-resource-area */),
-      _iter_state(Unclaimed)
-{}
+  : _bosa(bosa), _other_regions(hr), _iter_state(Unclaimed) { }
 
 
 void HeapRegionRemSet::setup_remset_size() {
@@ -1148,30 +1142,11 @@
   PosParPRT::par_contract_all();
 }
 
-void HeapRegionRemSet::add_outgoing_reference(HeapRegion* to_hr) {
-  _outgoing_region_map.par_at_put(to_hr->hrs_index(), 1);
-}
-
 void HeapRegionRemSet::clear() {
-  clear_outgoing_entries();
-  _outgoing_region_map.clear();
   _other_regions.clear();
   assert(occupied() == 0, "Should be clear.");
 }
 
-void HeapRegionRemSet::clear_outgoing_entries() {
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  size_t i = _outgoing_region_map.get_next_one_offset(0);
-  while (i < _outgoing_region_map.size()) {
-    HeapRegion* to_region = g1h->region_at(i);
-    if (!to_region->in_collection_set()) {
-      to_region->rem_set()->clear_incoming_entry(hr());
-    }
-    i = _outgoing_region_map.get_next_one_offset(i+1);
-  }
-}
-
-
 void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
                              BitMap* region_bm, BitMap* card_bm) {
   _other_regions.scrub(ctbs, region_bm, card_bm);
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Thu May 13 18:26:35 2010 -0700
@@ -179,13 +179,6 @@
 
   OtherRegionsTable _other_regions;
 
-  // One set bit for every region that has an entry for this one.
-  BitMap _outgoing_region_map;
-
-  // Clear entries for the current region in any rem sets named in
-  // the _outgoing_region_map.
-  void clear_outgoing_entries();
-
   enum ParIterState { Unclaimed, Claimed, Complete };
   volatile ParIterState _iter_state;
   volatile jlong _iter_claimed;
@@ -243,10 +236,6 @@
     _other_regions.add_reference(from, tid);
   }
 
-  // Records the fact that the current region contains an outgoing
-  // reference into "to_hr".
-  void add_outgoing_reference(HeapRegion* to_hr);
-
   // Removes any entries shown by the given bitmaps to contain only dead
   // objects.
   void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);
--- a/src/share/vm/gc_implementation/g1/ptrQueue.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/ptrQueue.cpp	Thu May 13 18:26:35 2010 -0700
@@ -25,8 +25,8 @@
 # include "incls/_precompiled.incl"
 # include "incls/_ptrQueue.cpp.incl"
 
-PtrQueue::PtrQueue(PtrQueueSet* qset_, bool perm) :
-  _qset(qset_), _buf(NULL), _index(0), _active(false),
+PtrQueue::PtrQueue(PtrQueueSet* qset_, bool perm, bool active) :
+  _qset(qset_), _buf(NULL), _index(0), _active(active),
   _perm(perm), _lock(NULL)
 {}
 
--- a/src/share/vm/gc_implementation/g1/ptrQueue.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/ptrQueue.hpp	Thu May 13 18:26:35 2010 -0700
@@ -62,7 +62,7 @@
 public:
   // Initialize this queue to contain a null buffer, and be part of the
   // given PtrQueueSet.
-  PtrQueue(PtrQueueSet*, bool perm = false);
+  PtrQueue(PtrQueueSet*, bool perm = false, bool active = false);
   // Release any contained resources.
   void flush();
   // Calls flush() when destroyed.
@@ -101,6 +101,8 @@
     }
   }
 
+  bool is_active() { return _active; }
+
   static int byte_index_to_index(int ind) {
     assert((ind % oopSize) == 0, "Invariant.");
     return ind / oopSize;
@@ -257,7 +259,7 @@
   bool process_completed_buffers() { return _process_completed; }
   void set_process_completed(bool x) { _process_completed = x; }
 
-  bool active() { return _all_active; }
+  bool is_active() { return _all_active; }
 
   // Set the buffer size.  Should be called before any "enqueue" operation
   // can be called.  And should only be called once.
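
The new active constructor argument matters because threads, and therefore their queues, can be created while concurrent marking is already in progress; such a queue must start in the queue set's current state rather than always starting inactive. The satbQueue.hpp hunk below shows the real hookup; here is a sketch with stand-in types:

struct PtrQueueSetSketch {
  bool _all_active;
  PtrQueueSetSketch() : _all_active(false) {}
  bool is_active() const { return _all_active; }
};

struct PtrQueueSketch {
  PtrQueueSetSketch* _qset;
  bool _active;
  PtrQueueSketch(PtrQueueSetSketch* qset, bool active = false)
    : _qset(qset), _active(active) {}
  bool is_active() const { return _active; }
};

// An SATB queue picks up the set-wide state at construction time, so a
// thread started mid-marking begins with an active queue:
struct ObjPtrQueueSketch : PtrQueueSketch {
  explicit ObjPtrQueueSketch(PtrQueueSetSketch* qset)
    : PtrQueueSketch(qset, qset->is_active()) {}
};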
--- a/src/share/vm/gc_implementation/g1/satbQueue.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/satbQueue.cpp	Thu May 13 18:26:35 2010 -0700
@@ -82,9 +82,57 @@
   t->satb_mark_queue().handle_zero_index();
 }
 
-void SATBMarkQueueSet::set_active_all_threads(bool b) {
+#ifdef ASSERT
+void SATBMarkQueueSet::dump_active_values(JavaThread* first,
+                                          bool expected_active) {
+  gclog_or_tty->print_cr("SATB queue active values for Java Threads");
+  gclog_or_tty->print_cr(" SATB queue set: active is %s",
+                         (is_active()) ? "TRUE" : "FALSE");
+  gclog_or_tty->print_cr(" expected_active is %s",
+                         (expected_active) ? "TRUE" : "FALSE");
+  for (JavaThread* t = first; t; t = t->next()) {
+    bool active = t->satb_mark_queue().is_active();
+    gclog_or_tty->print_cr("  thread %s, active is %s",
+                           t->name(), (active) ? "TRUE" : "FALSE");
+  }
+}
+#endif // ASSERT
+
+void SATBMarkQueueSet::set_active_all_threads(bool b,
+                                              bool expected_active) {
+  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
+  JavaThread* first = Threads::first();
+
+#ifdef ASSERT
+  if (_all_active != expected_active) {
+    dump_active_values(first, expected_active);
+
+    // I leave this here as a guarantee, instead of an assert, so
+    // that it would still be compiled in if we ever removed the
+    // #ifdef ASSERT guard. As it stands, the whole block is within
+    // an #ifdef ASSERT, so the guarantee will not be compiled into
+    // a product build anyway.
+    guarantee(false,
+              "SATB queue set has an unexpected active value");
+  }
+#endif // ASSERT
   _all_active = b;
-  for(JavaThread* t = Threads::first(); t; t = t->next()) {
+
+  for (JavaThread* t = first; t; t = t->next()) {
+#ifdef ASSERT
+    bool active = t->satb_mark_queue().is_active();
+    if (active != expected_active) {
+      dump_active_values(first, expected_active);
+
+      // I leave this here as a guarantee, instead of an assert, so
+      // that it would still be compiled in if we ever removed the
+      // #ifdef ASSERT guard. As it stands, the whole block is within
+      // an #ifdef ASSERT, so the guarantee will not be compiled into
+      // a product build anyway.
+      guarantee(false,
+                "thread has an unexpected active value in its SATB queue");
+    }
+#endif // ASSERT
     t->satb_mark_queue().set_active(b);
   }
 }
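
With expected_active, the caller states what it believes the queues' current state to be, and the method verifies it before flipping them. A plausible usage sketch implied by the new signature; the actual call sites live in the concurrent marking code, which this changeset does not show here:

struct SATBMarkQueueSetSketch {
  bool _all_active;
  SATBMarkQueueSetSketch() : _all_active(false) {}
  void set_active_all_threads(bool b, bool expected_active) {
    // The real version also walks every JavaThread, checking and then
    // setting each thread-local queue's state.
    (void) expected_active;
    _all_active = b;
  }
};

void marking_starts(SATBMarkQueueSetSketch& qs) {
  // Queues have been inactive since the previous cycle ended.
  qs.set_active_all_threads(true /* activate */, false /* expected */);
}

void marking_ends(SATBMarkQueueSetSketch& qs) {
  // Every queue should still be active; deactivate them all.
  qs.set_active_all_threads(false /* deactivate */, true /* expected */);
}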
--- a/src/share/vm/gc_implementation/g1/satbQueue.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/satbQueue.hpp	Thu May 13 18:26:35 2010 -0700
@@ -29,8 +29,7 @@
 class ObjPtrQueue: public PtrQueue {
 public:
   ObjPtrQueue(PtrQueueSet* qset_, bool perm = false) :
-    PtrQueue(qset_, perm)
-  {}
+    PtrQueue(qset_, perm, qset_->is_active()) { }
   // Apply the closure to all elements, and reset the index to make the
   // buffer empty.
   void apply_closure(ObjectClosure* cl);
@@ -55,6 +54,9 @@
   // is ignored.
   bool apply_closure_to_completed_buffer_work(bool par, int worker);
 
+#ifdef ASSERT
+  void dump_active_values(JavaThread* first, bool expected_active);
+#endif // ASSERT
 
 public:
   SATBMarkQueueSet();
@@ -65,9 +67,11 @@
 
   static void handle_zero_index_for_thread(JavaThread* t);
 
-  // Apply "set_active(b)" to all thread tloq's.  Should be called only
-  // with the world stopped.
-  void set_active_all_threads(bool b);
+  // Apply "set_active(b)" to all Java threads' SATB queues. It should be
+  // called only with the world stopped. The method will assert that the
+  // SATB queues of all threads it visits, as well as the SATB queue
+  // set itself, has an active value same as expected_active.
+  void set_active_all_threads(bool b, bool expected_active);
 
   // Register "blk" as "the closure" for all queues.  Only one such closure
   // is allowed.  The "apply_closure_to_completed_buffer" method will apply
--- a/src/share/vm/gc_implementation/g1/survRateGroup.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/survRateGroup.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -55,7 +55,6 @@
 void SurvRateGroup::reset()
 {
   _all_regions_allocated = 0;
-  _scan_only_prefix      = 0;
   _setup_seq_num         = 0;
   _stats_arrays_length   = 0;
   _accum_surv_rate       = 0.0;
@@ -74,7 +73,7 @@
 void
 SurvRateGroup::start_adding_regions() {
   _setup_seq_num   = _stats_arrays_length;
-  _region_num      = _scan_only_prefix;
+  _region_num      = 0;
   _accum_surv_rate = 0.0;
 
 #if 0
@@ -164,12 +163,6 @@
 }
 
 void
-SurvRateGroup::record_scan_only_prefix(size_t scan_only_prefix) {
-  guarantee( scan_only_prefix <= _region_num, "pre-condition" );
-  _scan_only_prefix = scan_only_prefix;
-}
-
-void
 SurvRateGroup::record_surviving_words(int age_in_group, size_t surv_words) {
   guarantee( 0 <= age_in_group && (size_t) age_in_group < _region_num,
              "pre-condition" );
@@ -218,13 +211,12 @@
 #ifndef PRODUCT
 void
 SurvRateGroup::print() {
-  gclog_or_tty->print_cr("Surv Rate Group: %s (%d entries, %d scan-only)",
-                _name, _region_num, _scan_only_prefix);
+  gclog_or_tty->print_cr("Surv Rate Group: %s (%d entries)",
+                _name, _region_num);
   for (size_t i = 0; i < _region_num; ++i) {
-    gclog_or_tty->print_cr("    age %4d   surv rate %6.2lf %%   pred %6.2lf %%%s",
+    gclog_or_tty->print_cr("    age %4d   surv rate %6.2lf %%   pred %6.2lf %%",
                   i, _surv_rate[i] * 100.0,
-                  _g1p->get_new_prediction(_surv_rate_pred[i]) * 100.0,
-                  (i < _scan_only_prefix) ? " S-O" : "    ");
+                  _g1p->get_new_prediction(_surv_rate_pred[i]) * 100.0);
   }
 }
 
--- a/src/share/vm/gc_implementation/g1/survRateGroup.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/survRateGroup.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,7 +41,6 @@
 
   int _all_regions_allocated;
   size_t _region_num;
-  size_t _scan_only_prefix;
   size_t _setup_seq_num;
 
 public:
@@ -51,13 +50,11 @@
   void reset();
   void start_adding_regions();
   void stop_adding_regions();
-  void record_scan_only_prefix(size_t scan_only_prefix);
   void record_surviving_words(int age_in_group, size_t surv_words);
   void all_surviving_words_recorded(bool propagate);
   const char* name() { return _name; }
 
   size_t region_num() { return _region_num; }
-  size_t scan_only_length() { return _scan_only_prefix; }
   double accum_surv_rate_pred(int age) {
     assert(age >= 0, "must be");
     if ((size_t)age < _stats_arrays_length)
@@ -82,17 +79,12 @@
 
   int next_age_index();
   int age_in_group(int age_index) {
-    int ret = (int) (_all_regions_allocated -  age_index);
+    int ret = (int) (_all_regions_allocated - age_index);
     assert( ret >= 0, "invariant" );
     return ret;
   }
-  int recalculate_age_index(int age_index) {
-    int new_age_index = (int) _scan_only_prefix - age_in_group(age_index);
-    guarantee( new_age_index >= 0, "invariant" );
-    return new_age_index;
-  }
   void finished_recalculating_age_indexes() {
-    _all_regions_allocated = (int) _scan_only_prefix;
+    _all_regions_allocated = 0;
   }
 
 #ifndef PRODUCT
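
With the scan-only prefix gone, a region's age is simply the number of regions allocated in its group since the region was added. Worked example with illustrative numbers: a region assigned age_index 5 has age 0 at that moment; after three more allocations _all_regions_allocated is 8 and age_in_group returns 8 - 5 = 3. The same arithmetic as a standalone sketch:

#include <cassert>

int age_in_group_sketch(int all_regions_allocated, int age_index) {
  // age_index records the allocation counter at the time the region
  // entered the group, so the difference is the region's age.
  int ret = all_regions_allocated - age_index;
  assert(ret >= 0 && "invariant");
  return ret;
}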
--- a/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 //
-// Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+// Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -161,8 +161,10 @@
 parMarkBitMap.hpp			bitMap.inline.hpp
 parMarkBitMap.hpp			psVirtualspace.hpp
 
+psAdaptiveSizePolicy.cpp                collectorPolicy.hpp
 psAdaptiveSizePolicy.cpp                gcPolicyCounters.hpp
 psAdaptiveSizePolicy.cpp		gcCause.hpp
+psAdaptiveSizePolicy.cpp                generationSizer.hpp
 psAdaptiveSizePolicy.cpp                psAdaptiveSizePolicy.hpp
 psAdaptiveSizePolicy.cpp                psGCAdaptivePolicyCounters.hpp
 psAdaptiveSizePolicy.cpp                psScavenge.hpp
@@ -175,6 +177,7 @@
 psAdaptiveSizePolicy.hpp		adaptiveSizePolicy.hpp
 
 psCompactionManager.cpp                 gcTaskManager.hpp
+psCompactionManager.cpp                 objArrayKlass.inline.hpp
 psCompactionManager.cpp                 objectStartArray.hpp
 psCompactionManager.cpp                 oop.hpp
 psCompactionManager.cpp                 oop.inline.hpp
@@ -189,6 +192,9 @@
 psCompactionManager.hpp                 allocation.hpp
 psCompactionManager.hpp                 taskqueue.hpp
 
+psCompactionManager.inline.hpp		psCompactionManager.hpp
+psCompactionManager.inline.hpp		psParallelCompact.hpp
+
 psGCAdaptivePolicyCounters.hpp		gcAdaptivePolicyCounters.hpp
 psGCAdaptivePolicyCounters.hpp          gcPolicyCounters.hpp
 psGCAdaptivePolicyCounters.hpp          psAdaptiveSizePolicy.hpp
@@ -211,6 +217,7 @@
 psMarkSweep.cpp                         fprofiler.hpp
 psMarkSweep.cpp                         gcCause.hpp
 psMarkSweep.cpp                         gcLocker.inline.hpp
+psMarkSweep.cpp                         generationSizer.hpp
 psMarkSweep.cpp                         isGCActiveMark.hpp
 psMarkSweep.cpp                         oop.inline.hpp
 psMarkSweep.cpp                         memoryService.hpp
@@ -252,6 +259,7 @@
 psParallelCompact.cpp			gcCause.hpp
 psParallelCompact.cpp			gcLocker.inline.hpp
 psParallelCompact.cpp                   gcTaskManager.hpp
+psParallelCompact.cpp                   generationSizer.hpp
 psParallelCompact.cpp			isGCActiveMark.hpp
 psParallelCompact.cpp			management.hpp
 psParallelCompact.cpp			memoryService.hpp
@@ -340,10 +348,12 @@
 psScavenge.cpp                          psAdaptiveSizePolicy.hpp
 psScavenge.cpp                          biasedLocking.hpp
 psScavenge.cpp                          cardTableExtension.hpp
+psScavenge.cpp                          collectorPolicy.hpp
 psScavenge.cpp                          fprofiler.hpp
 psScavenge.cpp                          gcCause.hpp
 psScavenge.cpp                          gcLocker.inline.hpp
 psScavenge.cpp                          gcTaskManager.hpp
+psScavenge.cpp                          generationSizer.hpp
 psScavenge.cpp                          handles.inline.hpp
 psScavenge.cpp                          isGCActiveMark.hpp
 psScavenge.cpp                          oop.inline.hpp
@@ -379,12 +389,12 @@
 pcTasks.cpp                             jniHandles.hpp
 pcTasks.cpp                             jvmtiExport.hpp
 pcTasks.cpp                             management.hpp
+pcTasks.cpp                             objArrayKlass.inline.hpp
 pcTasks.cpp                             psParallelCompact.hpp
 pcTasks.cpp                             pcTasks.hpp
 pcTasks.cpp                             oop.inline.hpp
 pcTasks.cpp                             oop.pcgc.inline.hpp
 pcTasks.cpp                             systemDictionary.hpp
-pcTasks.cpp                             taskqueue.hpp
 pcTasks.cpp                             thread.hpp
 pcTasks.cpp                             universe.hpp
 pcTasks.cpp                             vmThread.hpp
--- a/src/share/vm/gc_implementation/includeDB_gc_serial	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/includeDB_gc_serial	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+// Copyright 2007-2010 Sun Microsystems, Inc.  All Rights Reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //   
 // This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
 adaptiveSizePolicy.hpp			universe.hpp
 
 adaptiveSizePolicy.cpp			adaptiveSizePolicy.hpp
+adaptiveSizePolicy.cpp			collectorPolicy.hpp
 adaptiveSizePolicy.cpp			gcCause.hpp
 adaptiveSizePolicy.cpp			ostream.hpp
 adaptiveSizePolicy.cpp			timer.hpp
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -892,6 +892,10 @@
     }
     swap_spaces();
 
+    // A successful scavenge should restart the GC time limit count which is
+    // for full GC's.
+    size_policy->reset_gc_overhead_limit_count();
+
     assert(to()->is_empty(), "to space should be empty now");
   } else {
     assert(HandlePromotionFailure,
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -54,15 +54,16 @@
   CollectedHeap::pre_initialize();
 
   // Cannot be initialized until after the flags are parsed
-  GenerationSizer flag_parser;
+  _collector_policy = new GenerationSizer();
 
-  size_t yg_min_size = flag_parser.min_young_gen_size();
-  size_t yg_max_size = flag_parser.max_young_gen_size();
-  size_t og_min_size = flag_parser.min_old_gen_size();
-  size_t og_max_size = flag_parser.max_old_gen_size();
+  size_t yg_min_size = _collector_policy->min_young_gen_size();
+  size_t yg_max_size = _collector_policy->max_young_gen_size();
+  size_t og_min_size = _collector_policy->min_old_gen_size();
+  size_t og_max_size = _collector_policy->max_old_gen_size();
   // Why isn't there a min_perm_gen_size()?
-  size_t pg_min_size = flag_parser.perm_gen_size();
-  size_t pg_max_size = flag_parser.max_perm_gen_size();
+  size_t pg_min_size = _collector_policy->perm_gen_size();
+  size_t pg_max_size = _collector_policy->max_perm_gen_size();
 
   trace_gen_sizes("ps heap raw",
                   pg_min_size, pg_max_size,
@@ -89,12 +90,14 @@
   // move to the common code.
   yg_min_size = align_size_up(yg_min_size, yg_align);
   yg_max_size = align_size_up(yg_max_size, yg_align);
-  size_t yg_cur_size = align_size_up(flag_parser.young_gen_size(), yg_align);
+  size_t yg_cur_size =
+    align_size_up(_collector_policy->young_gen_size(), yg_align);
   yg_cur_size = MAX2(yg_cur_size, yg_min_size);
 
   og_min_size = align_size_up(og_min_size, og_align);
   og_max_size = align_size_up(og_max_size, og_align);
-  size_t og_cur_size = align_size_up(flag_parser.old_gen_size(), og_align);
+  size_t og_cur_size =
+    align_size_up(_collector_policy->old_gen_size(), og_align);
   og_cur_size = MAX2(og_cur_size, og_min_size);
 
   pg_min_size = align_size_up(pg_min_size, pg_align);
@@ -355,6 +358,11 @@
   assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
 
+  // In general gc_overhead_limit_was_exceeded should be false, so
+  // set it to false here and change it to true only if the gc time
+  // limit is being exceeded, as checked below.
+  *gc_overhead_limit_was_exceeded = false;
+
   HeapWord* result = young_gen()->allocate(size, is_tlab);
 
   uint loop_count = 0;
@@ -428,24 +436,6 @@
 
     if (result == NULL) {
 
-      // Exit the loop if if the gc time limit has been exceeded.
-      // The allocation must have failed above (result must be NULL),
-      // and the most recent collection must have exceeded the
-      // gc time limit.  Exit the loop so that an out-of-memory
-      // will be thrown (returning a NULL will do that), but
-      // clear gc_time_limit_exceeded so that the next collection
-      // will succeeded if the applications decides to handle the
-      // out-of-memory and tries to go on.
-      *gc_overhead_limit_was_exceeded = size_policy()->gc_time_limit_exceeded();
-      if (size_policy()->gc_time_limit_exceeded()) {
-        size_policy()->set_gc_time_limit_exceeded(false);
-        if (PrintGCDetails && Verbose) {
-        gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
-          "return NULL because gc_time_limit_exceeded is set");
-        }
-        return NULL;
-      }
-
       // Generate a VM operation
       VM_ParallelGCFailedAllocation op(size, is_tlab, gc_count);
       VMThread::execute(&op);
@@ -463,16 +453,34 @@
           assert(op.result() == NULL, "must be NULL if gc_locked() is true");
           continue;  // retry and/or stall as necessary
         }
-        // If a NULL result is being returned, an out-of-memory
-        // will be thrown now.  Clear the gc_time_limit_exceeded
-        // flag to avoid the following situation.
-        //      gc_time_limit_exceeded is set during a collection
-        //      the collection fails to return enough space and an OOM is thrown
-        //      the next GC is skipped because the gc_time_limit_exceeded
-        //        flag is set and another OOM is thrown
-        if (op.result() == NULL) {
-          size_policy()->set_gc_time_limit_exceeded(false);
+
+        // Exit the loop if the gc time limit has been exceeded.
+        // The allocation must have failed above ("result" guarding
+        // this path is NULL) and the most recent collection has exceeded the
+        // gc overhead limit (although enough may have been collected to
+        // satisfy the allocation).  Exit the loop so that an out-of-memory
+        // will be thrown (return a NULL ignoring the contents of
+        // op.result()), but clear gc_overhead_limit_exceeded so that the
+        // next collection starts with a clean slate (i.e., forgets about
+        // previous overhead excesses).  Fill op.result() with a filler
+        // object so that the heap remains parsable.
+        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
+        const bool softrefs_clear = collector_policy()->all_soft_refs_clear();
+        assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
+        if (limit_exceeded && softrefs_clear) {
+          *gc_overhead_limit_was_exceeded = true;
+          size_policy()->set_gc_overhead_limit_exceeded(false);
+          if (PrintGCDetails && Verbose) {
+            gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
+              "return NULL because gc_overhead_limit_exceeded is set");
+          }
+          if (op.result() != NULL) {
+            CollectedHeap::fill_with_object(op.result(), size);
+          }
+          return NULL;
         }
+
         return op.result();
       }
     }
@@ -613,14 +621,15 @@
       // and the most recent collection must have exceeded the
       // gc time limit.  Exit the loop so that an out-of-memory
       // will be thrown (returning a NULL will do that), but
-      // clear gc_time_limit_exceeded so that the next collection
+      // clear gc_overhead_limit_exceeded so that the next collection
       // will succeed if the application decides to handle the
       // out-of-memory and tries to go on.
-      if (size_policy()->gc_time_limit_exceeded()) {
-        size_policy()->set_gc_time_limit_exceeded(false);
+      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
+      if (limit_exceeded) {
+        size_policy()->set_gc_overhead_limit_exceeded(false);
         if (PrintGCDetails && Verbose) {
-        gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate: "
-          "return NULL because gc_time_limit_exceeded is set");
+          gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate:"
+            " return NULL because gc_overhead_limit_exceeded is set");
         }
         assert(result == NULL, "Allocation did not fail");
         return NULL;
@@ -643,14 +652,15 @@
           continue;  // retry and/or stall as necessary
         }
         // If a NULL result is being returned, an out-of-memory
-        // will be thrown now.  Clear the gc_time_limit_exceeded
+        // will be thrown now.  Clear the gc_overhead_limit_exceeded
         // flag to avoid the following situation.
-        //      gc_time_limit_exceeded is set during a collection
+        //      gc_overhead_limit_exceeded is set during a collection
         //      the collection fails to return enough space and an OOM is thrown
-        //      the next GC is skipped because the gc_time_limit_exceeded
-        //        flag is set and another OOM is thrown
+        //      a subsequent GC prematurely throws an out-of-memory because
+        //        the gc_overhead_limit_exceeded counts did not start
+        //        again from 0.
         if (op.result() == NULL) {
-          size_policy()->set_gc_time_limit_exceeded(false);
+          size_policy()->reset_gc_overhead_limit_count();
         }
         return op.result();
       }
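
The reworked failure path reports an out-of-memory only when two conditions hold together: the overhead limit has tripped and soft references were already cleared as a last resort. Any space the failed operation did return is filled with a dummy object so the heap stays walkable. A condensed sketch of that decision with stand-in types; the real loop also retries and stalls:

#include <cstddef>

struct SizePolicySketch {
  bool _limit_exceeded;
  SizePolicySketch() : _limit_exceeded(false) {}
  bool gc_overhead_limit_exceeded() const { return _limit_exceeded; }
  void set_gc_overhead_limit_exceeded(bool v) { _limit_exceeded = v; }
};

// Stand-in for CollectedHeap::fill_with_object().
void fill_with_dummy(char* start, size_t size) { (void) start; (void) size; }

char* after_failed_gc_op(SizePolicySketch& sp, bool softrefs_clear,
                         char* op_result, size_t size,
                         bool* gc_overhead_limit_was_exceeded) {
  const bool limit_exceeded = sp.gc_overhead_limit_exceeded();
  // Give up only when the limit tripped *and* soft refs were already
  // cleared as a last resort; otherwise hand back whatever the GC got.
  if (limit_exceeded && softrefs_clear) {
    *gc_overhead_limit_was_exceeded = true;
    sp.set_gc_overhead_limit_exceeded(false);   // clean slate next cycle
    if (op_result != NULL) {
      fill_with_dummy(op_result, size);         // keep the heap parsable
    }
    return NULL;  // the caller turns NULL into an OutOfMemoryError
  }
  return op_result;
}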
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,8 @@
 class AdjoiningGenerations;
 class GCTaskManager;
 class PSAdaptiveSizePolicy;
+class GenerationSizer;
+class CollectorPolicy;
 
 class ParallelScavengeHeap : public CollectedHeap {
   friend class VMStructs;
@@ -43,6 +45,8 @@
   size_t _young_gen_alignment;
   size_t _old_gen_alignment;
 
+  GenerationSizer* _collector_policy;
+
   inline size_t set_alignment(size_t& var, size_t val);
 
   // Collection of generations that are adjacent in the
@@ -72,6 +76,9 @@
     return CollectedHeap::ParallelScavengeHeap;
   }
 
+  CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector_policy; }
+
   static PSYoungGen* young_gen()     { return _young_gen; }
   static PSOldGen* old_gen()         { return _old_gen; }
   static PSPermGen* perm_gen()       { return _perm_gen; }
--- a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	Thu May 13 18:26:35 2010 -0700
@@ -48,7 +48,7 @@
     _vm_thread->oops_do(&mark_and_push_closure, &mark_and_push_in_blobs);
 
   // Do the real work
-  cm->drain_marking_stacks(&mark_and_push_closure);
+  cm->follow_marking_stacks();
 }
 
 
@@ -118,7 +118,7 @@
   }
 
   // Do the real work
-  cm->drain_marking_stacks(&mark_and_push_closure);
+  cm->follow_marking_stacks();
   // cm->deallocate_stacks();
 }
 
@@ -196,17 +196,19 @@
   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
 
   oop obj = NULL;
+  ObjArrayTask task;
   int random_seed = 17;
-  while(true) {
-    if (ParCompactionManager::steal(which, &random_seed, obj)) {
+  do {
+    while (ParCompactionManager::steal_objarray(which, &random_seed, task)) {
+      objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
+      k->oop_follow_contents(cm, task.obj(), task.index());
+      cm->follow_marking_stacks();
+    }
+    while (ParCompactionManager::steal(which, &random_seed, obj)) {
       obj->follow_contents(cm);
-      cm->drain_marking_stacks(&mark_and_push_closure);
-    } else {
-      if (terminator()->offer_termination()) {
-        break;
-      }
+      cm->follow_marking_stacks();
     }
-  }
+  } while (!terminator()->offer_termination());
 }
 
 //
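
The restructured loop is the standard work-stealing termination protocol: drain everything that can be stolen, and only when no work is found offer termination, which succeeds once every worker has offered. A generic single-queue sketch; the real code above interleaves the ObjArrayTask and oop queues:

template <typename Task, typename Queue, typename Terminator>
void steal_loop(int which, Queue& q, Terminator& term,
                void (*process)(Task&)) {
  Task t;
  do {
    // Drain stealable work. Processing may create more local work, so
    // each successful steal is followed by a full local drain
    // (cm->follow_marking_stacks() in the code above).
    while (q.steal(which, t)) {
      process(t);
    }
    // Nothing left to steal: offer termination. offer_termination()
    // returns true only after every worker has offered as well.
  } while (!term.offer_termination());
}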
--- a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2002-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2002-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -184,18 +184,19 @@
   set_change_young_gen_for_maj_pauses(0);
 }
 
-
 // If this is not a full GC, only test and modify the young generation.
 
-void PSAdaptiveSizePolicy::compute_generation_free_space(size_t young_live,
-                                               size_t eden_live,
-                                               size_t old_live,
-                                               size_t perm_live,
-                                               size_t cur_eden,
-                                               size_t max_old_gen_size,
-                                               size_t max_eden_size,
-                                               bool   is_full_gc,
-                                               GCCause::Cause gc_cause) {
+void PSAdaptiveSizePolicy::compute_generation_free_space(
+                                           size_t young_live,
+                                           size_t eden_live,
+                                           size_t old_live,
+                                           size_t perm_live,
+                                           size_t cur_eden,
+                                           size_t max_old_gen_size,
+                                           size_t max_eden_size,
+                                           bool   is_full_gc,
+                                           GCCause::Cause gc_cause,
+                                           CollectorPolicy* collector_policy) {
 
   // Update statistics
   // Time statistics are updated as we go, update footprint stats here
@@ -380,91 +381,16 @@
   // Is too much time being spent in GC?
   //   Is the heap trying to grow beyond it's limits?
 
-  const size_t free_in_old_gen = (size_t)(max_old_gen_size - avg_old_live()->average());
+  const size_t free_in_old_gen =
+    (size_t)(max_old_gen_size - avg_old_live()->average());
   if (desired_promo_size > free_in_old_gen && desired_eden_size > eden_limit) {
-
-    // eden_limit is the upper limit on the size of eden based on
-    // the maximum size of the young generation and the sizes
-    // of the survivor space.
-    // The question being asked is whether the gc costs are high
-    // and the space being recovered by a collection is low.
-    // free_in_young_gen is the free space in the young generation
-    // after a collection and promo_live is the free space in the old
-    // generation after a collection.
-    //
-    // Use the minimum of the current value of the live in the
-    // young gen or the average of the live in the young gen.
-    // If the current value drops quickly, that should be taken
-    // into account (i.e., don't trigger if the amount of free
-    // space has suddenly jumped up).  If the current is much
-    // higher than the average, use the average since it represents
-    // the longer term behavor.
-    const size_t live_in_eden = MIN2(eden_live, (size_t) avg_eden_live()->average());
-    const size_t free_in_eden = eden_limit > live_in_eden ?
-      eden_limit - live_in_eden : 0;
-    const size_t total_free_limit = free_in_old_gen + free_in_eden;
-    const size_t total_mem = max_old_gen_size + max_eden_size;
-    const double mem_free_limit = total_mem * (GCHeapFreeLimit/100.0);
-    if (PrintAdaptiveSizePolicy && (Verbose ||
-        (total_free_limit < (size_t) mem_free_limit))) {
-      gclog_or_tty->print_cr(
-            "PSAdaptiveSizePolicy::compute_generation_free_space limits:"
-            " promo_limit: " SIZE_FORMAT
-            " eden_limit: " SIZE_FORMAT
-            " total_free_limit: " SIZE_FORMAT
-            " max_old_gen_size: " SIZE_FORMAT
-            " max_eden_size: " SIZE_FORMAT
-            " mem_free_limit: " SIZE_FORMAT,
-            promo_limit, eden_limit, total_free_limit,
-            max_old_gen_size, max_eden_size,
-            (size_t) mem_free_limit);
-    }
-
-    if (is_full_gc) {
-      if (gc_cost() > gc_cost_limit &&
-        total_free_limit < (size_t) mem_free_limit) {
-        // Collections, on average, are taking too much time, and
-        //      gc_cost() > gc_cost_limit
-        // we have too little space available after a full gc.
-        //      total_free_limit < mem_free_limit
-        // where
-        //   total_free_limit is the free space available in
-        //     both generations
-        //   total_mem is the total space available for allocation
-        //     in both generations (survivor spaces are not included
-        //     just as they are not included in eden_limit).
-        //   mem_free_limit is a fraction of total_mem judged to be an
-        //     acceptable amount that is still unused.
-        // The heap can ask for the value of this variable when deciding
-        // whether to thrown an OutOfMemory error.
-        // Note that the gc time limit test only works for the collections
-        // of the young gen + tenured gen and not for collections of the
-        // permanent gen.  That is because the calculation of the space
-        // freed by the collection is the free space in the young gen +
-        // tenured gen.
-        // Ignore explicit GC's. Ignoring explicit GC's at this level
-        // is the equivalent of the GC did not happen as far as the
-        // overhead calculation is concerted (i.e., the flag is not set
-        // and the count is not affected).  Also the average will not
-        // have been updated unless UseAdaptiveSizePolicyWithSystemGC is on.
-        if (!GCCause::is_user_requested_gc(gc_cause) &&
-            !GCCause::is_serviceability_requested_gc(gc_cause)) {
-          inc_gc_time_limit_count();
-          if (UseGCOverheadLimit &&
-              (gc_time_limit_count() > AdaptiveSizePolicyGCTimeLimitThreshold)){
-            // All conditions have been met for throwing an out-of-memory
-            _gc_time_limit_exceeded = true;
-            // Avoid consecutive OOM due to the gc time limit by resetting
-            // the counter.
-            reset_gc_time_limit_count();
-          }
-          _print_gc_time_limit_would_be_exceeded = true;
-        }
-      } else {
-        // Did not exceed overhead limits
-        reset_gc_time_limit_count();
-      }
-    }
+    check_gc_overhead_limit(young_live,
+                            eden_live,
+                            max_old_gen_size,
+                            max_eden_size,
+                            is_full_gc,
+                            gc_cause,
+                            collector_policy);
   }
 
 
--- a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2002-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2002-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,6 +45,7 @@
 
 // Forward decls
 class elapsedTimer;
+class GenerationSizer;
 
 class PSAdaptiveSizePolicy : public AdaptiveSizePolicy {
  friend class PSGCAdaptivePolicyCounters;
@@ -340,7 +341,8 @@
                                      size_t max_old_gen_size,
                                      size_t max_eden_size,
                                      bool   is_full_gc,
-                                     GCCause::Cause gc_cause);
+                                     GCCause::Cause gc_cause,
+                                     CollectorPolicy* collector_policy);
 
   // Calculates new survivor space size;  returns a new tenuring threshold
   // value. Stores new survivor size in _survivor_size.
--- a/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp	Thu May 13 18:26:35 2010 -0700
@@ -28,6 +28,8 @@
 PSOldGen*            ParCompactionManager::_old_gen = NULL;
 ParCompactionManager**  ParCompactionManager::_manager_array = NULL;
 OopTaskQueueSet*     ParCompactionManager::_stack_array = NULL;
+ParCompactionManager::ObjArrayTaskQueueSet*
+  ParCompactionManager::_objarray_queues = NULL;
 ObjectStartArray*    ParCompactionManager::_start_array = NULL;
 ParMarkBitMap*       ParCompactionManager::_mark_bitmap = NULL;
 RegionTaskQueueSet*   ParCompactionManager::_region_array = NULL;
@@ -46,6 +48,11 @@
 
   // We want the overflow stack to be permanent
   _overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
+
+  _objarray_queue.initialize();
+  _objarray_overflow_stack =
+    new (ResourceObj::C_HEAP) ObjArrayOverflowStack(10, true);
+
 #ifdef USE_RegionTaskQueueWithOverflow
   region_stack()->initialize();
 #else
@@ -69,6 +76,7 @@
 
 ParCompactionManager::~ParCompactionManager() {
   delete _overflow_stack;
+  delete _objarray_overflow_stack;
   delete _revisit_klass_stack;
   delete _revisit_mdo_stack;
   // _manager_array and _stack_array are statics
@@ -86,18 +94,21 @@
 
   assert(_manager_array == NULL, "Attempt to initialize twice");
   _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1 );
-  guarantee(_manager_array != NULL, "Could not initialize promotion manager");
+  guarantee(_manager_array != NULL, "Could not allocate manager_array");
 
   _stack_array = new OopTaskQueueSet(parallel_gc_threads);
-  guarantee(_stack_array != NULL, "Count not initialize promotion manager");
+  guarantee(_stack_array != NULL, "Could not allocate stack_array");
+  _objarray_queues = new ObjArrayTaskQueueSet(parallel_gc_threads);
+  guarantee(_objarray_queues != NULL, "Could not allocate objarray_queues");
   _region_array = new RegionTaskQueueSet(parallel_gc_threads);
-  guarantee(_region_array != NULL, "Count not initialize promotion manager");
+  guarantee(_region_array != NULL, "Could not allocate region_array");
 
   // Create and register the ParCompactionManager(s) for the worker threads.
   for(uint i=0; i<parallel_gc_threads; i++) {
     _manager_array[i] = new ParCompactionManager();
     guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
     stack_array()->register_queue(i, _manager_array[i]->marking_stack());
+    _objarray_queues->register_queue(i, &_manager_array[i]->_objarray_queue);
 #ifdef USE_RegionTaskQueueWithOverflow
     region_array()->register_queue(i, _manager_array[i]->region_stack()->task_queue());
 #else
@@ -203,36 +214,30 @@
   }
 }
 
-void ParCompactionManager::drain_marking_stacks(OopClosure* blk) {
-#ifdef ASSERT
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-  MutableSpace* to_space = heap->young_gen()->to_space();
-  MutableSpace* old_space = heap->old_gen()->object_space();
-  MutableSpace* perm_space = heap->perm_gen()->object_space();
-#endif /* ASSERT */
-
-
+void ParCompactionManager::follow_marking_stacks() {
   do {
-
-    // Drain overflow stack first, so other threads can steal from
-    // claimed stack while we work.
-    while(!overflow_stack()->is_empty()) {
-      oop obj = overflow_stack()->pop();
+    // Drain the overflow stack first, to allow stealing from the marking stack.
+    oop obj;
+    while (!overflow_stack()->is_empty()) {
+      overflow_stack()->pop()->follow_contents(this);
+    }
+    while (marking_stack()->pop_local(obj)) {
       obj->follow_contents(this);
     }
 
-    oop obj;
-    // obj is a reference!!!
-    while (marking_stack()->pop_local(obj)) {
-      // It would be nice to assert about the type of objects we might
-      // pop, but they can come from anywhere, unfortunately.
-      obj->follow_contents(this);
+    // Process ObjArrays one at a time to avoid marking stack bloat.
+    ObjArrayTask task;
+    if (!_objarray_overflow_stack->is_empty()) {
+      task = _objarray_overflow_stack->pop();
+      objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
+      k->oop_follow_contents(this, task.obj(), task.index());
+    } else if (_objarray_queue.pop_local(task)) {
+      objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
+      k->oop_follow_contents(this, task.obj(), task.index());
     }
-  } while((marking_stack()->size() != 0) || (overflow_stack()->length() != 0));
+  } while (!marking_stacks_empty());
 
-  assert(marking_stack()->size() == 0, "Sanity");
-  assert(overflow_stack()->length() == 0, "Sanity");
+  assert(marking_stacks_empty(), "Sanity");
 }
 
 void ParCompactionManager::drain_region_overflow_stack() {
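
The new follow_marking_stacks() above loops until the private overflow stack, the stealable marking stack, and the objarray queues are all empty, draining the overflow stack first so the stealable queue stays populated for idle workers. A minimal single-threaded sketch of that drain discipline, using standard containers as stand-ins for the VM's GrowableArray and GenericTaskQueue (all names below are illustrative, not HotSpot API):

    #include <cstdio>
    #include <deque>
    #include <vector>

    // Illustrative stand-ins: 'local' models the bounded, stealable task
    // queue; 'overflow' models the private GrowableArray overflow stack.
    struct Worker {
        std::deque<int> local;
        std::vector<int> overflow;

        void push(int task) {
            if (local.size() < 4) local.push_back(task);  // bounded queue
            else overflow.push_back(task);                // spill on overflow
        }
        void process(int task) {
            if (task % 2 == 0) push(task + 1);  // tasks may spawn more tasks
            std::printf("processed %d\n", task);
        }
        void drain() {
            do {
                // Overflow first, so the stealable queue stays populated
                // for idle workers in the parallel case.
                while (!overflow.empty()) {
                    int t = overflow.back();
                    overflow.pop_back();
                    process(t);
                }
                while (!local.empty()) {
                    int t = local.back();
                    local.pop_back();
                    process(t);
                }
            } while (!overflow.empty() || !local.empty());
        }
    };

    int main() {
        Worker w;
        for (int i = 0; i < 6; ++i) w.push(i);
        w.drain();
        return 0;
    }

The outer do/while matters because processing a task can push new tasks onto either stack after an inner loop has already emptied it.
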
--- a/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp	Thu May 13 18:26:35 2010 -0700
@@ -22,18 +22,6 @@
  *
  */
 
-//
-// psPromotionManager is used by a single thread to manage object survival
-// during a scavenge. The promotion manager contains thread local data only.
-//
-// NOTE! Be carefull when allocating the stacks on cheap. If you are going
-// to use a promotion manager in more than one thread, the stacks MUST be
-// on cheap. This can lead to memory leaks, though, as they are not auto
-// deallocated.
-//
-// FIX ME FIX ME Add a destructor, and don't rely on the user to drain/flush/deallocate!
-//
-
 // Move to some global location
 #define HAS_BEEN_MOVED 0x1501d01d
 // End move to some global location
@@ -46,8 +34,6 @@
 class ParallelCompactData;
 class ParMarkBitMap;
 
-// Move to it's own file if this works out.
-
 class ParCompactionManager : public CHeapObj {
   friend class ParallelTaskTerminator;
   friend class ParMarkBitMap;
@@ -72,14 +58,27 @@
 // ------------------------  End don't putback if not needed
 
  private:
+  // 32-bit:  4K * 8 = 32KiB; 64-bit:  8K * 16 = 128KiB
+  #define OBJARRAY_QUEUE_SIZE (1 << NOT_LP64(12) LP64_ONLY(13))
+  typedef GenericTaskQueue<ObjArrayTask, OBJARRAY_QUEUE_SIZE> ObjArrayTaskQueue;
+  typedef GenericTaskQueueSet<ObjArrayTaskQueue> ObjArrayTaskQueueSet;
+  #undef OBJARRAY_QUEUE_SIZE
+
   static ParCompactionManager** _manager_array;
   static OopTaskQueueSet*       _stack_array;
+  static ObjArrayTaskQueueSet*  _objarray_queues;
   static ObjectStartArray*      _start_array;
   static RegionTaskQueueSet*    _region_array;
   static PSOldGen*              _old_gen;
 
+private:
   OopTaskQueue                  _marking_stack;
   GrowableArray<oop>*           _overflow_stack;
+
+  typedef GrowableArray<ObjArrayTask> ObjArrayOverflowStack;
+  ObjArrayTaskQueue             _objarray_queue;
+  ObjArrayOverflowStack*        _objarray_overflow_stack;
+
   // Is there a way to reuse the _marking_stack for the
   // saving empty regions?  For now just create a different
   // type of TaskQueue.
@@ -128,8 +127,8 @@
   // Pushes onto the region stack.  If the region stack is full,
   // pushes onto the region overflow stack.
   void region_stack_push(size_t region_index);
- public:
 
+public:
   Action action() { return _action; }
   void set_action(Action v) { _action = v; }
 
@@ -163,6 +162,8 @@
   // Get a oop for scanning.  If returns null, no oop were found.
   oop retrieve_for_scanning();
 
+  inline void push_objarray(oop obj, size_t index);
+
   // Save region for later processing.  Must not fail.
   void save_for_processing(size_t region_index);
   // Get a region for processing.  If returns null, no region were found.
@@ -175,12 +176,17 @@
     return stack_array()->steal(queue_num, seed, t);
   }
 
+  static bool steal_objarray(int queue_num, int* seed, ObjArrayTask& t) {
+    return _objarray_queues->steal(queue_num, seed, t);
+  }
+
   static bool steal(int queue_num, int* seed, RegionTask& t) {
     return region_array()->steal(queue_num, seed, t);
   }
 
-  // Process tasks remaining on any stack
-  void drain_marking_stacks(OopClosure *blk);
+  // Process tasks remaining on any marking stack
+  void follow_marking_stacks();
+  inline bool marking_stacks_empty() const;
 
   // Process tasks remaining on any stack
   void drain_region_stacks();
@@ -200,3 +206,8 @@
     "out of range manager_array access");
   return _manager_array[index];
 }
+
+bool ParCompactionManager::marking_stacks_empty() const {
+  return _marking_stack.size() == 0 && _overflow_stack->is_empty() &&
+    _objarray_queue.size() == 0 && _objarray_overflow_stack->is_empty();
+}
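
The OBJARRAY_QUEUE_SIZE comment's arithmetic assumes an ObjArrayTask is an object pointer plus a scan index: 8 bytes per entry on 32-bit, 16 on 64-bit. A small compile-time check of that sizing claim, with representative stand-in structs (a sketch, not the VM's real ObjArrayTask):

    #include <cstddef>
    #include <cstdint>

    // Representative stand-ins for ObjArrayTask: an object pointer plus
    // a scan index into the array.
    struct Task32 { uint32_t obj; uint32_t index; };  // 8 bytes on ILP32
    struct Task64 { uint64_t obj; uint64_t index; };  // 16 bytes on LP64

    static_assert(sizeof(Task32) == 8,  "8-byte entries assumed on 32-bit");
    static_assert(sizeof(Task64) == 16, "16-byte entries assumed on 64-bit");

    int main() {
        const std::size_t entries32 = std::size_t(1) << 12;  // NOT_LP64(12)
        const std::size_t entries64 = std::size_t(1) << 13;  // LP64_ONLY(13)
        // 4K entries * 8 bytes = 32 KiB; 8K entries * 16 bytes = 128 KiB.
        const bool matches = entries32 * sizeof(Task32) == 32 * 1024 &&
                             entries64 * sizeof(Task64) == 128 * 1024;
        return matches ? 0 : 1;
    }
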
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.inline.hpp	Thu May 13 18:26:35 2010 -0700
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2010 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+void ParCompactionManager::push_objarray(oop obj, size_t index)
+{
+  ObjArrayTask task(obj, index);
+  assert(task.is_valid(), "bad ObjArrayTask");
+  if (!_objarray_queue.push(task)) {
+    _objarray_overflow_stack->push(task);
+  }
+}
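
push_objarray() above records an (array, index) pair instead of pushing every element, and the follower scans a bounded chunk from that index before re-pushing the continuation; that is what keeps large object arrays from bloating the marking stack. A hedged single-threaded sketch of the chunking idea (the chunk size and all names are illustrative; in HotSpot the actual scanning is done by objArrayKlass::oop_follow_contents):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Models an ObjArrayTask: the array being scanned plus the next index.
    struct ArrayTask { const std::vector<int>* arr; std::size_t index; };

    static std::vector<ArrayTask> objarray_stack;  // models _objarray_stack

    static void mark(int elem) { std::printf("marked %d\n", elem); }

    // Scan one bounded chunk, then push a continuation task instead of
    // pushing every remaining element onto the marking stack at once.
    static void follow_chunk(const ArrayTask& t, std::size_t chunk = 4) {
        std::size_t end = t.index + chunk;
        if (end > t.arr->size()) end = t.arr->size();
        for (std::size_t i = t.index; i < end; ++i) mark((*t.arr)[i]);
        if (end < t.arr->size()) objarray_stack.push_back({t.arr, end});
    }

    int main() {
        std::vector<int> big(10);
        for (std::size_t i = 0; i < big.size(); ++i) big[i] = int(i);
        objarray_stack.push_back({&big, 0});
        while (!objarray_stack.empty()) {
            ArrayTask t = objarray_stack.back();
            objarray_stack.pop_back();
            follow_chunk(t);
        }
        return 0;
    }
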
--- a/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -117,11 +117,13 @@
       PerfData::U_Bytes, (jlong) ps_size_policy()->avg_base_footprint()->average(), CHECK);
 
     cname = PerfDataManager::counter_name(name_space(), "gcTimeLimitExceeded");
-    _gc_time_limit_exceeded = PerfDataManager::create_variable(SUN_GC, cname,
-      PerfData::U_Events, ps_size_policy()->gc_time_limit_exceeded(), CHECK);
+    _gc_overhead_limit_exceeded_counter =
+      PerfDataManager::create_variable(SUN_GC, cname,
+      PerfData::U_Events, ps_size_policy()->gc_overhead_limit_exceeded(), CHECK);
 
     cname = PerfDataManager::counter_name(name_space(), "liveAtLastFullGc");
-    _live_at_last_full_gc = PerfDataManager::create_variable(SUN_GC, cname,
+    _live_at_last_full_gc_counter =
+      PerfDataManager::create_variable(SUN_GC, cname,
       PerfData::U_Bytes, ps_size_policy()->live_at_last_full_gc(), CHECK);
 
     cname = PerfDataManager::counter_name(name_space(), "majorPauseOldSlope");
@@ -189,6 +191,8 @@
     update_minor_pause_old_slope();
     update_major_pause_young_slope();
     update_minor_collection_slope_counter();
+    update_gc_overhead_limit_exceeded_counter();
+    update_live_at_last_full_gc_counter();
   }
 }
 
--- a/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2005 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,8 +44,8 @@
   PerfVariable* _live_space;
   PerfVariable* _free_space;
   PerfVariable* _avg_base_footprint;
-  PerfVariable* _gc_time_limit_exceeded;
-  PerfVariable* _live_at_last_full_gc;
+  PerfVariable* _gc_overhead_limit_exceeded_counter;
+  PerfVariable* _live_at_last_full_gc_counter;
   PerfVariable* _old_capacity;
   PerfVariable* _boundary_moved;
 
@@ -169,6 +169,14 @@
       (jlong)(ps_size_policy()->major_pause_young_slope() * 1000)
     );
   }
+  inline void update_gc_overhead_limit_exceeded_counter() {
+    _gc_overhead_limit_exceeded_counter->set_value(
+      (jlong) ps_size_policy()->gc_overhead_limit_exceeded());
+  }
+  inline void update_live_at_last_full_gc_counter() {
+    _live_at_last_full_gc_counter->set_value(
+      (jlong)(ps_size_policy()->live_at_last_full_gc()));
+  }
 
   inline void update_scavenge_skipped(int cause) {
     _scavenge_skipped->set_value(cause);
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -46,6 +46,12 @@
 //
 // Note that this method should only be called from the vm_thread while
 // at a safepoint!
+//
+// Note that the all_soft_refs_clear flag in the collector policy
+// may be true because this method can be called without intervening
+// activity.  For example, when the heap space is tight and full
+// measures are being taken to free space.
+
 void PSMarkSweep::invoke(bool maximum_heap_compaction) {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
@@ -54,24 +60,18 @@
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   GCCause::Cause gc_cause = heap->gc_cause();
   PSAdaptiveSizePolicy* policy = heap->size_policy();
+  IsGCActiveMark mark;
 
-  // Before each allocation/collection attempt, find out from the
-  // policy object if GCs are, on the whole, taking too long. If so,
-  // bail out without attempting a collection.  The exceptions are
-  // for explicitly requested GC's.
-  if (!policy->gc_time_limit_exceeded() ||
-      GCCause::is_user_requested_gc(gc_cause) ||
-      GCCause::is_serviceability_requested_gc(gc_cause)) {
-    IsGCActiveMark mark;
+  if (ScavengeBeforeFullGC) {
+    PSScavenge::invoke_no_policy();
+  }
 
-    if (ScavengeBeforeFullGC) {
-      PSScavenge::invoke_no_policy();
-    }
+  const bool clear_all_soft_refs =
+    heap->collector_policy()->should_clear_all_soft_refs();
 
-    int count = (maximum_heap_compaction)?1:MarkSweepAlwaysCompactCount;
-    IntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
-    PSMarkSweep::invoke_no_policy(maximum_heap_compaction);
-  }
+  int count = (maximum_heap_compaction)?1:MarkSweepAlwaysCompactCount;
+  IntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
+  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
 }
 
 // This method contains no policy. You should probably
@@ -89,6 +89,10 @@
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
 
+  // The scope of casr should end after code that can change
+  // CollectorPolicy::_should_clear_all_soft_refs.
+  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());
+
   PSYoungGen* young_gen = heap->young_gen();
   PSOldGen* old_gen = heap->old_gen();
   PSPermGen* perm_gen = heap->perm_gen();
@@ -275,7 +279,8 @@
                                  old_gen->max_gen_size(),
                                  max_eden_size,
                                  true /* full gc*/,
-                                 gc_cause);
+                                 gc_cause,
+                                 heap->collector_policy());
 
         heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());
 
@@ -326,19 +331,6 @@
     // Track memory usage and detect low memory
     MemoryService::track_memory_usage();
     heap->update_counters();
-
-    if (PrintGCDetails) {
-      if (size_policy->print_gc_time_limit_would_be_exceeded()) {
-        if (size_policy->gc_time_limit_exceeded()) {
-          gclog_or_tty->print_cr("      GC time is exceeding GCTimeLimit "
-            "of %d%%", GCTimeLimit);
-        } else {
-          gclog_or_tty->print_cr("      GC time would exceed GCTimeLimit "
-            "of %d%%", GCTimeLimit);
-        }
-      }
-      size_policy->set_print_gc_time_limit_would_be_exceeded(false);
-    }
   }
 
   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
@@ -479,6 +471,7 @@
   _preserved_oop_stack = NULL;
 
   _marking_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);
+  _objarray_stack = new (ResourceObj::C_HEAP) GrowableArray<ObjArrayTask>(50, true);
 
   int size = SystemDictionary::number_of_classes() * 2;
   _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
@@ -497,6 +490,7 @@
   }
 
   delete _marking_stack;
+  delete _objarray_stack;
   delete _revisit_klass_stack;
   delete _revisit_mdo_stack;
 }
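
IntFlagSetting above and the new ClearedAllSoftRefs share the same RAII idiom: capture state in the constructor, restore or publish it in the destructor, so the change cannot leak out of the scope even on an early return. A generic sketch of the pattern (these classes are illustrative, not the VM's):

    #include <cstdio>

    // Scope-bound flag override: restores the old value on exit, the way
    // IntFlagSetting overrides MarkSweepAlwaysCompactCount for one call.
    class ScopedFlag {
        int& flag_;
        int  saved_;
    public:
        ScopedFlag(int& flag, int value) : flag_(flag), saved_(flag) {
            flag_ = value;
        }
        ~ScopedFlag() { flag_ = saved_; }
    };

    int g_compact_count = 4;

    void collect(bool maximum_compaction) {
        ScopedFlag setting(g_compact_count,
                           maximum_compaction ? 1 : g_compact_count);
        std::printf("collecting with compact count %d\n", g_compact_count);
        // flag restored automatically when 'setting' goes out of scope
    }

    int main() {
        collect(true);
        std::printf("after: %d\n", g_compact_count);  // prints 4
        return 0;
    }
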
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2005-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2005-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -785,7 +785,7 @@
 void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p, _is_root); }
 void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }
 
-void PSParallelCompact::FollowStackClosure::do_void() { follow_stack(_compaction_manager); }
+void PSParallelCompact::FollowStackClosure::do_void() { _compaction_manager->follow_marking_stacks(); }
 
 void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p)       { mark_and_push(_compaction_manager, p); }
 void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(_compaction_manager, p); }
@@ -1923,31 +1923,32 @@
 //
 // Note that this method should only be called from the vm_thread while at a
 // safepoint.
+//
+// Note that the all_soft_refs_clear flag in the collector policy
+// may be true because this method can be called without intervening
+// activity.  For example, when the heap space is tight and full
+// measures are being taken to free space.
 void PSParallelCompact::invoke(bool maximum_heap_compaction) {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == (Thread*)VMThread::vm_thread(),
          "should be in vm thread");
+
   ParallelScavengeHeap* heap = gc_heap();
   GCCause::Cause gc_cause = heap->gc_cause();
   assert(!heap->is_gc_active(), "not reentrant");
 
   PSAdaptiveSizePolicy* policy = heap->size_policy();
-
-  // Before each allocation/collection attempt, find out from the
-  // policy object if GCs are, on the whole, taking too long. If so,
-  // bail out without attempting a collection.  The exceptions are
-  // for explicitly requested GC's.
-  if (!policy->gc_time_limit_exceeded() ||
-      GCCause::is_user_requested_gc(gc_cause) ||
-      GCCause::is_serviceability_requested_gc(gc_cause)) {
-    IsGCActiveMark mark;
-
-    if (ScavengeBeforeFullGC) {
-      PSScavenge::invoke_no_policy();
-    }
-
-    PSParallelCompact::invoke_no_policy(maximum_heap_compaction);
+  IsGCActiveMark mark;
+
+  if (ScavengeBeforeFullGC) {
+    PSScavenge::invoke_no_policy();
   }
+
+  const bool clear_all_soft_refs =
+    heap->collector_policy()->should_clear_all_soft_refs();
+
+  PSParallelCompact::invoke_no_policy(clear_all_soft_refs ||
+                                      maximum_heap_compaction);
 }
 
 bool ParallelCompactData::region_contains(size_t region_index, HeapWord* addr) {
@@ -1976,6 +1977,11 @@
   PSPermGen* perm_gen = heap->perm_gen();
   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
 
+  // The scope of casr should end after code that can change
+  // CollectorPolicy::_should_clear_all_soft_refs.
+  ClearedAllSoftRefs casr(maximum_heap_compaction,
+                          heap->collector_policy());
+
   if (ZapUnusedHeapArea) {
     // Save information needed to minimize mangling
     heap->record_gen_tops_before_GC();
@@ -2109,7 +2115,8 @@
                               old_gen->max_gen_size(),
                               max_eden_size,
                               true /* full gc*/,
-                              gc_cause);
+                              gc_cause,
+                              heap->collector_policy());
 
         heap->resize_old_gen(
           size_policy->calculated_old_free_size_in_bytes());
@@ -2157,19 +2164,6 @@
     // Track memory usage and detect low memory
     MemoryService::track_memory_usage();
     heap->update_counters();
-
-    if (PrintGCDetails) {
-      if (size_policy->print_gc_time_limit_would_be_exceeded()) {
-        if (size_policy->gc_time_limit_exceeded()) {
-          gclog_or_tty->print_cr("      GC time is exceeding GCTimeLimit "
-            "of %d%%", GCTimeLimit);
-        } else {
-          gclog_or_tty->print_cr("      GC time would exceed GCTimeLimit "
-            "of %d%%", GCTimeLimit);
-        }
-      }
-      size_policy->set_print_gc_time_limit_would_be_exceeded(false);
-    }
   }
 
   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
@@ -2376,7 +2370,7 @@
   // Follow code cache roots.
   CodeCache::do_unloading(is_alive_closure(), &mark_and_push_closure,
                           purged_class);
-  follow_stack(cm); // Flush marking stack.
+  cm->follow_marking_stacks(); // Flush marking stack.
 
   // Update subklass/sibling/implementor links of live klasses
   // revisit_klass_stack is used in follow_weak_klass_links().
@@ -2389,8 +2383,7 @@
   SymbolTable::unlink(is_alive_closure());
   StringTable::unlink(is_alive_closure());
 
-  assert(cm->marking_stack()->size() == 0, "stack should be empty by now");
-  assert(cm->overflow_stack()->is_empty(), "stack should be empty by now");
+  assert(cm->marking_stacks_empty(), "marking stacks should be empty");
 }
 
 // This should be moved to the shared markSweep code!
@@ -2709,22 +2702,6 @@
   young_gen->move_and_update(cm);
 }
 
-
-void PSParallelCompact::follow_stack(ParCompactionManager* cm) {
-  while(!cm->overflow_stack()->is_empty()) {
-    oop obj = cm->overflow_stack()->pop();
-    obj->follow_contents(cm);
-  }
-
-  oop obj;
-  // obj is a reference!!!
-  while (cm->marking_stack()->pop_local(obj)) {
-    // It would be nice to assert about the type of objects we might
-    // pop, but they can come from anywhere, unfortunately.
-    obj->follow_contents(cm);
-  }
-}
-
 void
 PSParallelCompact::follow_weak_klass_links() {
   // All klasses on the revisit stack are marked at this point.
@@ -2745,7 +2722,7 @@
         &keep_alive_closure);
     }
     // revisit_klass_stack is cleared in reset()
-    follow_stack(cm);
+    cm->follow_marking_stacks();
   }
 }
 
@@ -2776,7 +2753,7 @@
       rms->at(j)->follow_weak_refs(is_alive_closure());
     }
     // revisit_mdo_stack is cleared in reset()
-    follow_stack(cm);
+    cm->follow_marking_stacks();
   }
 }
 
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Thu May 13 18:26:35 2010 -0700
@@ -901,7 +901,6 @@
   // Mark live objects
   static void marking_phase(ParCompactionManager* cm,
                             bool maximum_heap_compaction);
-  static void follow_stack(ParCompactionManager* cm);
   static void follow_weak_klass_links();
   static void follow_mdo_weak_refs();
 
@@ -1276,7 +1275,7 @@
       }
     }
   }
-  follow_stack(cm);
+  cm->follow_marking_stacks();
 }
 
 template <class T>
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2002-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2002-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -187,8 +187,7 @@
 //
 // Note that this method should only be called from the vm_thread while
 // at a safepoint!
-void PSScavenge::invoke()
-{
+void PSScavenge::invoke() {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
   assert(!Universe::heap()->is_gc_active(), "not reentrant");
@@ -197,29 +196,25 @@
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
 
   PSAdaptiveSizePolicy* policy = heap->size_policy();
+  IsGCActiveMark mark;
 
-  // Before each allocation/collection attempt, find out from the
-  // policy object if GCs are, on the whole, taking too long. If so,
-  // bail out without attempting a collection.
-  if (!policy->gc_time_limit_exceeded()) {
-    IsGCActiveMark mark;
+  bool scavenge_was_done = PSScavenge::invoke_no_policy();
 
-    bool scavenge_was_done = PSScavenge::invoke_no_policy();
-
-    PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
+  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
+  if (UsePerfData)
+    counters->update_full_follows_scavenge(0);
+  if (!scavenge_was_done ||
+      policy->should_full_GC(heap->old_gen()->free_in_bytes())) {
     if (UsePerfData)
-      counters->update_full_follows_scavenge(0);
-    if (!scavenge_was_done ||
-        policy->should_full_GC(heap->old_gen()->free_in_bytes())) {
-      if (UsePerfData)
-        counters->update_full_follows_scavenge(full_follows_scavenge);
+      counters->update_full_follows_scavenge(full_follows_scavenge);
+    GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
+    CollectorPolicy* cp = heap->collector_policy();
+    const bool clear_all_softrefs = cp->should_clear_all_soft_refs();
 
-      GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
-      if (UseParallelOldGC) {
-        PSParallelCompact::invoke_no_policy(false);
-      } else {
-        PSMarkSweep::invoke_no_policy(false);
-      }
+    if (UseParallelOldGC) {
+      PSParallelCompact::invoke_no_policy(clear_all_softrefs);
+    } else {
+      PSMarkSweep::invoke_no_policy(clear_all_softrefs);
     }
   }
 }
@@ -447,6 +442,9 @@
       size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
       size_policy->update_averages(_survivor_overflow, survived, promoted);
 
+      // A successful scavenge should restart the GC time limit count, which
+      // applies to full GCs.
+      size_policy->reset_gc_overhead_limit_count();
       if (UseAdaptiveSizePolicy) {
         // Calculate the new survivor size and tenuring threshold
 
@@ -523,7 +521,8 @@
                                    old_gen->max_gen_size(),
                                    max_eden_size,
                                    false  /* full gc*/,
-                                   gc_cause);
+                                   gc_cause,
+                                   heap->collector_policy());
 
         }
         // Resize the young generation at every collection
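
With the overhead-limit early exit gone, PSScavenge::invoke() above now always attempts the scavenge and falls back to a full collection when the scavenge failed or the policy predicts the next promotion will not fit, forwarding the should_clear_all_soft_refs decision. The control flow reduces to roughly this sketch, with stand-in predicates rather than the real signatures:

    #include <cstddef>
    #include <cstdio>

    // Stand-in predicates; the real decisions come from the size policy
    // and collector policy objects.
    bool invoke_scavenge() { return true; }   // models invoke_no_policy()
    bool should_full_gc(std::size_t old_free) { return old_free < (1u << 20); }
    bool should_clear_all_soft_refs() { return false; }
    void full_gc(bool clear_soft_refs) {
        std::printf("full GC, clear_soft_refs=%d\n", (int)clear_soft_refs);
    }

    void invoke(std::size_t old_gen_free_bytes) {
        bool scavenge_was_done = invoke_scavenge();
        if (!scavenge_was_done || should_full_gc(old_gen_free_bytes)) {
            // The full collection inherits the soft-reference clearing
            // decision made while the overhead limit was being approached.
            full_gc(should_clear_all_soft_refs());
        }
    }

    int main() { invoke(512 * 1024); return 0; }
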
--- a/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2004-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2004-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,13 +44,15 @@
     _survivor_size(init_survivor_size),
     _gc_pause_goal_sec(gc_pause_goal_sec),
     _throughput_goal(1.0 - double(1.0 / (1.0 + (double) gc_cost_ratio))),
-    _gc_time_limit_exceeded(false),
-    _print_gc_time_limit_would_be_exceeded(false),
-    _gc_time_limit_count(0),
+    _gc_overhead_limit_exceeded(false),
+    _print_gc_overhead_limit_would_be_exceeded(false),
+    _gc_overhead_limit_count(0),
     _latest_minor_mutator_interval_seconds(0),
     _threshold_tolerance_percent(1.0 + ThresholdTolerance/100.0),
     _young_gen_change_for_minor_throughput(0),
     _old_gen_change_for_major_throughput(0) {
+  assert(AdaptiveSizePolicyGCTimeLimitThreshold > 0,
+    "No opportunity to clear SoftReferences before GC overhead limit");
   _avg_minor_pause    =
     new AdaptivePaddedAverage(AdaptiveTimeWeight, PausePadding);
   _avg_minor_interval = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
@@ -278,6 +280,147 @@
   set_decide_at_full_gc(0);
 }
 
+void AdaptiveSizePolicy::check_gc_overhead_limit(
+                                          size_t young_live,
+                                          size_t eden_live,
+                                          size_t max_old_gen_size,
+                                          size_t max_eden_size,
+                                          bool   is_full_gc,
+                                          GCCause::Cause gc_cause,
+                                          CollectorPolicy* collector_policy) {
+
+  // Ignore explicit GC's.  Exiting here does not set the flag and
+  // does not reset the count.  Updating of the averages for system
+  // GC's is still controlled by UseAdaptiveSizePolicyWithSystemGC.
+  if (GCCause::is_user_requested_gc(gc_cause) ||
+      GCCause::is_serviceability_requested_gc(gc_cause)) {
+    return;
+  }
+  // eden_limit is the upper limit on the size of eden based on
+  // the maximum size of the young generation and the sizes
+  // of the survivor space.
+  // The question being asked is whether the gc costs are high
+  // and the space being recovered by a collection is low.
+  // free_in_young_gen is the free space in the young generation
+  // after a collection and promo_live is the free space in the old
+  // generation after a collection.
+  //
+  // Use the minimum of the current value of the live in the
+  // young gen or the average of the live in the young gen.
+  // If the current value drops quickly, that should be taken
+  // into account (i.e., don't trigger if the amount of free
+  // space has suddenly jumped up).  If the current is much
+  // higher than the average, use the average since it represents
+  // the longer-term behavior.
+  const size_t live_in_eden =
+    MIN2(eden_live, (size_t) avg_eden_live()->average());
+  const size_t free_in_eden = max_eden_size > live_in_eden ?
+    max_eden_size - live_in_eden : 0;
+  const size_t free_in_old_gen = (size_t)(max_old_gen_size - avg_old_live()->average());
+  const size_t total_free_limit = free_in_old_gen + free_in_eden;
+  const size_t total_mem = max_old_gen_size + max_eden_size;
+  const double mem_free_limit = total_mem * (GCHeapFreeLimit/100.0);
+  const double mem_free_old_limit = max_old_gen_size * (GCHeapFreeLimit/100.0);
+  const double mem_free_eden_limit = max_eden_size * (GCHeapFreeLimit/100.0);
+  const double gc_cost_limit = GCTimeLimit/100.0;
+  size_t promo_limit = (size_t)(max_old_gen_size - avg_old_live()->average());
+  // But don't force a promo size below the current promo size. Otherwise,
+  // the promo size will shrink for no good reason.
+  promo_limit = MAX2(promo_limit, _promo_size);
+
+
+  if (PrintAdaptiveSizePolicy && (Verbose ||
+      (free_in_old_gen < (size_t) mem_free_old_limit &&
+       free_in_eden < (size_t) mem_free_eden_limit))) {
+    gclog_or_tty->print_cr(
+          "PSAdaptiveSizePolicy::compute_generation_free_space limits:"
+          " promo_limit: " SIZE_FORMAT
+          " max_eden_size: " SIZE_FORMAT
+          " total_free_limit: " SIZE_FORMAT
+          " max_old_gen_size: " SIZE_FORMAT
+          " max_eden_size: " SIZE_FORMAT
+          " mem_free_limit: " SIZE_FORMAT,
+          promo_limit, max_eden_size, total_free_limit,
+          max_old_gen_size, max_eden_size,
+          (size_t) mem_free_limit);
+  }
+
+  bool print_gc_overhead_limit_would_be_exceeded = false;
+  if (is_full_gc) {
+    if (gc_cost() > gc_cost_limit &&
+      free_in_old_gen < (size_t) mem_free_old_limit &&
+      free_in_eden < (size_t) mem_free_eden_limit) {
+      // Collections, on average, are taking too much time, and
+      //      gc_cost() > gc_cost_limit
+      // we have too little space available after a full gc.
+      //      total_free_limit < mem_free_limit
+      // where
+      //   total_free_limit is the free space available in
+      //     both generations
+      //   total_mem is the total space available for allocation
+      //     in both generations (survivor spaces are not included
+      //     just as they are not included in eden_limit).
+      //   mem_free_limit is a fraction of total_mem judged to be an
+      //     acceptable amount that is still unused.
+      // The heap can ask for the value of this variable when deciding
+      // whether to throw an OutOfMemory error.
+      // Note that the gc time limit test only works for the collections
+      // of the young gen + tenured gen and not for collections of the
+      // permanent gen.  That is because the calculation of the space
+      // freed by the collection is the free space in the young gen +
+      // tenured gen.
+      // At this point the GC overhead limit is being exceeded.
+      inc_gc_overhead_limit_count();
+      if (UseGCOverheadLimit) {
+        if (gc_overhead_limit_count() >=
+            AdaptiveSizePolicyGCTimeLimitThreshold){
+          // All conditions have been met for throwing an out-of-memory
+          set_gc_overhead_limit_exceeded(true);
+          // Avoid consecutive OOM due to the gc time limit by resetting
+          // the counter.
+          reset_gc_overhead_limit_count();
+        } else {
+          // The required consecutive collections which exceed the
+          // GC time limit may or may not have been reached. We
+          // are approaching that condition and so as not to
+          // throw an out-of-memory before all SoftRefs have been
+          // cleared, set _should_clear_all_soft_refs in CollectorPolicy.
+          // The clearing will be done on the next GC.
+          bool near_limit = gc_overhead_limit_near();
+          if (near_limit) {
+            collector_policy->set_should_clear_all_soft_refs(true);
+            if (PrintGCDetails && Verbose) {
+              gclog_or_tty->print_cr("  Nearing GC overhead limit, "
+                "will be clearing all SoftReference");
+            }
+          }
+        }
+      }
+      // Set this even when the overhead limit will not
+      // cause an out-of-memory.  A diagnostic message indicating
+      // that the overhead limit is being exceeded is sometimes
+      // printed.
+      print_gc_overhead_limit_would_be_exceeded = true;
+
+    } else {
+      // Did not exceed overhead limits
+      reset_gc_overhead_limit_count();
+    }
+  }
+
+  if (UseGCOverheadLimit && PrintGCDetails && Verbose) {
+    if (gc_overhead_limit_exceeded()) {
+      gclog_or_tty->print_cr("      GC is exceeding overhead limit "
+        "of %d%%", GCTimeLimit);
+      reset_gc_overhead_limit_count();
+    } else if (print_gc_overhead_limit_would_be_exceeded) {
+      assert(gc_overhead_limit_count() > 0, "Should not be printing");
+      gclog_or_tty->print_cr("      GC would exceed overhead limit "
+        "of %d%% %d consecutive time(s)",
+        GCTimeLimit, gc_overhead_limit_count());
+    }
+  }
+}
 // Printing
 
 bool AdaptiveSizePolicy::print_adaptive_size_policy_on(outputStream* st) const {
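
The rewritten test in check_gc_overhead_limit() requires each generation to be nearly full on its own (free space below GCHeapFreeLimit percent of that generation's maximum) in addition to gc_cost() exceeding GCTimeLimit/100. A worked example with the default limits of 5% and 98% (all sizes below are made up for illustration):

    #include <cstdio>

    int main() {
        const double GCHeapFreeLimit = 5.0;   // default: 5% must remain free
        const double GCTimeLimit     = 98.0;  // default: <=98% of time in GC

        const double max_old   = 512.0 * 1024 * 1024;  // 512 MiB old gen
        const double max_eden  = 128.0 * 1024 * 1024;  // 128 MiB eden
        const double free_old  =  20.0 * 1024 * 1024;  // free after full GC
        const double free_eden =   4.0 * 1024 * 1024;  // free in eden
        const double gc_cost   = 0.99;                 // 99% of time in GC

        const bool space_exhausted =
            free_old  < max_old  * (GCHeapFreeLimit / 100.0) &&  // 20 < 25.6 MiB
            free_eden < max_eden * (GCHeapFreeLimit / 100.0);    //  4 <  6.4 MiB
        const bool time_exhausted = gc_cost > GCTimeLimit / 100.0;  // 0.99 > 0.98

        std::printf("overhead limit hit this GC: %s\n",
                    (space_exhausted && time_exhausted) ? "yes" : "no");
        return 0;
    }
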
--- a/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2004-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2004-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
 
 // Forward decls
 class elapsedTimer;
+class CollectorPolicy;
 
 class AdaptiveSizePolicy : public CHeapObj {
  friend class GCAdaptivePolicyCounters;
@@ -75,13 +76,16 @@
 
   // This is a hint for the heap:  we've detected that gc times
   // are taking longer than GCTimeLimit allows.
-  bool _gc_time_limit_exceeded;
-  // Use for diagnostics only.  If UseGCTimeLimit is false,
+  bool _gc_overhead_limit_exceeded;
+  // Use for diagnostics only.  If UseGCOverheadLimit is false,
   // this variable is still set.
-  bool _print_gc_time_limit_would_be_exceeded;
+  bool _print_gc_overhead_limit_would_be_exceeded;
   // Count of consecutive GC that have exceeded the
   // GC time limit criterion.
-  uint _gc_time_limit_count;
+  uint _gc_overhead_limit_count;
+  // This flag signals that GCTimeLimit is being exceeded
+  // but may not have done so for the required number of consecutive
+  // collections.
 
   // Minor collection timers used to determine both
   // pause and interval times for collections.
@@ -406,22 +410,21 @@
   // Most heaps will choose to throw an OutOfMemoryError when
   // this occurs but it is up to the heap to request this information
   // of the policy
-  bool gc_time_limit_exceeded() {
-    return _gc_time_limit_exceeded;
-  }
-  void set_gc_time_limit_exceeded(bool v) {
-    _gc_time_limit_exceeded = v;
+  bool gc_overhead_limit_exceeded() {
+    return _gc_overhead_limit_exceeded;
   }
-  bool print_gc_time_limit_would_be_exceeded() {
-    return _print_gc_time_limit_would_be_exceeded;
-  }
-  void set_print_gc_time_limit_would_be_exceeded(bool v) {
-    _print_gc_time_limit_would_be_exceeded = v;
+  void set_gc_overhead_limit_exceeded(bool v) {
+    _gc_overhead_limit_exceeded = v;
   }
 
-  uint gc_time_limit_count() { return _gc_time_limit_count; }
-  void reset_gc_time_limit_count() { _gc_time_limit_count = 0; }
-  void inc_gc_time_limit_count() { _gc_time_limit_count++; }
+  // Tests conditions that indicate the GC overhead limit is being approached.
+  bool gc_overhead_limit_near() {
+    return gc_overhead_limit_count() >=
+        (AdaptiveSizePolicyGCTimeLimitThreshold - 1);
+  }
+  uint gc_overhead_limit_count() { return _gc_overhead_limit_count; }
+  void reset_gc_overhead_limit_count() { _gc_overhead_limit_count = 0; }
+  void inc_gc_overhead_limit_count() { _gc_overhead_limit_count++; }
   // accessors for flags recording the decisions to resize the
   // generations to meet the pause goal.
 
@@ -436,6 +439,16 @@
   int decide_at_full_gc() { return _decide_at_full_gc; }
   void set_decide_at_full_gc(int v) { _decide_at_full_gc = v; }
 
+  // Check the conditions for an out-of-memory due to excessive GC time.
+  // Set _gc_overhead_limit_exceeded if all the conditions have been met.
+  void check_gc_overhead_limit(size_t young_live,
+                               size_t eden_live,
+                               size_t max_old_gen_size,
+                               size_t max_eden_size,
+                               bool   is_full_gc,
+                               GCCause::Cause gc_cause,
+                               CollectorPolicy* collector_policy);
+
   // Printing support
   virtual bool print_adaptive_size_policy_on(outputStream* st) const;
   bool print_adaptive_size_policy_on(outputStream* st, int
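
gc_overhead_limit_near() exists so soft references get cleared one collection before the out-of-memory threshold is reached; only AdaptiveSizePolicyGCTimeLimitThreshold consecutive over-limit full collections set the exceeded flag. A sketch of that counter protocol, assuming a threshold of 5 for the flag's default (the class below is illustrative, not the VM's):

    #include <cstdio>

    // Illustrative counter protocol; the threshold value of 5 is an
    // assumption about AdaptiveSizePolicyGCTimeLimitThreshold's default.
    struct OverheadPolicy {
        unsigned count;
        unsigned threshold;
        bool exceeded;
        bool clear_soft_refs_next;
        OverheadPolicy() : count(0), threshold(5),
                           exceeded(false), clear_soft_refs_next(false) {}

        void record_over_limit_full_gc() {
            ++count;
            if (count >= threshold) {
                exceeded = true;              // next allocation may throw OOME
                count = 0;                    // avoid back-to-back OOMEs
            } else if (count >= threshold - 1) {
                clear_soft_refs_next = true;  // clear SoftReferences first
            }
        }
        void record_healthy_gc() { count = 0; }
    };

    int main() {
        OverheadPolicy p;
        for (int i = 1; i <= 5; ++i) {
            p.record_over_limit_full_gc();
            std::printf("gc %d: near=%d exceeded=%d\n",
                        i, (int)p.clear_soft_refs_next, (int)p.exceeded);
        }
        return 0;
    }
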
--- a/src/share/vm/gc_implementation/shared/markSweep.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/shared/markSweep.cpp	Thu May 13 18:26:35 2010 -0700
@@ -25,8 +25,9 @@
 #include "incls/_precompiled.incl"
 #include "incls/_markSweep.cpp.incl"
 
-GrowableArray<oop>*     MarkSweep::_marking_stack       = NULL;
-GrowableArray<Klass*>*  MarkSweep::_revisit_klass_stack = NULL;
+GrowableArray<oop>*          MarkSweep::_marking_stack = NULL;
+GrowableArray<ObjArrayTask>* MarkSweep::_objarray_stack = NULL;
+GrowableArray<Klass*>*       MarkSweep::_revisit_klass_stack = NULL;
 GrowableArray<DataLayout*>*  MarkSweep::_revisit_mdo_stack = NULL;
 
 GrowableArray<oop>*     MarkSweep::_preserved_oop_stack = NULL;
@@ -104,11 +105,19 @@
 void MarkSweep::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(p); }
 
 void MarkSweep::follow_stack() {
-  while (!_marking_stack->is_empty()) {
-    oop obj = _marking_stack->pop();
-    assert (obj->is_gc_marked(), "p must be marked");
-    obj->follow_contents();
-  }
+  do {
+    while (!_marking_stack->is_empty()) {
+      oop obj = _marking_stack->pop();
+      assert (obj->is_gc_marked(), "p must be marked");
+      obj->follow_contents();
+    }
+    // Process ObjArrays one at a time to avoid marking stack bloat.
+    if (!_objarray_stack->is_empty()) {
+      ObjArrayTask task = _objarray_stack->pop();
+      objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
+      k->oop_follow_contents(task.obj(), task.index());
+    }
+  } while (!_marking_stack->is_empty() || !_objarray_stack->is_empty());
 }
 
 MarkSweep::FollowStackClosure MarkSweep::follow_stack_closure;
--- a/src/share/vm/gc_implementation/shared/markSweep.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/shared/markSweep.hpp	Thu May 13 18:26:35 2010 -0700
@@ -110,8 +110,9 @@
   // Vars
   //
  protected:
-  // Traversal stack used during phase1
+  // Traversal stacks used during phase1
   static GrowableArray<oop>*             _marking_stack;
+  static GrowableArray<ObjArrayTask>*    _objarray_stack;
   // Stack for live klasses to revisit at end of marking phase
   static GrowableArray<Klass*>*          _revisit_klass_stack;
   // Set (stack) of MDO's to revisit at end of marking phase
@@ -188,6 +189,7 @@
   template <class T> static inline void mark_and_follow(T* p);
   // Check mark and maybe push on marking stack
   template <class T> static inline void mark_and_push(T* p);
+  static inline void push_objarray(oop obj, size_t index);
 
   static void follow_stack();   // Empty marking stack.
 
--- a/src/share/vm/gc_implementation/shared/markSweep.inline.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/shared/markSweep.inline.hpp	Thu May 13 18:26:35 2010 -0700
@@ -77,6 +77,12 @@
   }
 }
 
+void MarkSweep::push_objarray(oop obj, size_t index) {
+  ObjArrayTask task(obj, index);
+  assert(task.is_valid(), "bad ObjArrayTask");
+  _objarray_stack->push(task);
+}
+
 template <class T> inline void MarkSweep::adjust_pointer(T* p, bool isroot) {
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop)) {
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Thu May 13 18:26:35 2010 -0700
@@ -115,11 +115,25 @@
 void VM_GC_HeapInspection::doit() {
   HandleMark hm;
   CollectedHeap* ch = Universe::heap();
+  ch->ensure_parsability(false); // must happen, even if collection does
+                                 // not happen (e.g. due to GC_locker)
   if (_full_gc) {
-    ch->collect_as_vm_thread(GCCause::_heap_inspection);
-  } else {
-    // make the heap parsable (no need to retire TLABs)
-    ch->ensure_parsability(false);
+    // The collection attempt below would be skipped anyway if
+    // the gc locker is held. The following dump may then be a tad
+    // misleading to someone expecting only live objects to show
+    // up in the dump (see CR 6944195). Just issue a suitable warning
+    // in that case and do not attempt to do a collection.
+    // The latter is a subtle point, because even a failed attempt
+    // to GC will, in fact, induce one in the future, which we
+    // probably want to avoid in this case because the GC that we may
+    // be about to attempt holds value for us only
+    // if it happens now and not if it happens in the eventual
+    // future.
+    if (GC_locker::is_active()) {
+      warning("GC locker is held; pre-dump GC was skipped");
+    } else {
+      ch->collect_as_vm_thread(GCCause::_heap_inspection);
+    }
   }
   HeapInspection::heap_inspection(_out, _need_prologue /* need_prologue */);
 }
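
The comment above captures the policy: making the heap parsable is unconditional, but the pre-dump collection is skipped with a warning when the GC locker is held, since the attempt would be skipped anyway and a deferred GC would not help this particular dump. A stripped-down sketch of that decision (stand-in functions, not the VM's API):

    #include <cstdio>

    // Stand-ins for GC_locker and the heap operations involved.
    static bool gc_locker_active = true;

    void warn(const char* msg) { std::printf("warning: %s\n", msg); }
    void collect() { std::printf("collected\n"); }
    void ensure_parsability() { std::printf("heap made parsable\n"); }
    void dump_heap() { std::printf("heap dumped\n"); }

    void heap_inspection(bool full_gc) {
        ensure_parsability();  // needed whether or not we collect
        if (full_gc) {
            if (gc_locker_active) {
                // Collecting now would be skipped anyway; say so instead
                // of silently producing a dump that includes dead objects.
                warn("GC locker is held; pre-dump GC was skipped");
            } else {
                collect();
            }
        }
        dump_heap();
    }

    int main() { heap_inspection(true); return 0; }
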
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2005-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2005-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -89,8 +89,19 @@
     if (full) {
       _full_gc_count_before = full_gc_count_before;
     }
+    // In ParallelScavengeHeap::mem_allocate() collections can be
+    // executed within a loop and _all_soft_refs_clear can be set
+    // true after they have been cleared by a collection and another
+    // collection started so that _all_soft_refs_clear can be true
+    // when this collection is started.  Don't assert that
+    // _all_soft_refs_clear has to be false here even though
+    // mutators have run.  Soft refs will be cleared again in this
+    // collection.
   }
-  ~VM_GC_Operation() {}
+  ~VM_GC_Operation() {
+    CollectedHeap* ch = Universe::heap();
+    ch->collector_policy()->set_all_soft_refs_clear(false);
+  }
 
   // Acquire the reference synchronization lock
   virtual bool doit_prologue();
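
The new destructor above guarantees the sticky all-soft-refs-clear state never outlives the VM operation that observed it, matching the constructor comment about why the flag may legitimately still be true on entry. A toy sketch of the reset-on-exit behavior (illustrative names):

    #include <cstdio>

    static bool all_soft_refs_clear = true;  // may be left true by a prior GC

    // Models VM_GC_Operation: however the operation ends, the sticky
    // "soft refs already cleared" state is dropped on exit.
    struct GCOperation {
        void doit() {
            std::printf("collect; soft refs clear on entry=%d\n",
                        (int)all_soft_refs_clear);
        }
        ~GCOperation() { all_soft_refs_clear = false; }
    };

    int main() {
        { GCOperation op; op.doit(); }
        std::printf("after operation: %d\n", (int)all_soft_refs_clear);  // 0
        return 0;
    }
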
--- a/src/share/vm/gc_interface/collectedHeap.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/gc_interface/collectedHeap.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,7 @@
 class ThreadClosure;
 class AdaptiveSizePolicy;
 class Thread;
+class CollectorPolicy;
 
 //
 // CollectedHeap
@@ -506,6 +507,9 @@
   // Return the AdaptiveSizePolicy for the heap.
   virtual AdaptiveSizePolicy* size_policy() = 0;
 
+  // Return the CollectorPolicy for the heap
+  virtual CollectorPolicy* collector_policy() const = 0;
+
   // Iterate over all the ref-containing fields of all objects, calling
   // "cl.do_oop" on each. This includes objects in permanent memory.
   virtual void oop_iterate(OopClosure* cl) = 0;
--- a/src/share/vm/includeDB_compiler1	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/includeDB_compiler1	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 //
-// Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
+// Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -246,6 +246,7 @@
 c1_LIRGenerator.cpp                     c1_LIRGenerator.hpp
 c1_LIRGenerator.cpp                     c1_ValueStack.hpp
 c1_LIRGenerator.cpp                     ciArrayKlass.hpp
+c1_LIRGenerator.cpp                     ciCPCache.hpp
 c1_LIRGenerator.cpp                     ciInstance.hpp
 c1_LIRGenerator.cpp                     heapRegion.hpp
 c1_LIRGenerator.cpp                     sharedRuntime.hpp
--- a/src/share/vm/includeDB_core	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/includeDB_core	Thu May 13 18:26:35 2010 -0700
@@ -176,6 +176,7 @@
 arguments.cpp                           oop.inline.hpp
 arguments.cpp                           os_<os_family>.inline.hpp
 arguments.cpp                           referenceProcessor.hpp
+arguments.cpp                           taskqueue.hpp
 arguments.cpp                           universe.inline.hpp
 arguments.cpp                           vm_version_<arch>.hpp
 
@@ -540,6 +541,7 @@
 
 ciCPCache.cpp                           cpCacheOop.hpp
 ciCPCache.cpp                           ciCPCache.hpp
+ciCPCache.cpp                           ciUtilities.hpp
 
 ciCPCache.hpp                           ciClassList.hpp
 ciCPCache.hpp                           ciObject.hpp
@@ -1021,6 +1023,7 @@
 codeCache.cpp                           codeCache.hpp
 codeCache.cpp                           dependencies.hpp
 codeCache.cpp                           gcLocker.hpp
+codeCache.cpp                           handles.inline.hpp
 codeCache.cpp                           icache.hpp
 codeCache.cpp                           iterator.hpp
 codeCache.cpp                           java.hpp
@@ -2014,6 +2017,7 @@
 init.cpp                                icBuffer.hpp
 init.cpp                                icache.hpp
 init.cpp                                init.hpp
+init.cpp                                methodHandles.hpp
 init.cpp                                safepoint.hpp
 init.cpp                                sharedRuntime.hpp
 init.cpp                                universe.hpp
@@ -2022,6 +2026,7 @@
 
 instanceKlass.cpp                       collectedHeap.inline.hpp
 instanceKlass.cpp                       compileBroker.hpp
+instanceKlass.cpp                       dtrace.hpp
 instanceKlass.cpp                       fieldDescriptor.hpp
 instanceKlass.cpp                       genOopClosures.inline.hpp
 instanceKlass.cpp                       handles.inline.hpp
@@ -2481,6 +2486,7 @@
 jvm.cpp                                 collectedHeap.inline.hpp
 jvm.cpp                                 copy.hpp
 jvm.cpp                                 defaultStream.hpp
+jvm.cpp                                 dtrace.hpp
 jvm.cpp                                 dtraceJSDT.hpp
 jvm.cpp                                 events.hpp
 jvm.cpp                                 handles.inline.hpp
@@ -2722,8 +2728,10 @@
 
 markSweep.cpp                           compileBroker.hpp
 markSweep.cpp                           methodDataOop.hpp
+markSweep.cpp                           objArrayKlass.inline.hpp
 
 markSweep.hpp                           collectedHeap.hpp
+markSweep.hpp                           taskqueue.hpp
 
 memRegion.cpp                           globals.hpp
 memRegion.cpp                           memRegion.hpp
@@ -2868,6 +2876,7 @@
 methodHandles.cpp                       oopFactory.hpp
 methodHandles.cpp                       reflection.hpp
 methodHandles.cpp                       signature.hpp
+methodHandles.cpp                       stubRoutines.hpp
 methodHandles.cpp                       symbolTable.hpp
 
 methodHandles_<arch>.cpp                allocation.inline.hpp
@@ -3052,8 +3061,10 @@
 objArrayKlass.cpp                       genOopClosures.inline.hpp
 objArrayKlass.cpp                       handles.inline.hpp
 objArrayKlass.cpp                       instanceKlass.hpp
+objArrayKlass.cpp                       markSweep.inline.hpp
 objArrayKlass.cpp                       mutexLocker.hpp
 objArrayKlass.cpp                       objArrayKlass.hpp
+objArrayKlass.cpp                       objArrayKlass.inline.hpp
 objArrayKlass.cpp                       objArrayKlassKlass.hpp
 objArrayKlass.cpp                       objArrayOop.hpp
 objArrayKlass.cpp                       oop.inline.hpp
@@ -3064,11 +3075,12 @@
 objArrayKlass.cpp                       universe.inline.hpp
 objArrayKlass.cpp                       vmSymbols.hpp
 
-
 objArrayKlass.hpp                       arrayKlass.hpp
 objArrayKlass.hpp                       instanceKlass.hpp
 objArrayKlass.hpp                       specialized_oop_closures.hpp
 
+objArrayKlass.inline.hpp                objArrayKlass.hpp
+
 objArrayKlassKlass.cpp                  collectedHeap.inline.hpp
 objArrayKlassKlass.cpp                  instanceKlass.hpp
 objArrayKlassKlass.cpp                  javaClasses.hpp
@@ -4094,6 +4106,7 @@
 task.hpp                                top.hpp
 
 taskqueue.cpp                           debug.hpp
+taskqueue.cpp                           oop.inline.hpp
 taskqueue.cpp                           os.hpp
 taskqueue.cpp                           taskqueue.hpp
 taskqueue.cpp                           thread_<os_family>.inline.hpp
@@ -4447,6 +4460,7 @@
 
 unsafe.cpp                              allocation.inline.hpp
 unsafe.cpp                              copy.hpp
+unsafe.cpp                              dtrace.hpp
 unsafe.cpp                              globals.hpp
 unsafe.cpp                              interfaceSupport.hpp
 unsafe.cpp                              jni.h
@@ -4618,6 +4632,7 @@
 
 vmThread.cpp                            collectedHeap.hpp
 vmThread.cpp                            compileBroker.hpp
+vmThread.cpp                            dtrace.hpp
 vmThread.cpp                            events.hpp
 vmThread.cpp                            interfaceSupport.hpp
 vmThread.cpp                            methodOop.hpp
--- a/src/share/vm/includeDB_gc_parallel	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/includeDB_gc_parallel	Thu May 13 18:26:35 2010 -0700
@@ -115,10 +115,14 @@
 objArrayKlass.cpp                       g1CollectedHeap.inline.hpp
 objArrayKlass.cpp                       g1OopClosures.inline.hpp
 objArrayKlass.cpp                       oop.pcgc.inline.hpp
+objArrayKlass.cpp                       psCompactionManager.hpp
 objArrayKlass.cpp                       psPromotionManager.inline.hpp
 objArrayKlass.cpp                       psScavenge.inline.hpp
 objArrayKlass.cpp                       parOopClosures.inline.hpp
 
+objArrayKlass.inline.hpp                psCompactionManager.inline.hpp
+objArrayKlass.inline.hpp                psParallelCompact.hpp
+
 oop.pcgc.inline.hpp                     parNewGeneration.hpp
 oop.pcgc.inline.hpp                     parallelScavengeHeap.hpp
 oop.pcgc.inline.hpp                     psCompactionManager.hpp
--- a/src/share/vm/includeDB_zero	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/includeDB_zero	Thu May 13 18:26:35 2010 -0700
@@ -1,6 +1,6 @@
 //
 // Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
-// Copyright 2009 Red Hat, Inc.
+// Copyright 2009, 2010 Red Hat, Inc.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,8 @@
 
 // NOTE: DO NOT CHANGE THIS COPYRIGHT TO NEW STYLE - IT WILL BREAK makeDeps!
 
+cppInterpreter_<arch>.cpp               stack_<arch>.inline.hpp
+
 entryFrame_<arch>.hpp                   javaCalls.hpp
 entryFrame_<arch>.hpp                   stack_<arch>.hpp
 
@@ -47,9 +49,19 @@
 interpreterFrame_<arch>.hpp             stack_<arch>.hpp
 interpreterFrame_<arch>.hpp             thread.hpp
 
+interpreterRT_<arch>.cpp                stack_<arch>.inline.hpp
+
 sharkFrame_<arch>.hpp                   methodOop.hpp
 sharkFrame_<arch>.hpp                   stack_<arch>.hpp
 
 stack_<arch>.hpp                        sizes.hpp
 
+stack_<arch>.inline.hpp                 stack_<arch>.hpp
+stack_<arch>.inline.hpp                 thread.hpp
+
+stack_<arch>.cpp                        interpreterRuntime.hpp
+stack_<arch>.cpp                        stack_<arch>.hpp
+
+stubGenerator_<arch>.cpp                stack_<arch>.inline.hpp
+
 thread.hpp                              stack_<arch>.hpp
--- a/src/share/vm/interpreter/bytecodeInterpreter.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/interpreter/bytecodeInterpreter.cpp	Thu May 13 18:26:35 2010 -0700
@@ -2328,6 +2328,17 @@
       }
 
       DEFAULT:
+#ifdef ZERO
+          // Some zero configurations use the C++ interpreter as a
+          // fallback interpreter and have support for platform
+          // specific fast bytecodes which aren't supported here, so
+          // redispatch to the equivalent non-fast bytecode when they
+          // are encountered.
+          if (Bytecodes::is_defined((Bytecodes::Code)opcode)) {
+              opcode = (jubyte)Bytecodes::java_code((Bytecodes::Code)opcode);
+              goto opcode_switch;
+          }
+#endif
           fatal2("\t*** Unimplemented opcode: %d = %s\n",
                  opcode, Bytecodes::name((Bytecodes::Code)opcode));
           goto finish;
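
A minimal standalone sketch of the redispatch pattern in the hunk above: when a
platform-specific fast bytecode reaches the default case, map it back to its
base Java bytecode and jump back to the dispatch switch.  The opcode values and
helpers below are made up for illustration; HotSpot uses its own
Bytecodes::is_defined/java_code tables.

    #include <cstdio>

    // Hypothetical opcodes; stand-ins for Bytecodes::Code values.
    enum { OP_ILOAD = 0x15, OP_FAST_ILOAD = 0xCB };

    static bool is_defined(int op) {   // stands in for Bytecodes::is_defined
      return op == OP_ILOAD || op == OP_FAST_ILOAD;
    }
    static int java_code(int op) {     // stands in for Bytecodes::java_code
      return (op == OP_FAST_ILOAD) ? OP_ILOAD : op;
    }

    int main() {
      int opcode = OP_FAST_ILOAD;
    opcode_switch:
      switch (opcode) {
        case OP_ILOAD:
          std::printf("interpret iload\n");
          break;
        default:
          if (is_defined(opcode)) {    // fast variant: retry as its base bytecode
            opcode = java_code(opcode);
            goto opcode_switch;
          }
          std::printf("unimplemented opcode %d\n", opcode);
      }
      return 0;
    }
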
--- a/src/share/vm/memory/collectorPolicy.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/memory/collectorPolicy.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -112,6 +112,11 @@
   }
 }
 
+bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) {
+  bool result = _should_clear_all_soft_refs;
+  set_should_clear_all_soft_refs(false);
+  return result;
+}
 
 GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
                                            int max_covered_regions) {
@@ -126,6 +131,17 @@
   }
 }
 
+void CollectorPolicy::cleared_all_soft_refs() {
+  // If near gc overhead limit, continue to clear SoftRefs.  SoftRefs may
+  // have been cleared in the last collection but if the gc overhead
+  // limit continues to be near, SoftRefs should still be cleared.
+  if (size_policy() != NULL) {
+    _should_clear_all_soft_refs = size_policy()->gc_overhead_limit_near();
+  }
+  _all_soft_refs_clear = true;
+}
+
+
 // GenCollectorPolicy methods.
 
 size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
@@ -489,6 +505,12 @@
 
   debug_only(gch->check_for_valid_allocation_state());
   assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");
+
+  // In general gc_overhead_limit_was_exceeded should be false, so
+  // set it to false here and reset it to true only if the gc overhead
+  // limit is being exceeded as checked below.
+  *gc_overhead_limit_was_exceeded = false;
+
   HeapWord* result = NULL;
 
   // Loop until the allocation is satisfied,
@@ -524,12 +546,6 @@
         return result;
       }
 
-      // There are NULL's returned for different circumstances below.
-      // In general gc_overhead_limit_was_exceeded should be false so
-      // set it so here and reset it to true only if the gc time
-      // limit is being exceeded as checked below.
-      *gc_overhead_limit_was_exceeded = false;
-
       if (GC_locker::is_active_and_needs_gc()) {
         if (is_tlab) {
           return NULL;  // Caller will retry allocating individual object
@@ -568,18 +584,6 @@
       gc_count_before = Universe::heap()->total_collections();
     }
 
-    // Allocation has failed and a collection is about
-    // to be done.  If the gc time limit was exceeded the
-    // last time a collection was done, return NULL so
-    // that an out-of-memory will be thrown.  Clear
-    // gc_time_limit_exceeded so that subsequent attempts
-    // at a collection will be made.
-    if (size_policy()->gc_time_limit_exceeded()) {
-      *gc_overhead_limit_was_exceeded = true;
-      size_policy()->set_gc_time_limit_exceeded(false);
-      return NULL;
-    }
-
     VM_GenCollectForAllocation op(size,
                                   is_tlab,
                                   gc_count_before);
@@ -590,6 +594,24 @@
          assert(result == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
       }
+
+      // Allocation has failed and a collection
+      // has been done.  If the gc time limit was exceeded
+      // this time, return NULL so that an out-of-memory
+      // error will be thrown.  Clear gc_overhead_limit_exceeded
+      // so that the overhead-exceeded state does not persist.
+
+      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
+      const bool softrefs_clear = all_soft_refs_clear();
+      assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
+      if (limit_exceeded && softrefs_clear) {
+        *gc_overhead_limit_was_exceeded = true;
+        size_policy()->set_gc_overhead_limit_exceeded(false);
+        if (op.result() != NULL) {
+          CollectedHeap::fill_with_object(op.result(), size);
+        }
+        return NULL;
+      }
       assert(result == NULL || gch->is_in_reserved(result),
              "result not in heap");
       return result;
@@ -688,6 +710,9 @@
     return result;
   }
 
+  assert(!should_clear_all_soft_refs(),
+    "Flag should have been handled and cleared prior to this point");
+
   // What else?  We might try synchronous finalization later.  If the total
   // space available is large enough for the allocation, then a more
   // complete compaction phase than we've tried so far might be
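
The relocated overhead-limit check now runs only after a collection has been
done: the out-of-memory is surfaced only when the limit was exceeded and soft
references were already cleared, and the flag is reset so the state does not
persist.  A compilable sketch of that decision, with illustrative stand-ins
for AdaptiveSizePolicy and the heap's bookkeeping (names here are assumptions,
not HotSpot's):

    #include <cstddef>
    #include <cstdio>

    // Stand-in for the AdaptiveSizePolicy flag.
    struct SizePolicy { bool gc_overhead_limit_exceeded; };

    // What mem_allocate() hands back once a collection has been done.
    void* post_collection_result(void* result, SizePolicy* sp,
                                 bool all_soft_refs_clear,
                                 bool* gc_overhead_limit_was_exceeded) {
      if (sp->gc_overhead_limit_exceeded && all_soft_refs_clear) {
        *gc_overhead_limit_was_exceeded = true;  // caller turns this into an OOM
        sp->gc_overhead_limit_exceeded = false;  // state must not persist
        // (the real code also fills a non-NULL result with a dummy object)
        return NULL;
      }
      return result;
    }

    int main() {
      SizePolicy sp = { true };
      bool exceeded = false;
      void* r = post_collection_result(NULL, &sp, true, &exceeded);
      std::printf("result=%p exceeded=%d\n", r, (int)exceeded);
      return 0;
    }
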
--- a/src/share/vm/memory/collectorPolicy.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/memory/collectorPolicy.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -69,12 +69,28 @@
   size_t _min_alignment;
   size_t _max_alignment;
 
+  // The sizing of the heap is controlled by a sizing policy.
+  AdaptiveSizePolicy* _size_policy;
+
+  // Set to true when policy wants soft refs cleared.
+  // Reset to false by gc after it clears all soft refs.
+  bool _should_clear_all_soft_refs;
+  // Set to true by the GC whenever the just-completed gc cleared all
+  // soft refs, and set to false each time the gc returns to the
+  // mutator.  For example, in the ParallelScavengeHeap case the latter
+  // is done toward the end of mem_allocate(), where it returns
+  // op.result().
+  bool _all_soft_refs_clear;
+
   CollectorPolicy() :
     _min_alignment(1),
     _max_alignment(1),
     _initial_heap_byte_size(0),
     _max_heap_byte_size(0),
-    _min_heap_byte_size(0)
+    _min_heap_byte_size(0),
+    _size_policy(NULL),
+    _should_clear_all_soft_refs(false),
+    _all_soft_refs_clear(false)
   {}
 
  public:
@@ -98,6 +114,19 @@
     G1CollectorPolicyKind
   };
 
+  AdaptiveSizePolicy* size_policy() { return _size_policy; }
+  bool should_clear_all_soft_refs() { return _should_clear_all_soft_refs; }
+  void set_should_clear_all_soft_refs(bool v) { _should_clear_all_soft_refs = v; }
+  // Returns the current value of _should_clear_all_soft_refs.
+  // _should_clear_all_soft_refs is set to false as a side effect.
+  bool use_should_clear_all_soft_refs(bool v);
+  bool all_soft_refs_clear() { return _all_soft_refs_clear; }
+  void set_all_soft_refs_clear(bool v) { _all_soft_refs_clear = v; }
+
+  // Called by the GC after Soft Refs have been cleared to indicate
+  // that the request in _should_clear_all_soft_refs has been fulfilled.
+  void cleared_all_soft_refs();
+
   // Identification methods.
   virtual GenCollectorPolicy*           as_generation_policy()            { return NULL; }
   virtual TwoGenerationCollectorPolicy* as_two_generation_policy()        { return NULL; }
@@ -165,6 +194,22 @@
 
 };
 
+class ClearedAllSoftRefs : public StackObj {
+  bool _clear_all_soft_refs;
+  CollectorPolicy* _collector_policy;
+ public:
+  ClearedAllSoftRefs(bool clear_all_soft_refs,
+                     CollectorPolicy* collector_policy) :
+    _clear_all_soft_refs(clear_all_soft_refs),
+    _collector_policy(collector_policy) {}
+
+  ~ClearedAllSoftRefs() {
+    if (_clear_all_soft_refs) {
+      _collector_policy->cleared_all_soft_refs();
+    }
+  }
+};
+
 class GenCollectorPolicy : public CollectorPolicy {
  protected:
   size_t _min_gen0_size;
@@ -173,10 +218,6 @@
 
   GenerationSpec **_generations;
 
-  // The sizing of the different generations in the heap are controlled
-  // by a sizing policy.
-  AdaptiveSizePolicy* _size_policy;
-
   // Return true if an allocation should be attempted in the older
   // generation if it fails in the younger generation.  Return
   // false, otherwise.
@@ -236,14 +277,11 @@
   virtual size_t large_typearray_limit();
 
   // Adaptive size policy
-  AdaptiveSizePolicy* size_policy() { return _size_policy; }
   virtual void initialize_size_policy(size_t init_eden_size,
                                       size_t init_promo_size,
                                       size_t init_survivor_size);
-
 };
 
-
 // All of hotspot's current collectors are subtypes of this
 // class. Currently, these collectors all use the same gen[0],
 // but have different gen[1] types. If we add another subtype
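
ClearedAllSoftRefs above is a destructor-driven (StackObj) guard: whichever
path the collection takes out of the enclosing scope, cleared_all_soft_refs()
runs when the scope unwinds.  A standalone sketch of the idiom, with a
stand-in policy type (illustrative, not the real CollectorPolicy):

    #include <cstdio>

    struct Policy {                           // stand-in for CollectorPolicy
      bool should_clear_all_soft_refs;
      void cleared_all_soft_refs() { std::printf("soft-ref request fulfilled\n"); }
    };

    class ClearedAllSoftRefs {                // same shape as the class above
      bool    _clear_all_soft_refs;
      Policy* _policy;
     public:
      ClearedAllSoftRefs(bool clear, Policy* p)
        : _clear_all_soft_refs(clear), _policy(p) {}
      ~ClearedAllSoftRefs() {
        if (_clear_all_soft_refs) _policy->cleared_all_soft_refs();
      }
    };

    void do_collection(Policy* p, bool clear_all_soft_refs) {
      bool do_clear = clear_all_soft_refs || p->should_clear_all_soft_refs;
      ClearedAllSoftRefs casr(do_clear, p);
      // ... collect; any early return still triggers the notification ...
    }

    int main() {
      Policy p = { true };
      do_collection(&p, false);               // prints: soft-ref request fulfilled
      return 0;
    }

genCollectedHeap.cpp below instantiates exactly this guard (the casr local in
do_collection), so the bookkeeping cannot be skipped by any exit path.
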
--- a/src/share/vm/memory/defNewGeneration.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/memory/defNewGeneration.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -594,6 +594,10 @@
     _tenuring_threshold =
       age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
 
+    // A successful scavenge should restart the GC overhead limit count,
+    // which is maintained for full GCs.
+    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
+    size_policy->reset_gc_overhead_limit_count();
     if (PrintGC && !PrintGCDetails) {
       gch->print_heap_change(gch_prev_used);
     }
--- a/src/share/vm/memory/genCollectedHeap.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -428,7 +428,8 @@
   assert(my_thread->is_VM_thread() ||
          my_thread->is_ConcurrentGC_thread(),
          "incorrect thread type capability");
-  assert(Heap_lock->is_locked(), "the requesting thread should have the Heap_lock");
+  assert(Heap_lock->is_locked(),
+         "the requesting thread should have the Heap_lock");
   guarantee(!is_gc_active(), "collection is not reentrant");
   assert(max_level < n_gens(), "sanity check");
 
@@ -436,6 +437,11 @@
     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
   }
 
+  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
+                          collector_policy()->should_clear_all_soft_refs();
+
+  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
+
   const size_t perm_prev_used = perm_gen()->used();
 
   if (PrintHeapAtGC) {
@@ -560,11 +566,11 @@
           if (rp->discovery_is_atomic()) {
             rp->verify_no_references_recorded();
             rp->enable_discovery();
-            rp->setup_policy(clear_all_soft_refs);
+            rp->setup_policy(do_clear_all_soft_refs);
           } else {
             // collect() below will enable discovery as appropriate
           }
-          _gens[i]->collect(full, clear_all_soft_refs, size, is_tlab);
+          _gens[i]->collect(full, do_clear_all_soft_refs, size, is_tlab);
           if (!rp->enqueuing_is_done()) {
             rp->enqueue_discovered_references();
           } else {
--- a/src/share/vm/memory/genMarkSweep.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/memory/genMarkSweep.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,13 @@
   bool clear_all_softrefs) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
 
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+#ifdef ASSERT
+  if (gch->collector_policy()->should_clear_all_soft_refs()) {
+    assert(clear_all_softrefs, "Policy should have been checked earlier");
+  }
+#endif
+
   // hook up weak ref data so it can be used during Mark-Sweep
   assert(ref_processor() == NULL, "no stomping");
   assert(rp != NULL, "should be non-NULL");
@@ -44,7 +51,6 @@
 
   // Increment the invocation count for the permanent generation, since it is
   // implicitly collected whenever we do a full mark sweep collection.
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
   gch->perm_gen()->stat_record()->invocations++;
 
   // Capture heap size before collection for printing.
@@ -159,6 +165,7 @@
   _preserved_oop_stack = NULL;
 
   _marking_stack       = new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);
+  _objarray_stack      = new (ResourceObj::C_HEAP) GrowableArray<ObjArrayTask>(50, true);
 
   int size = SystemDictionary::number_of_classes() * 2;
   _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
@@ -194,7 +201,6 @@
 
 
 void GenMarkSweep::deallocate_stacks() {
-
   if (!UseG1GC) {
     GenCollectedHeap* gch = GenCollectedHeap::heap();
     gch->release_scratch();
@@ -208,6 +214,7 @@
   }
 
   delete _marking_stack;
+  delete _objarray_stack;
   delete _revisit_klass_stack;
   delete _revisit_mdo_stack;
 
--- a/src/share/vm/memory/genOopClosures.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/memory/genOopClosures.hpp	Thu May 13 18:26:35 2010 -0700
@@ -28,10 +28,10 @@
 class CardTableModRefBS;
 class DefNewGeneration;
 
-template<class E> class GenericTaskQueue;
-typedef GenericTaskQueue<oop> OopTaskQueue;
-template<class E> class GenericTaskQueueSet;
-typedef GenericTaskQueueSet<oop> OopTaskQueueSet;
+template<class E, unsigned int N> class GenericTaskQueue;
+typedef GenericTaskQueue<oop, TASKQUEUE_SIZE> OopTaskQueue;
+template<class T> class GenericTaskQueueSet;
+typedef GenericTaskQueueSet<OopTaskQueue> OopTaskQueueSet;
 
 // Closure for iterating roots from a particular generation
 // Note: all classes deriving from this MUST call this do_barrier
--- a/src/share/vm/memory/threadLocalAllocBuffer.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/memory/threadLocalAllocBuffer.hpp	Thu May 13 18:26:35 2010 -0700
@@ -111,7 +111,22 @@
 
   // Allocate size HeapWords. The memory is NOT initialized to zero.
   inline HeapWord* allocate(size_t size);
-  static size_t alignment_reserve()              { return align_object_size(typeArrayOopDesc::header_size(T_INT)); }
+
+  // Reserve space at the end of TLAB
+  static size_t end_reserve() {
+    int reserve_size = typeArrayOopDesc::header_size(T_INT);
+    if (AllocatePrefetchStyle == 3) {
+      // BIS is used to prefetch - we need space for it.
+      // +1 line for rounding up to the next cache line and +1 to be safe
+      int lines = AllocatePrefetchLines + 2;
+      int step_size = AllocatePrefetchStepSize;
+      int distance = AllocatePrefetchDistance;
+      int prefetch_end = (distance + step_size*lines)/(int)HeapWordSize;
+      reserve_size = MAX2(reserve_size, prefetch_end);
+    }
+    return reserve_size;
+  }
+  static size_t alignment_reserve()              { return align_object_size(end_reserve()); }
   static size_t alignment_reserve_in_bytes()     { return alignment_reserve() * HeapWordSize; }
 
   // Return tlab size or remaining space in eden such that the
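
end_reserve() above sizes the TLAB tail so that style-3 (BIS) prefetching past
the allocation top never touches memory outside the buffer.  A worked instance
of the arithmetic under assumed flag values (AllocatePrefetchLines=3,
StepSize=64, Distance=512, 8-byte HeapWords; the header reserve stand-in is
also an assumption):

    #include <algorithm>
    #include <cstdio>

    int main() {
      // Assumed values for illustration only; the real ones are VM options.
      int lines      = 3 + 2;  // AllocatePrefetchLines + 2 (rounding + safety)
      int step_size  = 64;     // AllocatePrefetchStepSize
      int distance   = 512;    // AllocatePrefetchDistance
      int word_size  = 8;      // HeapWordSize on a 64-bit VM
      int hdr_words  = 4;      // stand-in for typeArrayOopDesc::header_size(T_INT)

      int prefetch_end = (distance + step_size * lines) / word_size;  // (512+320)/8 = 104
      int reserve = std::max(hdr_words, prefetch_end);
      std::printf("end_reserve = %d HeapWords\n", reserve);           // prints 104
      return 0;
    }
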
--- a/src/share/vm/oops/instanceKlass.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/oops/instanceKlass.cpp	Thu May 13 18:26:35 2010 -0700
@@ -25,6 +25,58 @@
 # include "incls/_precompiled.incl"
 # include "incls/_instanceKlass.cpp.incl"
 
+#ifdef DTRACE_ENABLED
+
+HS_DTRACE_PROBE_DECL4(hotspot, class__initialization__required,
+  char*, intptr_t, oop, intptr_t);
+HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__recursive,
+  char*, intptr_t, oop, intptr_t, int);
+HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__concurrent,
+  char*, intptr_t, oop, intptr_t, int);
+HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__erroneous,
+  char*, intptr_t, oop, intptr_t, int);
+HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__super__failed,
+  char*, intptr_t, oop, intptr_t, int);
+HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__clinit,
+  char*, intptr_t, oop, intptr_t, int);
+HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__error,
+  char*, intptr_t, oop, intptr_t, int);
+HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__end,
+  char*, intptr_t, oop, intptr_t, int);
+
+#define DTRACE_CLASSINIT_PROBE(type, clss, thread_type)          \
+  {                                                              \
+    char* data = NULL;                                           \
+    int len = 0;                                                 \
+    symbolOop name = (clss)->name();                             \
+    if (name != NULL) {                                          \
+      data = (char*)name->bytes();                               \
+      len = name->utf8_length();                                 \
+    }                                                            \
+    HS_DTRACE_PROBE4(hotspot, class__initialization__##type,     \
+      data, len, (clss)->class_loader(), thread_type);           \
+  }
+
+#define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait) \
+  {                                                              \
+    char* data = NULL;                                           \
+    int len = 0;                                                 \
+    symbolOop name = (clss)->name();                             \
+    if (name != NULL) {                                          \
+      data = (char*)name->bytes();                               \
+      len = name->utf8_length();                                 \
+    }                                                            \
+    HS_DTRACE_PROBE5(hotspot, class__initialization__##type,     \
+      data, len, (clss)->class_loader(), thread_type, wait);     \
+  }
+
+#else //  ndef DTRACE_ENABLED
+
+#define DTRACE_CLASSINIT_PROBE(type, clss, thread_type)
+#define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait)
+
+#endif //  ndef DTRACE_ENABLED
+
 bool instanceKlass::should_be_initialized() const {
   return !is_initialized();
 }
@@ -292,6 +344,10 @@
   // A class could already be verified, since it has been reflected upon.
   this_oop->link_class(CHECK);
 
+  DTRACE_CLASSINIT_PROBE(required, instanceKlass::cast(this_oop()), -1);
+
+  bool wait = false;
+
   // refer to the JVM book page 47 for description of steps
   // Step 1
   { ObjectLocker ol(this_oop, THREAD);
@@ -303,19 +359,25 @@
     // we might end up throwing IE from link/symbol resolution sites
     // that aren't expected to throw.  This would wreak havoc.  See 6320309.
     while(this_oop->is_being_initialized() && !this_oop->is_reentrant_initialization(self)) {
+      wait = true;
       ol.waitUninterruptibly(CHECK);
     }
 
     // Step 3
-    if (this_oop->is_being_initialized() && this_oop->is_reentrant_initialization(self))
+    if (this_oop->is_being_initialized() && this_oop->is_reentrant_initialization(self)) {
+      DTRACE_CLASSINIT_PROBE_WAIT(recursive, instanceKlass::cast(this_oop()), -1, wait);
       return;
+    }
 
     // Step 4
-    if (this_oop->is_initialized())
+    if (this_oop->is_initialized()) {
+      DTRACE_CLASSINIT_PROBE_WAIT(concurrent, instanceKlass::cast(this_oop()), -1, wait);
       return;
+    }
 
     // Step 5
     if (this_oop->is_in_error_state()) {
+      DTRACE_CLASSINIT_PROBE_WAIT(erroneous, instanceKlass::cast(this_oop()), -1, wait);
       ResourceMark rm(THREAD);
       const char* desc = "Could not initialize class ";
       const char* className = this_oop->external_name();
@@ -348,6 +410,7 @@
         this_oop->set_initialization_state_and_notify(initialization_error, THREAD); // Locks object, set state, and notify all waiting threads
         CLEAR_PENDING_EXCEPTION;   // ignore any exception thrown, superclass initialization error is thrown below
       }
+      DTRACE_CLASSINIT_PROBE_WAIT(super__failed, instanceKlass::cast(this_oop()), -1, wait);
       THROW_OOP(e());
     }
   }
@@ -356,6 +419,7 @@
   {
     assert(THREAD->is_Java_thread(), "non-JavaThread in initialize_impl");
     JavaThread* jt = (JavaThread*)THREAD;
+    DTRACE_CLASSINIT_PROBE_WAIT(clinit, instanceKlass::cast(this_oop()), -1, wait);
     // Timer includes any side effects of class initialization (resolution,
     // etc), but not recursive entry into call_class_initializer().
     PerfClassTraceTime timer(ClassLoader::perf_class_init_time(),
@@ -383,6 +447,7 @@
       this_oop->set_initialization_state_and_notify(initialization_error, THREAD);
       CLEAR_PENDING_EXCEPTION;   // ignore any exception thrown, class initialization error is thrown below
     }
+    DTRACE_CLASSINIT_PROBE_WAIT(error, instanceKlass::cast(this_oop()), -1, wait);
     if (e->is_a(SystemDictionary::Error_klass())) {
       THROW_OOP(e());
     } else {
@@ -392,6 +457,7 @@
                 &args);
     }
   }
+  DTRACE_CLASSINIT_PROBE_WAIT(end, instanceKlass::cast(this_oop()), -1, wait);
 }
 
 
--- a/src/share/vm/oops/objArrayKlass.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/oops/objArrayKlass.cpp	Thu May 13 18:26:35 2010 -0700
@@ -314,24 +314,24 @@
 
 void objArrayKlass::oop_follow_contents(oop obj) {
   assert (obj->is_array(), "obj must be array");
-  objArrayOop a = objArrayOop(obj);
-  a->follow_header();
-  ObjArrayKlass_OOP_ITERATE( \
-    a, p, \
-    /* we call mark_and_follow here to avoid excessive marking stack usage */ \
-    MarkSweep::mark_and_follow(p))
+  objArrayOop(obj)->follow_header();
+  if (UseCompressedOops) {
+    objarray_follow_contents<narrowOop>(obj, 0);
+  } else {
+    objarray_follow_contents<oop>(obj, 0);
+  }
 }
 
 #ifndef SERIALGC
 void objArrayKlass::oop_follow_contents(ParCompactionManager* cm,
                                         oop obj) {
-  assert (obj->is_array(), "obj must be array");
-  objArrayOop a = objArrayOop(obj);
-  a->follow_header(cm);
-  ObjArrayKlass_OOP_ITERATE( \
-    a, p, \
-    /* we call mark_and_follow here to avoid excessive marking stack usage */ \
-    PSParallelCompact::mark_and_follow(cm, p))
+  assert(obj->is_array(), "obj must be array");
+  objArrayOop(obj)->follow_header(cm);
+  if (UseCompressedOops) {
+    objarray_follow_contents<narrowOop>(cm, obj, 0);
+  } else {
+    objarray_follow_contents<oop>(cm, obj, 0);
+  }
 }
 #endif // SERIALGC
 
--- a/src/share/vm/oops/objArrayKlass.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/oops/objArrayKlass.hpp	Thu May 13 18:26:35 2010 -0700
@@ -91,10 +91,18 @@
 
   // Garbage collection
   void oop_follow_contents(oop obj);
+  inline void oop_follow_contents(oop obj, int index);
+  template <class T> inline void objarray_follow_contents(oop obj, int index);
+
   int  oop_adjust_pointers(oop obj);
 
   // Parallel Scavenge and Parallel Old
   PARALLEL_GC_DECLS
+#ifndef SERIALGC
+  inline void oop_follow_contents(ParCompactionManager* cm, oop obj, int index);
+  template <class T> inline void
+    objarray_follow_contents(ParCompactionManager* cm, oop obj, int index);
+#endif // !SERIALGC
 
   // Iterators
   int oop_oop_iterate(oop obj, OopClosure* blk) {
@@ -131,5 +139,4 @@
   void oop_verify_on(oop obj, outputStream* st);
   void oop_verify_old_oop(oop obj, oop* p, bool allow_dirty);
   void oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty);
-
 };
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/oops/objArrayKlass.inline.hpp	Thu May 13 18:26:35 2010 -0700
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2010 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+void objArrayKlass::oop_follow_contents(oop obj, int index) {
+  if (UseCompressedOops) {
+    objarray_follow_contents<narrowOop>(obj, index);
+  } else {
+    objarray_follow_contents<oop>(obj, index);
+  }
+}
+
+template <class T>
+void objArrayKlass::objarray_follow_contents(oop obj, int index) {
+  objArrayOop a = objArrayOop(obj);
+  const size_t len = size_t(a->length());
+  const size_t beg_index = size_t(index);
+  assert(beg_index < len || len == 0, "index too large");
+
+  const size_t stride = MIN2(len - beg_index, ObjArrayMarkingStride);
+  const size_t end_index = beg_index + stride;
+  T* const base = (T*)a->base();
+  T* const beg = base + beg_index;
+  T* const end = base + end_index;
+
+  // Push the non-NULL elements of the next stride on the marking stack.
+  for (T* e = beg; e < end; e++) {
+    MarkSweep::mark_and_push<T>(e);
+  }
+
+  if (end_index < len) {
+    MarkSweep::push_objarray(a, end_index); // Push the continuation.
+  }
+}
+
+#ifndef SERIALGC
+void objArrayKlass::oop_follow_contents(ParCompactionManager* cm, oop obj,
+                                        int index) {
+  if (UseCompressedOops) {
+    objarray_follow_contents<narrowOop>(cm, obj, index);
+  } else {
+    objarray_follow_contents<oop>(cm, obj, index);
+  }
+}
+
+template <class T>
+void objArrayKlass::objarray_follow_contents(ParCompactionManager* cm, oop obj,
+                                             int index) {
+  objArrayOop a = objArrayOop(obj);
+  const size_t len = size_t(a->length());
+  const size_t beg_index = size_t(index);
+  assert(beg_index < len || len == 0, "index too large");
+
+  const size_t stride = MIN2(len - beg_index, ObjArrayMarkingStride);
+  const size_t end_index = beg_index + stride;
+  T* const base = (T*)a->base();
+  T* const beg = base + beg_index;
+  T* const end = base + end_index;
+
+  // Push the non-NULL elements of the next stride on the marking stack.
+  for (T* e = beg; e < end; e++) {
+    PSParallelCompact::mark_and_push<T>(cm, e);
+  }
+
+  if (end_index < len) {
+    cm->push_objarray(a, end_index); // Push the continuation.
+  }
+}
+#endif // #ifndef SERIALGC
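
objarray_follow_contents above bounds marking-stack growth on huge arrays by
marking one ObjArrayMarkingStride-sized slice per task and pushing a
continuation for the remainder.  A standalone sketch of that chunking, with
illustrative names and a plain vector standing in for the marking stack:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>
    #include <utility>
    #include <vector>

    static const size_t ObjArrayMarkingStride = 512;  // default of the VM flag

    // Mark elements [index, index+stride) of an array of length len, then
    // queue a continuation task for whatever is left.
    void follow_slice(size_t len, size_t index,
                      std::vector<std::pair<size_t, size_t> >& stack) {
      size_t stride = std::min(len - index, ObjArrayMarkingStride);
      size_t end = index + stride;
      // ... mark_and_push each element in [index, end) here ...
      if (end < len) stack.push_back(std::make_pair(len, end));  // continuation
    }

    int main() {
      std::vector<std::pair<size_t, size_t> > stack;  // (array length, resume index)
      stack.push_back(std::make_pair(1300, 0));       // one 1300-element array
      while (!stack.empty()) {
        std::pair<size_t, size_t> task = stack.back();
        stack.pop_back();
        std::printf("slice starts at %lu\n", (unsigned long)task.second);  // 0, 512, 1024
        follow_slice(task.first, task.second, stack);
      }
      return 0;
    }
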
--- a/src/share/vm/oops/typeArrayKlass.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/oops/typeArrayKlass.cpp	Thu May 13 18:26:35 2010 -0700
@@ -123,16 +123,16 @@
      || (((unsigned int) length + (unsigned int) dst_pos) > (unsigned int) d->length()) ) {
     THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException());
   }
+  // Check for a zero-length copy
+  if (length == 0)
+    return;
 
   // This is an attempt to make the copy_array fast.
-  // NB: memmove takes care of overlapping memory segments.
-  // Potential problem: memmove is not guaranteed to be word atomic
-  // Revisit in Merlin
   int l2es = log2_element_size();
   int ihs = array_header_in_bytes() / wordSize;
-  char* src = (char*) ((oop*)s + ihs) + (src_pos << l2es);
-  char* dst = (char*) ((oop*)d + ihs) + (dst_pos << l2es);
-  memmove(dst, src, length << l2es);
+  char* src = (char*) ((oop*)s + ihs) + ((size_t)src_pos << l2es);
+  char* dst = (char*) ((oop*)d + ihs) + ((size_t)dst_pos << l2es);
+  Copy::conjoint_memory_atomic(src, dst, (size_t)length << l2es);
 }
 
 
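The new (size_t) casts matter on LP64: src_pos << l2es is otherwise evaluated
as a 32-bit shift, which overflows for large arrays of wide elements.  A small
demonstration under assumed values (the unsigned shift below stands in for the
original signed one, which would be undefined behavior):

    #include <cstddef>
    #include <cstdio>

    int main() {
      int src_pos = 300000000;  // element index 3e8 into a long[]; valid on LP64
      int l2es    = 3;          // log2 element size for T_LONG

      // A 32-bit shift wraps; a size_t shift yields the intended byte offset.
      int wrapped = (int)((unsigned int)src_pos << l2es);
      unsigned long long widened =
          (unsigned long long)((size_t)src_pos << l2es);

      // typically prints: 32-bit: -1894967296  widened: 2400000000
      std::printf("32-bit: %d  widened: %llu\n", wrapped, widened);
      return 0;
    }
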
--- a/src/share/vm/opto/c2_globals.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/opto/c2_globals.hpp	Thu May 13 18:26:35 2010 -0700
@@ -52,9 +52,6 @@
           "Code alignment for interior entry points "                       \
           "in generated code (in bytes)")                                   \
                                                                             \
-  product_pd(intx, OptoLoopAlignment,                                       \
-          "Align inner loops to zero relative to this modulus")             \
-                                                                            \
   product(intx, MaxLoopPad, (OptoLoopAlignment-1),                          \
           "Align a loop if padding size in bytes is less or equal to this value") \
                                                                             \
--- a/src/share/vm/opto/c2compiler.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/opto/c2compiler.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -105,8 +105,7 @@
   }
   bool subsume_loads = true;
   bool do_escape_analysis = DoEscapeAnalysis &&
-                            !(env->jvmti_can_hotswap_or_post_breakpoint() ||
-                              env->jvmti_can_examine_or_deopt_anywhere());
+    !env->jvmti_can_access_local_variables();
   while (!env->failing()) {
     // Attempt to compile while subsuming loads into machine instructions.
     Compile C(env, this, target, entry_bci, subsume_loads, do_escape_analysis);
--- a/src/share/vm/opto/cfgnode.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/opto/cfgnode.cpp	Thu May 13 18:26:35 2010 -0700
@@ -956,6 +956,7 @@
     }
     if( jtkp && ttkp ) {
       if( jtkp->is_loaded() &&  jtkp->klass()->is_interface() &&
+          !jtkp->klass_is_exact() && // Keep exact interface klass (6894807)
           ttkp->is_loaded() && !ttkp->klass()->is_interface() ) {
         assert(ft == ttkp->cast_to_ptr_type(jtkp->ptr()) ||
                ft->isa_narrowoop() && ft->make_ptr() == ttkp->cast_to_ptr_type(jtkp->ptr()), "");
--- a/src/share/vm/opto/classes.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/opto/classes.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,6 +44,8 @@
 macro(BoxLock)
 macro(ReverseBytesI)
 macro(ReverseBytesL)
+macro(ReverseBytesUS)
+macro(ReverseBytesS)
 macro(CProj)
 macro(CallDynamicJava)
 macro(CallJava)
--- a/src/share/vm/opto/compile.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/opto/compile.cpp	Thu May 13 18:26:35 2010 -0700
@@ -871,7 +871,6 @@
   set_has_split_ifs(false);
   set_has_loops(has_method() && method()->has_loops()); // first approximation
   set_has_stringbuilder(false);
-  _deopt_happens = true;  // start out assuming the worst
   _trap_can_recompile = false;  // no traps emitted yet
   _major_progress = true; // start out assuming good things will happen
   set_has_unsafe_access(false);
--- a/src/share/vm/opto/compile.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/opto/compile.hpp	Thu May 13 18:26:35 2010 -0700
@@ -146,7 +146,6 @@
   int                   _orig_pc_slot_offset_in_bytes;
 
   int                   _major_progress;        // Count of something big happening
-  bool                  _deopt_happens;         // TRUE if de-optimization CAN happen
   bool                  _has_loops;             // True if the method _may_ have some loops
   bool                  _has_split_ifs;         // True if the method _may_ have some split-if
   bool                  _has_unsafe_access;     // True if the method _may_ produce faults in unsafe loads or stores.
@@ -300,7 +299,6 @@
   void          set_freq_inline_size(int n)     { _freq_inline_size = n; }
   int               freq_inline_size() const    { return _freq_inline_size; }
   void          set_max_inline_size(int n)      { _max_inline_size = n; }
-  bool              deopt_happens() const       { return _deopt_happens; }
   bool              has_loops() const           { return _has_loops; }
   void          set_has_loops(bool z)           { _has_loops = z; }
   bool              has_split_ifs() const       { return _has_split_ifs; }
--- a/src/share/vm/opto/doCall.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/opto/doCall.cpp	Thu May 13 18:26:35 2010 -0700
@@ -714,8 +714,6 @@
 
   // iterate through all entries sequentially
   for (;!handlers.is_done(); handlers.next()) {
-    // Do nothing if turned off
-    if( !DeutschShiffmanExceptions ) break;
     ciExceptionHandler* handler = handlers.handler();
 
     if (handler->is_rethrow()) {
@@ -741,46 +739,26 @@
       return;                   // No more handling to be done here!
     }
 
-    // %%% The following logic replicates make_from_klass_unique.
-    // TO DO:  Replace by a subroutine call.  Then generalize
-    // the type check, as noted in the next "%%%" comment.
-
+    // Get the handler's klass
     ciInstanceKlass* klass = handler->catch_klass();
-    if (UseUniqueSubclasses) {
-      // (We use make_from_klass because it respects UseUniqueSubclasses.)
-      const TypeOopPtr* tp = TypeOopPtr::make_from_klass(klass);
-      klass = tp->klass()->as_instance_klass();
+
+    if (!klass->is_loaded()) {  // klass not loaded?
+      // fall through into catch_call_exceptions which will emit a
+      // handler with an uncommon trap.
+      break;
     }
 
-    // Get the handler's klass
-    if (!klass->is_loaded())    // klass is not loaded?
-      break;                    // Must call Rethrow!
     if (klass->is_interface())  // should not happen, but...
       break;                    // bail out
-    // See if the loaded exception klass has no subtypes
-    if (klass->has_subklass())
-      break;                    // Cannot easily do precise test ==> Rethrow
 
-    // %%% Now that subclass checking is very fast, we need to rewrite
-    // this section and remove the option "DeutschShiffmanExceptions".
-    // The exception processing chain should be a normal typecase pattern,
-    // with a bailout to the interpreter only in the case of unloaded
-    // classes.  (The bailout should mark the method non-entrant.)
-    // This rewrite should be placed in GraphKit::, not Parse::.
-
-    // Add a dependence; if any subclass added we need to recompile
-    // %%% should use stronger assert_unique_concrete_subtype instead
-    if (!klass->is_final()) {
-      C->dependencies()->assert_leaf_type(klass);
-    }
-
-    // Implement precise test
+    // Check the type of the exception against the catch type
     const TypeKlassPtr *tk = TypeKlassPtr::make(klass);
     Node* con = _gvn.makecon(tk);
-    Node* cmp = _gvn.transform( new (C, 3) CmpPNode(ex_klass_node, con) );
-    Node* bol = _gvn.transform( new (C, 2) BoolNode(cmp, BoolTest::ne) );
-    { BuildCutout unless(this, bol, PROB_LIKELY(0.7f));
-      const TypeInstPtr* tinst = TypeInstPtr::make_exact(TypePtr::NotNull, klass);
+    Node* not_subtype_ctrl = gen_subtype_check(ex_klass_node, con);
+    if (!stopped()) {
+      PreserveJVMState pjvms(this);
+      const TypeInstPtr* tinst = TypeOopPtr::make_from_klass_unique(klass)->cast_to_ptr_type(TypePtr::NotNull)->is_instptr();
+      assert(klass->has_subklass() || tinst->klass_is_exact(), "lost exactness");
       Node* ex_oop = _gvn.transform(new (C, 2) CheckCastPPNode(control(), ex_node, tinst));
       push_ex_oop(ex_oop);      // Push exception oop for handler
 #ifndef PRODUCT
@@ -792,6 +770,7 @@
 #endif
       merge_exception(handler_bci);
     }
+    set_control(not_subtype_ctrl);
 
     // Come here if exception does not match handler.
     // Carry on with more handler checks.
@@ -800,21 +779,6 @@
 
   assert(!stopped(), "you should return if you finish the chain");
 
-  if (remaining == 1) {
-    // Further checks do not matter.
-  }
-
-  if (can_rerun_bytecode()) {
-    // Do not push_ex_oop here!
-    // Re-executing the bytecode will reproduce the throwing condition.
-    bool must_throw = true;
-    uncommon_trap(Deoptimization::Reason_unhandled,
-                  Deoptimization::Action_none,
-                  (ciKlass*)NULL, (const char*)NULL, // default args
-                  must_throw);
-    return;
-  }
-
   // Oops, need to call into the VM to resolve the klasses at runtime.
   // Note:  This call must not deoptimize, since it is not a real at this bci!
   kill_dead_locals();
--- a/src/share/vm/opto/graphKit.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/opto/graphKit.cpp	Thu May 13 18:26:35 2010 -0700
@@ -812,10 +812,6 @@
 
   JVMState* youngest_jvms = sync_jvms();
 
-  // Do we need debug info here?  If it is a SafePoint and this method
-  // cannot de-opt, then we do NOT need any debug info.
-  bool full_info = (C->deopt_happens() || call->Opcode() != Op_SafePoint);
-
   // If we are guaranteed to throw, we can prune everything but the
   // input to the current bytecode.
   bool can_prune_locals = false;
@@ -829,10 +825,9 @@
     }
   }
 
-  if (env()->jvmti_can_examine_or_deopt_anywhere()) {
+  if (env()->jvmti_can_access_local_variables()) {
     // At any safepoint, this method can get breakpointed, which would
     // then require an immediate deoptimization.
-    full_info = true;
     can_prune_locals = false;  // do not prune locals
     stack_slots_not_pruned = 0;
   }
@@ -890,7 +885,7 @@
     k = in_jvms->locoff();
     l = in_jvms->loc_size();
     out_jvms->set_locoff(p);
-    if (full_info && !can_prune_locals) {
+    if (!can_prune_locals) {
       for (j = 0; j < l; j++)
         call->set_req(p++, in_map->in(k+j));
     } else {
@@ -901,7 +896,7 @@
     k = in_jvms->stkoff();
     l = in_jvms->sp();
     out_jvms->set_stkoff(p);
-    if (full_info && !can_prune_locals) {
+    if (!can_prune_locals) {
       for (j = 0; j < l; j++)
         call->set_req(p++, in_map->in(k+j));
     } else if (can_prune_locals && stack_slots_not_pruned != 0) {
--- a/src/share/vm/opto/library_call.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/opto/library_call.cpp	Thu May 13 18:26:35 2010 -0700
@@ -636,6 +636,8 @@
 
   case vmIntrinsics::_reverseBytes_i:
   case vmIntrinsics::_reverseBytes_l:
+  case vmIntrinsics::_reverseBytes_s:
+  case vmIntrinsics::_reverseBytes_c:
     return inline_reverseBytes((vmIntrinsics::ID) intrinsic_id());
 
   case vmIntrinsics::_get_AtomicLong:
@@ -2010,13 +2012,19 @@
   return true;
 }
 
-//----------------------------inline_reverseBytes_int/long-------------------
+//----------------------------inline_reverseBytes_int/long/char/short-------------------
 // inline Integer.reverseBytes(int)
 // inline Long.reverseBytes(long)
+// inline Character.reverseBytes(char)
+// inline Short.reverseBytes(short)
 bool LibraryCallKit::inline_reverseBytes(vmIntrinsics::ID id) {
-  assert(id == vmIntrinsics::_reverseBytes_i || id == vmIntrinsics::_reverseBytes_l, "not reverse Bytes");
-  if (id == vmIntrinsics::_reverseBytes_i && !Matcher::has_match_rule(Op_ReverseBytesI)) return false;
-  if (id == vmIntrinsics::_reverseBytes_l && !Matcher::has_match_rule(Op_ReverseBytesL)) return false;
+  assert(id == vmIntrinsics::_reverseBytes_i || id == vmIntrinsics::_reverseBytes_l ||
+         id == vmIntrinsics::_reverseBytes_c || id == vmIntrinsics::_reverseBytes_s,
+         "not reverse Bytes");
+  if (id == vmIntrinsics::_reverseBytes_i && !Matcher::has_match_rule(Op_ReverseBytesI))  return false;
+  if (id == vmIntrinsics::_reverseBytes_l && !Matcher::has_match_rule(Op_ReverseBytesL))  return false;
+  if (id == vmIntrinsics::_reverseBytes_c && !Matcher::has_match_rule(Op_ReverseBytesUS)) return false;
+  if (id == vmIntrinsics::_reverseBytes_s && !Matcher::has_match_rule(Op_ReverseBytesS))  return false;
   _sp += arg_size();        // restore stack pointer
   switch (id) {
   case vmIntrinsics::_reverseBytes_i:
@@ -2025,6 +2033,12 @@
   case vmIntrinsics::_reverseBytes_l:
     push_pair(_gvn.transform(new (C, 2) ReverseBytesLNode(0, pop_pair())));
     break;
+  case vmIntrinsics::_reverseBytes_c:
+    push(_gvn.transform(new (C, 2) ReverseBytesUSNode(0, pop())));
+    break;
+  case vmIntrinsics::_reverseBytes_s:
+    push(_gvn.transform(new (C, 2) ReverseBytesSNode(0, pop())));
+    break;
   default:
     ;
   }
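
For reference, the semantics the new ReverseBytesUS/ReverseBytesS nodes
intrinsify: Character.reverseBytes swaps and zero-extends, Short.reverseBytes
swaps and sign-extends.  A portable C++ sketch of the same operations:

    #include <cstdio>
    #include <stdint.h>

    uint16_t reverse_bytes_us(uint16_t x) {   // like Character.reverseBytes
      return (uint16_t)((x << 8) | (x >> 8));
    }
    int16_t reverse_bytes_s(int16_t x) {      // like Short.reverseBytes
      uint16_t u = (uint16_t)x;
      return (int16_t)((u << 8) | (u >> 8));  // swap, sign-extend on return
    }

    int main() {
      std::printf("%04x\n", reverse_bytes_us(0x1234));          // 3412
      std::printf("%d\n",   reverse_bytes_s((int16_t)0x0080));  // -32768
      return 0;
    }
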
--- a/src/share/vm/opto/loopTransform.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/opto/loopTransform.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1785,6 +1785,8 @@
 bool PhaseIdealLoop::is_uncommon_trap_if_pattern(ProjNode *proj, bool must_reason_predicate) {
   Node *in0 = proj->in(0);
   if (!in0->is_If()) return false;
+  // Variation of a dead If node.
+  if (in0->outcnt() < 2)  return false;
   IfNode* iff = in0->as_If();
 
   // we need "If(Conv2B(Opaque1(...)))" pattern for must_reason_predicate
@@ -2086,29 +2088,41 @@
 BoolNode* PhaseIdealLoop::rc_predicate(Node* ctrl,
                                        int scale, Node* offset,
                                        Node* init, Node* limit, Node* stride,
-                                       Node* range) {
+                                       Node* range, bool upper) {
+  DEBUG_ONLY(ttyLocker ttyl);
+  if (TraceLoopPredicate) tty->print("rc_predicate ");
+
   Node* max_idx_expr  = init;
   int stride_con = stride->get_int();
-  if ((stride_con > 0) == (scale > 0)) {
+  if (((stride_con > 0) == (scale > 0)) == upper) {
     max_idx_expr = new (C, 3) SubINode(limit, stride);
     register_new_node(max_idx_expr, ctrl);
+    if (TraceLoopPredicate) tty->print("(limit - stride) ");
+  } else {
+    if (TraceLoopPredicate) tty->print("init ");
   }
 
   if (scale != 1) {
     ConNode* con_scale = _igvn.intcon(scale);
     max_idx_expr = new (C, 3) MulINode(max_idx_expr, con_scale);
     register_new_node(max_idx_expr, ctrl);
+    if (TraceLoopPredicate) tty->print("* %d ", scale);
   }
 
   if (offset && (!offset->is_Con() || offset->get_int() != 0)){
     max_idx_expr = new (C, 3) AddINode(max_idx_expr, offset);
     register_new_node(max_idx_expr, ctrl);
+    if (TraceLoopPredicate)
+      if (offset->is_Con()) tty->print("+ %d ", offset->get_int());
+      else tty->print("+ offset ");
   }
 
   CmpUNode* cmp = new (C, 3) CmpUNode(max_idx_expr, range);
   register_new_node(cmp, ctrl);
   BoolNode* bol = new (C, 2) BoolNode(cmp, BoolTest::lt);
   register_new_node(bol, ctrl);
+
+  if (TraceLoopPredicate) tty->print_cr("<u range");
   return bol;
 }
 
@@ -2117,6 +2131,18 @@
 bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
   if (!UseLoopPredicate) return false;
 
+  if (!loop->_head->is_Loop()) {
+    // Could be a simple region when irreducible loops are present.
+    return false;
+  }
+
+  CountedLoopNode *cl = NULL;
+  if (loop->_head->is_CountedLoop()) {
+    cl = loop->_head->as_CountedLoop();
+    // do nothing for iteration-split loops
+    if (!cl->is_normal_loop()) return false;
+  }
+
   // Too many traps seen?
   bool tmt = C->too_many_traps(C->method(), 0, Deoptimization::Reason_predicate);
   int tc = C->trap_count(Deoptimization::Reason_predicate);
@@ -2129,13 +2155,6 @@
     return false;
   }
 
-  CountedLoopNode *cl = NULL;
-  if (loop->_head->is_CountedLoop()) {
-    cl = loop->_head->as_CountedLoop();
-    // do nothing for iteration-splitted loops
-    if(!cl->is_normal_loop()) return false;
-  }
-
   LoopNode *lpn  = loop->_head->as_Loop();
   Node* entry = lpn->in(LoopNode::EntryControl);
 
@@ -2180,7 +2199,6 @@
   while (if_proj_list.size() > 0) {
     // Following are changed to nonnull when a predicate can be hoisted
     ProjNode* new_predicate_proj = NULL;
-    BoolNode* new_predicate_bol   = NULL;
 
     ProjNode* proj = if_proj_list.pop()->as_Proj();
     IfNode*   iff  = proj->in(0)->as_If();
@@ -2211,93 +2229,120 @@
       // Invariant test
       new_predicate_proj = create_new_if_for_predicate(predicate_proj);
       Node* ctrl = new_predicate_proj->in(0)->as_If()->in(0);
-      new_predicate_bol  = invar.clone(bol, ctrl)->as_Bool();
-      if (TraceLoopPredicate) tty->print("invariant");
+      BoolNode* new_predicate_bol = invar.clone(bol, ctrl)->as_Bool();
+
+      // Negate test if necessary
+      bool negated = false;
+      if (proj->_con != predicate_proj->_con) {
+        new_predicate_bol = new (C, 2) BoolNode(new_predicate_bol->in(1), new_predicate_bol->_test.negate());
+        register_new_node(new_predicate_bol, ctrl);
+        negated = true;
+      }
+      IfNode* new_predicate_iff = new_predicate_proj->in(0)->as_If();
+      _igvn.hash_delete(new_predicate_iff);
+      new_predicate_iff->set_req(1, new_predicate_bol);
+      if (TraceLoopPredicate) tty->print_cr("invariant if%s: %d", negated ? " negated" : "", new_predicate_iff->_idx);
+
     } else if (cl != NULL && loop->is_range_check_if(iff, this, invar)) {
-      // Range check (only for counted loops)
-      new_predicate_proj = create_new_if_for_predicate(predicate_proj);
-      Node *ctrl = new_predicate_proj->in(0)->as_If()->in(0);
+      assert(proj->_con == predicate_proj->_con, "must match");
+
+      // Range check for counted loops
       const Node*    cmp    = bol->in(1)->as_Cmp();
       Node*          idx    = cmp->in(1);
       assert(!invar.is_invariant(idx), "index is variant");
       assert(cmp->in(2)->Opcode() == Op_LoadRange, "must be");
-      LoadRangeNode* ld_rng = (LoadRangeNode*)cmp->in(2); // LoadRangeNode
+      Node* ld_rng = cmp->in(2); // LoadRangeNode
       assert(invar.is_invariant(ld_rng), "load range must be invariant");
-      ld_rng = (LoadRangeNode*)invar.clone(ld_rng, ctrl);
       int scale    = 1;
       Node* offset = zero;
       bool ok = is_scaled_iv_plus_offset(idx, cl->phi(), &scale, &offset);
       assert(ok, "must be index expression");
+
+      Node* init    = cl->init_trip();
+      Node* limit   = cl->limit();
+      Node* stride  = cl->stride();
+
+      // Build if's for the upper and lower bound tests.  The
+      // lower_bound test will dominate the upper bound test and all
+      // cloned or created nodes will use the lower bound test as
+      // their declared control.
+      ProjNode* lower_bound_proj = create_new_if_for_predicate(predicate_proj);
+      ProjNode* upper_bound_proj = create_new_if_for_predicate(predicate_proj);
+      assert(upper_bound_proj->in(0)->as_If()->in(0) == lower_bound_proj, "should dominate");
+      Node *ctrl = lower_bound_proj->in(0)->as_If()->in(0);
+
+      // Perform cloning to keep Invariance state correct since the
+      // late schedule will place invariant things in the loop.
+      ld_rng = invar.clone(ld_rng, ctrl);
       if (offset && offset != zero) {
         assert(invar.is_invariant(offset), "offset must be loop invariant");
         offset = invar.clone(offset, ctrl);
       }
-      Node* init    = cl->init_trip();
-      Node* limit   = cl->limit();
-      Node* stride  = cl->stride();
-      new_predicate_bol = rc_predicate(ctrl, scale, offset, init, limit, stride, ld_rng);
-      if (TraceLoopPredicate) tty->print("range check");
-    }
+
+      // Test the lower bound
+      Node*  lower_bound_bol = rc_predicate(ctrl, scale, offset, init, limit, stride, ld_rng, false);
+      IfNode* lower_bound_iff = lower_bound_proj->in(0)->as_If();
+      _igvn.hash_delete(lower_bound_iff);
+      lower_bound_iff->set_req(1, lower_bound_bol);
+      if (TraceLoopPredicate) tty->print_cr("lower bound check if: %d", lower_bound_iff->_idx);
 
-    if (new_predicate_proj == NULL) {
+      // Test the upper bound
+      Node* upper_bound_bol = rc_predicate(ctrl, scale, offset, init, limit, stride, ld_rng, true);
+      IfNode* upper_bound_iff = upper_bound_proj->in(0)->as_If();
+      _igvn.hash_delete(upper_bound_iff);
+      upper_bound_iff->set_req(1, upper_bound_bol);
+      if (TraceLoopPredicate) tty->print_cr("upper bound check if: %d", lower_bound_iff->_idx);
+
+      // Fall through into rest of the clean up code which will move
+      // any dependent nodes onto the upper bound test.
+      new_predicate_proj = upper_bound_proj;
+    } else {
       // The other proj of the "iff" is an uncommon trap projection, and we can assume
       // the other proj will not be executed ("executed" means uct raised).
       continue;
-    } else {
-      // Success - attach condition (new_predicate_bol) to predicate if
-      invar.map_ctrl(proj, new_predicate_proj); // so that invariance test can be appropriate
-      IfNode* new_iff = new_predicate_proj->in(0)->as_If();
+    }
 
-      // Negate test if necessary
-      if (proj->_con != predicate_proj->_con) {
-        new_predicate_bol = new (C, 2) BoolNode(new_predicate_bol->in(1), new_predicate_bol->_test.negate());
-        register_new_node(new_predicate_bol, new_iff->in(0));
-        if (TraceLoopPredicate) tty->print_cr(" if negated: %d", iff->_idx);
-      } else {
-        if (TraceLoopPredicate) tty->print_cr(" if: %d", iff->_idx);
-      }
+    // Success - attach condition (new_predicate_bol) to predicate if
+    invar.map_ctrl(proj, new_predicate_proj); // so that invariance test can be appropriate
 
-      _igvn.hash_delete(new_iff);
-      new_iff->set_req(1, new_predicate_bol);
-
-      _igvn.hash_delete(iff);
-      iff->set_req(1, proj->is_IfFalse() ? cond_false : cond_true);
+    // Eliminate the old if in the loop body
+    _igvn.hash_delete(iff);
+    iff->set_req(1, proj->is_IfFalse() ? cond_false : cond_true);
 
-      Node* ctrl = new_predicate_proj; // new control
-      ProjNode* dp = proj;     // old control
-      assert(get_loop(dp) == loop, "guarenteed at the time of collecting proj");
-      // Find nodes (depends only on the test) off the surviving projection;
-      // move them outside the loop with the control of proj_clone
-      for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) {
-        Node* cd = dp->fast_out(i); // Control-dependent node
-        if (cd->depends_only_on_test()) {
-          assert(cd->in(0) == dp, "");
-          _igvn.hash_delete(cd);
-          cd->set_req(0, ctrl); // ctrl, not NULL
-          set_early_ctrl(cd);
-          _igvn._worklist.push(cd);
-          IdealLoopTree *new_loop = get_loop(get_ctrl(cd));
-          if (new_loop != loop) {
-            if (!loop->_child) loop->_body.yank(cd);
-            if (!new_loop->_child ) new_loop->_body.push(cd);
-          }
-          --i;
-          --imax;
+    Node* ctrl = new_predicate_proj; // new control
+    ProjNode* dp = proj;     // old control
+    assert(get_loop(dp) == loop, "guaranteed at the time of collecting proj");
+    // Find nodes (depends only on the test) off the surviving projection;
+    // move them outside the loop with the control of proj_clone
+    for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) {
+      Node* cd = dp->fast_out(i); // Control-dependent node
+      if (cd->depends_only_on_test()) {
+        assert(cd->in(0) == dp, "");
+        _igvn.hash_delete(cd);
+        cd->set_req(0, ctrl); // ctrl, not NULL
+        set_early_ctrl(cd);
+        _igvn._worklist.push(cd);
+        IdealLoopTree *new_loop = get_loop(get_ctrl(cd));
+        if (new_loop != loop) {
+          if (!loop->_child) loop->_body.yank(cd);
+          if (!new_loop->_child ) new_loop->_body.push(cd);
         }
+        --i;
+        --imax;
       }
+    }
 
-      hoisted = true;
-      C->set_major_progress();
-    }
+    hoisted = true;
+    C->set_major_progress();
   } // end while
 
 #ifndef PRODUCT
-    // report that the loop predication has been actually performed
-    // for this loop
-    if (TraceLoopPredicate && hoisted) {
-      tty->print("Loop Predication Performed:");
-      loop->dump_head();
-    }
+  // report that the loop predication has been actually performed
+  // for this loop
+  if (TraceLoopPredicate && hoisted) {
+    tty->print("Loop Predication Performed:");
+    loop->dump_head();
+  }
 #endif
 
   return hoisted;
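
At the source level, the transformation above roughly amounts to replacing a
loop-invariant test that executed on every iteration with a single guard ahead
of the loop. A minimal sketch in plain C++ (illustrative only, not ideal-graph
IR; the function names are made up):

#include <stddef.h>

long sum_unpredicated(const int* a, size_t n, size_t limit) {
  long s = 0;
  for (size_t i = 0; i < n; i++) {
    if (n > limit) return -1;   // invariant test, re-executed every iteration
    s += a[i];
  }
  return s;
}

long sum_predicated(const int* a, size_t n, size_t limit) {
  if (n > limit) return -1;     // hoisted predicate, executed once
  long s = 0;
  for (size_t i = 0; i < n; i++) {
    s += a[i];
  }
  return s;
}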
--- a/src/share/vm/opto/loopnode.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/opto/loopnode.hpp	Thu May 13 18:26:35 2010 -0700
@@ -821,7 +821,7 @@
   BoolNode* rc_predicate(Node* ctrl,
                          int scale, Node* offset,
                          Node* init, Node* limit, Node* stride,
-                         Node* range);
+                         Node* range, bool upper);
 
   // Implementation of the loop predication to promote checks outside the loop
   bool loop_predication_impl(IdealLoopTree *loop);
--- a/src/share/vm/opto/macro.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/opto/macro.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1487,11 +1487,11 @@
                                         Node*& contended_phi_rawmem,
                                         Node* old_eden_top, Node* new_eden_top,
                                         Node* length) {
+   enum { fall_in_path = 1, pf_path = 2 };
    if( UseTLAB && AllocatePrefetchStyle == 2 ) {
       // Generate prefetch allocation with watermark check.
       // As an allocation hits the watermark, we will prefetch starting
       // at a "distance" away from watermark.
-      enum { fall_in_path = 1, pf_path = 2 };
 
       Node *pf_region = new (C, 3) RegionNode(3);
       Node *pf_phi_rawmem = new (C, 3) PhiNode( pf_region, Type::MEMORY,
@@ -1570,6 +1570,45 @@
       needgc_false = pf_region;
       contended_phi_rawmem = pf_phi_rawmem;
       i_o = pf_phi_abio;
+   } else if( UseTLAB && AllocatePrefetchStyle == 3 ) {
+      // Insert a prefetch for each allocation only on the fast-path
+      Node *pf_region = new (C, 3) RegionNode(3);
+      Node *pf_phi_rawmem = new (C, 3) PhiNode( pf_region, Type::MEMORY,
+                                                TypeRawPtr::BOTTOM );
+
+      // Generate several prefetch instructions only for arrays.
+      uint lines = (length != NULL) ? AllocatePrefetchLines : 1;
+      uint step_size = AllocatePrefetchStepSize;
+      uint distance = AllocatePrefetchDistance;
+
+      // Next cache address.
+      Node *cache_adr = new (C, 4) AddPNode(old_eden_top, old_eden_top,
+                                            _igvn.MakeConX(distance));
+      transform_later(cache_adr);
+      cache_adr = new (C, 2) CastP2XNode(needgc_false, cache_adr);
+      transform_later(cache_adr);
+      Node* mask = _igvn.MakeConX(~(intptr_t)(step_size-1));
+      cache_adr = new (C, 3) AndXNode(cache_adr, mask);
+      transform_later(cache_adr);
+      cache_adr = new (C, 2) CastX2PNode(cache_adr);
+      transform_later(cache_adr);
+
+      // Prefetch
+      Node *prefetch = new (C, 3) PrefetchWriteNode( contended_phi_rawmem, cache_adr );
+      prefetch->set_req(0, needgc_false);
+      transform_later(prefetch);
+      contended_phi_rawmem = prefetch;
+      Node *prefetch_adr;
+      distance = step_size;
+      for ( uint i = 1; i < lines; i++ ) {
+        prefetch_adr = new (C, 4) AddPNode( cache_adr, cache_adr,
+                                            _igvn.MakeConX(distance) );
+        transform_later(prefetch_adr);
+        prefetch = new (C, 3) PrefetchWriteNode( contended_phi_rawmem, prefetch_adr );
+        transform_later(prefetch);
+        distance += step_size;
+        contended_phi_rawmem = prefetch;
+      }
    } else if( AllocatePrefetchStyle > 0 ) {
       // Insert a prefetch for each allocation only on the fast-path
       Node *prefetch_adr;
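
The AddP/CastP2X/AndX/CastX2P chain built for style 3 above just rounds the
prefetch address down to a cache-line boundary. A hedged sketch of the same
arithmetic in plain C++ (assumes step_size is a power of two, as
AllocatePrefetchStepSize is expected to be; __builtin_prefetch is the
GCC/Clang intrinsic standing in for the PrefetchWrite nodes):

#include <stdint.h>

static void prefetch_lines_sketch(char* eden_top, uintptr_t distance,
                                  uintptr_t step_size, unsigned lines) {
  // AndX with ~(step_size - 1) aligns down to the start of a cache line.
  uintptr_t cache_adr = ((uintptr_t)eden_top + distance) & ~(step_size - 1);
  for (unsigned i = 0; i < lines; i++) {
    __builtin_prefetch((void*)(cache_adr + i * step_size), 1 /* write */);
  }
}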
--- a/src/share/vm/opto/matcher.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/opto/matcher.hpp	Thu May 13 18:26:35 2010 -0700
@@ -373,8 +373,8 @@
   // to implement the UseStrictFP mode.
   static const bool strict_fp_requires_explicit_rounding;
 
-  // Do floats take an entire double register or just half?
-  static const bool float_in_double;
+  // Are floats converted to doubles when stored to the stack during deoptimization?
+  static bool float_in_double();
   // Do ints take an entire long register or just half?
   static const bool int_in_long;
 
--- a/src/share/vm/opto/memnode.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/opto/memnode.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1244,5 +1244,5 @@
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return NotAMachineReg; }
   virtual uint match_edge(uint idx) const { return idx==2; }
-  virtual const Type *bottom_type() const { return Type::ABIO; }
+  virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; }
 };
--- a/src/share/vm/opto/output.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/opto/output.cpp	Thu May 13 18:26:35 2010 -0700
@@ -678,7 +678,7 @@
 #endif //_LP64
     else if( (t->base() == Type::FloatBot || t->base() == Type::FloatCon) &&
                OptoReg::is_reg(regnum) ) {
-      array->append(new_loc_value( _regalloc, regnum, Matcher::float_in_double
+      array->append(new_loc_value( _regalloc, regnum, Matcher::float_in_double()
                                    ? Location::float_in_dbl : Location::normal ));
     } else if( t->base() == Type::Int && OptoReg::is_reg(regnum) ) {
       array->append(new_loc_value( _regalloc, regnum, Matcher::int_in_long
--- a/src/share/vm/opto/parse.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/opto/parse.hpp	Thu May 13 18:26:35 2010 -0700
@@ -551,9 +551,6 @@
   // Also handles exceptions for individual bytecodes.
   void catch_inline_exceptions(SafePointNode* ex_map);
 
-  // Bytecode classifier, helps decide to use uncommon_trap vs. rethrow_C.
-  bool can_rerun_bytecode();
-
   // Merge the given map into correct exceptional exit state.
   // Assumes that there is no applicable local handler.
   void throw_to_exit(SafePointNode* ex_map);
--- a/src/share/vm/opto/parse1.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/opto/parse1.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -237,7 +237,6 @@
     C->record_method_not_compilable("OSR in empty or breakpointed method");
     return;
   }
-  MethodLivenessResult raw_live_locals = method()->raw_liveness_at_bci(osr_bci());
 
   // Extract the needed locals from the interpreter frame.
   Node *locals_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals-1)*wordSize);
@@ -281,7 +280,13 @@
       continue;
     }
     // Construct code to access the appropriate local.
-    Node *value = fetch_interpreter_state(index, type->basic_type(), locals_addr, osr_buf);
+    BasicType bt = type->basic_type();
+    if (type == TypePtr::NULL_PTR) {
+      // Ptr types are mixed together with T_ADDRESS but NULL is
+      // really for T_OBJECT types so correct it.
+      bt = T_OBJECT;
+    }
+    Node *value = fetch_interpreter_state(index, bt, locals_addr, osr_buf);
     set_local(index, value);
   }
 
@@ -306,6 +311,7 @@
   SafePointNode* bad_type_exit = clone_map();
   bad_type_exit->set_control(new (C, 1) RegionNode(1));
 
+  assert(osr_block->flow()->jsrs()->size() == 0, "should be no jsrs live at osr point");
   for (index = 0; index < max_locals; index++) {
     if (stopped())  break;
     Node* l = local(index);
@@ -317,8 +323,18 @@
         continue;
       }
     }
-    if (type->basic_type() == T_ADDRESS && !raw_live_locals.at(index)) {
-      // Skip type check for dead address locals
+    if (osr_block->flow()->local_type_at(index)->is_return_address()) {
+      // In our current system it's illegal for jsr addresses to be
+      // live into an OSR entry point because the compiler performs
+      // inlining of jsrs.  ciTypeFlow has a bailout that detects this
+      // case and aborts the compile if addresses are live into an OSR
+      // entry point.  Because of that we can assume that any address
+      // locals at the OSR entry point are dead.  Method liveness
+      // isn't precise enough to figure out that they are dead in all
+      // cases so simply skip checking address locals altogether.
+      // Any type check is guaranteed to fail since the
+      // interpreter type is the result of a load which might have any
+      // value and the expected type is a constant.
       continue;
     }
     set_local(index, check_interpreter_type(l, type, bad_type_exit));
@@ -788,67 +804,6 @@
   initial_gvn()->transform_no_reclaim(exit);
 }
 
-bool Parse::can_rerun_bytecode() {
-  switch (bc()) {
-  case Bytecodes::_ldc:
-  case Bytecodes::_ldc_w:
-  case Bytecodes::_ldc2_w:
-  case Bytecodes::_getfield:
-  case Bytecodes::_putfield:
-  case Bytecodes::_getstatic:
-  case Bytecodes::_putstatic:
-  case Bytecodes::_arraylength:
-  case Bytecodes::_baload:
-  case Bytecodes::_caload:
-  case Bytecodes::_iaload:
-  case Bytecodes::_saload:
-  case Bytecodes::_faload:
-  case Bytecodes::_aaload:
-  case Bytecodes::_laload:
-  case Bytecodes::_daload:
-  case Bytecodes::_bastore:
-  case Bytecodes::_castore:
-  case Bytecodes::_iastore:
-  case Bytecodes::_sastore:
-  case Bytecodes::_fastore:
-  case Bytecodes::_aastore:
-  case Bytecodes::_lastore:
-  case Bytecodes::_dastore:
-  case Bytecodes::_irem:
-  case Bytecodes::_idiv:
-  case Bytecodes::_lrem:
-  case Bytecodes::_ldiv:
-  case Bytecodes::_frem:
-  case Bytecodes::_fdiv:
-  case Bytecodes::_drem:
-  case Bytecodes::_ddiv:
-  case Bytecodes::_checkcast:
-  case Bytecodes::_instanceof:
-  case Bytecodes::_anewarray:
-  case Bytecodes::_newarray:
-  case Bytecodes::_multianewarray:
-  case Bytecodes::_new:
-  case Bytecodes::_monitorenter:  // can re-run initial null check, only
-  case Bytecodes::_return:
-    return true;
-    break;
-
-  // Don't rerun athrow since it's part of the exception path.
-  case Bytecodes::_athrow:
-  case Bytecodes::_invokestatic:
-  case Bytecodes::_invokedynamic:
-  case Bytecodes::_invokespecial:
-  case Bytecodes::_invokevirtual:
-  case Bytecodes::_invokeinterface:
-    return false;
-    break;
-
-  default:
-    assert(false, "unexpected bytecode produced an exception");
-    return true;
-  }
-}
-
 //---------------------------do_exceptions-------------------------------------
 // Process exceptions arising from the current bytecode.
 // Send caught exceptions to the proper handler within this method.
@@ -862,9 +817,6 @@
     return;
   }
 
-  // Make sure we can classify this bytecode if we need to.
-  debug_only(can_rerun_bytecode());
-
   PreserveJVMState pjvms(this, false);
 
   SafePointNode* ex_map;
--- a/src/share/vm/opto/runtime.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/opto/runtime.cpp	Thu May 13 18:26:35 2010 -0700
@@ -864,8 +864,8 @@
     thread->set_exception_handler_pc(handler_address);
     thread->set_exception_stack_size(0);
 
-    // Check if the exception PC is a MethodHandle call.
-    thread->set_is_method_handle_exception(nm->is_method_handle_return(pc));
+    // Check if the exception PC is a MethodHandle call site.
+    thread->set_is_method_handle_return(nm->is_method_handle_return(pc));
   }
 
   // Restore correct return pc.  Was saved above.
@@ -952,7 +952,7 @@
 
   thread->set_vm_result(exception);
   // Frame not compiled (handles deoptimization blob)
-  return SharedRuntime::raw_exception_handler_for_return_address(ret_pc);
+  return SharedRuntime::raw_exception_handler_for_return_address(thread, ret_pc);
 }
 
 
--- a/src/share/vm/opto/split_if.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/opto/split_if.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -187,10 +187,20 @@
   }
 #endif
 
+  // ConvI2L may have type information on it which becomes invalid if
+  // it moves up in the graph, so change any clones to widen the type
+  // to TypeLong::INT when pushing it up.
+  const Type* rtype = NULL;
+  if (n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::INT) {
+    rtype = TypeLong::INT;
+  }
+
   // Now actually split-up this guy.  One copy per control path merging.
   Node *phi = PhiNode::make_blank(blk1, n);
   for( uint j = 1; j < blk1->req(); j++ ) {
     Node *x = n->clone();
+    // Widen the type of the ConvI2L when pushing up.
+    if (rtype != NULL) x->as_Type()->set_type(rtype);
     if( n->in(0) && n->in(0) == blk1 )
       x->set_req( 0, blk1->in(j) );
     for( uint i = 1; i < n->req(); i++ ) {
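
The reason for the widening: a clone pushed above the region can land on a
path where the narrowed range proven for the original node no longer holds.
A stand-in sketch of that rule (LongRange and the helper are hypothetical,
not the real type classes):

#include <limits.h>

struct LongRange { long lo, hi; };

static LongRange widen_for_split(LongRange t) {
  // Any range tighter than [min_jint, max_jint] was established by a
  // test on one incoming path, so discard it when hoisting the clone.
  if (t.lo > INT_MIN || t.hi < INT_MAX) {
    LongRange full = { INT_MIN, INT_MAX };  // analogue of TypeLong::INT
    return full;
  }
  return t;
}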
--- a/src/share/vm/opto/subnode.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/opto/subnode.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -509,3 +509,23 @@
   const Type *bottom_type() const { return TypeLong::LONG; }
   virtual uint ideal_reg() const { return Op_RegL; }
 };
+
+//-------------------------------ReverseBytesUSNode--------------------------------
+// reverse bytes of an unsigned short / char
+class ReverseBytesUSNode : public Node {
+public:
+  ReverseBytesUSNode(Node *c, Node *in1) : Node(c, in1) {}
+  virtual int Opcode() const;
+  const Type *bottom_type() const { return TypeInt::CHAR; }
+  virtual uint ideal_reg() const { return Op_RegI; }
+};
+
+//-------------------------------ReverseBytesSNode--------------------------------
+// reverse bytes of a short
+class ReverseBytesSNode : public Node {
+public:
+  ReverseBytesSNode(Node *c, Node *in1) : Node(c, in1) {}
+  virtual int Opcode() const;
+  const Type *bottom_type() const { return TypeInt::SHORT; }
+  virtual uint ideal_reg() const { return Op_RegI; }
+};
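
The scalar operation both nodes stand for is a 16-bit byte swap; the US
variant zero-extends (char) and the S variant sign-extends (short) when the
result is widened to int, matching the CHAR and SHORT bottom types above.
A hedged C++ sketch:

#include <stdint.h>

static inline uint16_t reverse_bytes_us(uint16_t x) {
  return (uint16_t)((x >> 8) | (x << 8));        // result stays in [0, 0xFFFF]
}

static inline int16_t reverse_bytes_s(int16_t x) {
  return (int16_t)reverse_bytes_us((uint16_t)x); // sign-extends when used as int
}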
--- a/src/share/vm/opto/type.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/opto/type.cpp	Thu May 13 18:26:35 2010 -0700
@@ -2545,12 +2545,15 @@
       ftip->is_loaded() &&  ftip->klass()->is_interface() &&
       ktip->is_loaded() && !ktip->klass()->is_interface()) {
     // Happens in a CTW of rt.jar, 320-341, no extra flags
+    assert(!ftip->klass_is_exact(), "interface could not be exact");
     return ktip->cast_to_ptr_type(ftip->ptr());
   }
+  // Interface klass type could be exact, in contrast to interface type;
+  // return it here instead of an incorrect Constant ptr J/L/Object (6894807).
   if (ftkp != NULL && ktkp != NULL &&
       ftkp->is_loaded() &&  ftkp->klass()->is_interface() &&
+      !ftkp->klass_is_exact() && // Keep exact interface klass
       ktkp->is_loaded() && !ktkp->klass()->is_interface()) {
-    // Happens in a CTW of rt.jar, 320-341, no extra flags
     return ktkp->cast_to_ptr_type(ftkp->ptr());
   }
 
@@ -2809,7 +2812,8 @@
         // then we can subclass in the Java class hierarchy.
         if (klass()->equals(ciEnv::current()->Object_klass())) {
           // that is, tp's array type is a subtype of my klass
-          return TypeAryPtr::make(ptr, tp->ary(), tp->klass(), tp->klass_is_exact(), offset, instance_id);
+          return TypeAryPtr::make(ptr, (ptr == Constant ? tp->const_oop() : NULL),
+                                  tp->ary(), tp->klass(), tp->klass_is_exact(), offset, instance_id);
         }
       }
       // The other case cannot happen, since I cannot be a subtype of an array.
@@ -3415,7 +3419,8 @@
         // then we can subclass in the Java class hierarchy.
         if( tp->klass()->equals(ciEnv::current()->Object_klass()) ) {
           // that is, my array type is a subtype of 'tp' klass
-          return make( ptr, _ary, _klass, _klass_is_exact, offset, instance_id );
+          return make( ptr, (ptr == Constant ? const_oop() : NULL),
+                       _ary, _klass, _klass_is_exact, offset, instance_id );
         }
       }
       // The other case cannot happen, since t cannot be a subtype of an array.
--- a/src/share/vm/prims/forte.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/prims/forte.cpp	Thu May 13 18:26:35 2010 -0700
@@ -55,12 +55,11 @@
 };
 
 
-static void is_decipherable_compiled_frame(frame* fr, RegisterMap* map,
-  bool* is_compiled_p, bool* is_walkable_p);
+static bool is_decipherable_compiled_frame(JavaThread* thread, frame* fr, nmethod* nm);
 static bool is_decipherable_interpreted_frame(JavaThread* thread,
-                                                frame* fr,
-                                                methodOop* method_p,
-                                                int* bci_p);
+                                              frame* fr,
+                                              methodOop* method_p,
+                                              int* bci_p);
 
 
 
@@ -122,41 +121,43 @@
 // Determine if 'fr' is a decipherable compiled frame. We are already
 // assured that fr is for a java nmethod.
 
-static bool is_decipherable_compiled_frame(frame* fr) {
-
-  assert(fr->cb() != NULL && fr->cb()->is_nmethod(), "invariant");
-  nmethod* nm = (nmethod*) fr->cb();
+static bool is_decipherable_compiled_frame(JavaThread* thread, frame* fr, nmethod* nm) {
   assert(nm->is_java_method(), "invariant");
 
-  // First try and find an exact PcDesc
-
-  PcDesc* pc_desc = nm->pc_desc_at(fr->pc());
-
-  // Did we find a useful PcDesc?
-  if (pc_desc != NULL &&
-      pc_desc->scope_decode_offset() == DebugInformationRecorder::serialized_null) {
-
-    address probe_pc = fr->pc() + 1;
-    pc_desc = nm->pc_desc_near(probe_pc);
+  if (thread->has_last_Java_frame() && thread->last_Java_pc() == fr->pc()) {
+    // We're stopped at a call into the JVM so look for a PcDesc with
+    // the actual pc reported by the frame.
+    PcDesc* pc_desc = nm->pc_desc_at(fr->pc());
 
-    // Now do we have a useful PcDesc?
-
+    // Did we find a useful PcDesc?
     if (pc_desc != NULL &&
-        pc_desc->scope_decode_offset() == DebugInformationRecorder::serialized_null) {
-      // No debug information available for this pc
-      // vframeStream would explode if we try and walk the frames.
-      return false;
+        pc_desc->scope_decode_offset() != DebugInformationRecorder::serialized_null) {
+      return true;
     }
-
-    // This PcDesc is useful however we must adjust the frame's pc
-    // so that the vframeStream lookups will use this same pc
-
-    fr->set_pc(pc_desc->real_pc(nm));
   }
 
+  // We're at some random pc in the nmethod so search for the PcDesc
+  // whose pc is greater than the current PC.  It's done this way
+  // because the extra PcDescs that are recorded for improved debug
+  // info record the end of the region covered by the ScopeDesc
+  // instead of the beginning.
+  PcDesc* pc_desc = nm->pc_desc_near(fr->pc() + 1);
+
+  // Now do we have a useful PcDesc?
+  if (pc_desc == NULL ||
+      pc_desc->scope_decode_offset() == DebugInformationRecorder::serialized_null) {
+    // No debug information available for this pc
+    // vframeStream would explode if we try and walk the frames.
+    return false;
+  }
+
+  // This PcDesc is useful however we must adjust the frame's pc
+  // so that the vframeStream lookups will use this same pc
+  fr->set_pc(pc_desc->real_pc(nm));
   return true;
 }
 
+
 // Determine if 'fr' is a walkable interpreted frame. Returns false
 // if it is not. *method_p, and *bci_p are not set when false is
 // returned. *method_p is non-NULL if frame was executing a Java
@@ -166,9 +167,9 @@
 // even if a valid BCI cannot be found.
 
 static bool is_decipherable_interpreted_frame(JavaThread* thread,
-                                                frame* fr,
-                                                methodOop* method_p,
-                                                int* bci_p) {
+                                              frame* fr,
+                                              methodOop* method_p,
+                                              int* bci_p) {
   assert(fr->is_interpreted_frame(), "just checking");
 
   // top frame is an interpreted frame
@@ -323,13 +324,15 @@
       // have a PCDesc that can get us a bci however we did find
       // a method
 
-      if (!is_decipherable_compiled_frame(&candidate)) {
+      if (!is_decipherable_compiled_frame(thread, &candidate, nm)) {
         return false;
       }
 
       // is_decipherable_compiled_frame may modify candidate's pc
       *initial_frame_p = candidate;
 
+      assert(nm->pc_desc_at(candidate.pc()) != NULL, "if it's decipherable then pc must be valid");
+
       return true;
     }
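
pc_desc_near(fr->pc() + 1) behaves like a lower-bound search over the
nmethod's PcDesc table for the first entry at or after the probe pc. A
hedged sketch of that lookup shape (PcDescLite and the helpers are
stand-ins, not the real classes):

#include <cstddef>
#include <algorithm>
#include <vector>

struct PcDescLite { unsigned pc_offset; int scope_decode_offset; };

static bool pc_less(const PcDescLite& d, unsigned off) {
  return d.pc_offset < off;
}

static const PcDescLite* pc_desc_near_sketch(const std::vector<PcDescLite>& table,
                                             unsigned pc_offset) {
  // Table assumed sorted by pc_offset; return the first entry >= pc_offset.
  std::vector<PcDescLite>::const_iterator it =
      std::lower_bound(table.begin(), table.end(), pc_offset, pc_less);
  return (it == table.end()) ? NULL : &*it;
}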
 
--- a/src/share/vm/prims/jni.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/prims/jni.cpp	Thu May 13 18:26:35 2010 -0700
@@ -3401,12 +3401,16 @@
   thread->set_thread_state(_thread_in_vm);
   // Must do this before initialize_thread_local_storage
   thread->record_stack_base_and_size();
+
   thread->initialize_thread_local_storage();
 
   if (!os::create_attached_thread(thread)) {
     delete thread;
     return JNI_ERR;
   }
+  // Enable stack overflow checks
+  thread->create_stack_guard_pages();
+
   thread->initialize_tlab();
 
   // Crucial that we do not have a safepoint check for this thread, since it has
@@ -3452,9 +3456,6 @@
   // to regrab the threads_lock
   thread->set_attached();
 
-  // Enable stack overflow checks
-  thread->create_stack_guard_pages();
-
   // Set java thread status.
   java_lang_Thread::set_thread_status(thread->threadObj(),
               java_lang_Thread::RUNNABLE);
--- a/src/share/vm/prims/jvm.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/prims/jvm.cpp	Thu May 13 18:26:35 2010 -0700
@@ -26,6 +26,10 @@
 #include "incls/_jvm.cpp.incl"
 #include <errno.h>
 
+HS_DTRACE_PROBE_DECL1(hotspot, thread__sleep__begin, long long);
+HS_DTRACE_PROBE_DECL1(hotspot, thread__sleep__end, int);
+HS_DTRACE_PROBE_DECL0(hotspot, thread__yield);
+
 /*
   NOTE about use of any ctor or function call that can trigger a safepoint/GC:
   such ctors and calls MUST NOT come between an oop declaration/init and its
@@ -2773,6 +2777,7 @@
 JVM_ENTRY(void, JVM_Yield(JNIEnv *env, jclass threadClass))
   JVMWrapper("JVM_Yield");
   if (os::dont_yield()) return;
+  HS_DTRACE_PROBE0(hotspot, thread__yield);
   // When ConvertYieldToSleep is off (default), this matches the classic VM use of yield.
   // Critical for similar threading behaviour
   if (ConvertYieldToSleep) {
@@ -2798,6 +2803,8 @@
   // And set new thread state to SLEEPING.
   JavaThreadSleepState jtss(thread);
 
+  HS_DTRACE_PROBE1(hotspot, thread__sleep__begin, millis);
+
   if (millis == 0) {
     // When ConvertSleepToYield is on, this matches the classic VM implementation of
     // JVM_Sleep. Critical for similar threading behaviour (Win32)
@@ -2818,6 +2825,7 @@
       // An asynchronous exception (e.g., ThreadDeathException) could have been thrown on
       // us while we were sleeping. We do not overwrite those.
       if (!HAS_PENDING_EXCEPTION) {
+        HS_DTRACE_PROBE1(hotspot, thread__sleep__end, 1);
         // TODO-FIXME: THROW_MSG returns which means we will not call set_state()
         // to properly restore the thread state.  That's likely wrong.
         THROW_MSG(vmSymbols::java_lang_InterruptedException(), "sleep interrupted");
@@ -2825,6 +2833,7 @@
     }
     thread->osthread()->set_state(old_state);
   }
+  HS_DTRACE_PROBE1(hotspot, thread__sleep__end, 0);
 JVM_END
 
 JVM_ENTRY(jobject, JVM_CurrentThread(JNIEnv* env, jclass threadClass))
--- a/src/share/vm/prims/jvmtiExport.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/prims/jvmtiExport.cpp	Thu May 13 18:26:35 2010 -0700
@@ -270,7 +270,6 @@
 int               JvmtiExport::_field_modification_count                  = 0;
 
 bool              JvmtiExport::_can_access_local_variables                = false;
-bool              JvmtiExport::_can_examine_or_deopt_anywhere             = false;
 bool              JvmtiExport::_can_hotswap_or_post_breakpoint            = false;
 bool              JvmtiExport::_can_modify_any_class                      = false;
 bool              JvmtiExport::_can_walk_any_space                        = false;
--- a/src/share/vm/prims/jvmtiExport.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/prims/jvmtiExport.hpp	Thu May 13 18:26:35 2010 -0700
@@ -58,7 +58,6 @@
   static int         _field_modification_count;
 
   static bool        _can_access_local_variables;
-  static bool        _can_examine_or_deopt_anywhere;
   static bool        _can_hotswap_or_post_breakpoint;
   static bool        _can_modify_any_class;
   static bool        _can_walk_any_space;
@@ -112,7 +111,6 @@
 
   // these should only be called by the friend class
   friend class JvmtiManageCapabilities;
-  inline static void set_can_examine_or_deopt_anywhere(bool on)        { _can_examine_or_deopt_anywhere = (on != 0); }
   inline static void set_can_modify_any_class(bool on)                 { _can_modify_any_class = (on != 0); }
   inline static void set_can_access_local_variables(bool on)           { _can_access_local_variables = (on != 0); }
   inline static void set_can_hotswap_or_post_breakpoint(bool on)       { _can_hotswap_or_post_breakpoint = (on != 0); }
@@ -220,7 +218,6 @@
   static void enter_live_phase();
 
   // ------ can_* conditions (below) are set at OnLoad and never changed ------------
-  inline static bool can_examine_or_deopt_anywhere()              { return _can_examine_or_deopt_anywhere; }
   inline static bool can_modify_any_class()                       { return _can_modify_any_class; }
   inline static bool can_access_local_variables()                 { return _can_access_local_variables; }
   inline static bool can_hotswap_or_post_breakpoint()             { return _can_hotswap_or_post_breakpoint; }
--- a/src/share/vm/prims/jvmtiManageCapabilities.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/prims/jvmtiManageCapabilities.cpp	Thu May 13 18:26:35 2010 -0700
@@ -332,16 +332,6 @@
   }
 
   JvmtiExport::set_can_get_source_debug_extension(avail.can_get_source_debug_extension);
-  JvmtiExport::set_can_examine_or_deopt_anywhere(
-    avail.can_generate_breakpoint_events ||
-    interp_events ||
-    avail.can_redefine_classes ||
-    avail.can_retransform_classes ||
-    avail.can_access_local_variables ||
-    avail.can_get_owned_monitor_info ||
-    avail.can_get_current_contended_monitor ||
-    avail.can_get_monitor_info ||
-    avail.can_get_owned_monitor_stack_depth_info);
   JvmtiExport::set_can_maintain_original_method_order(avail.can_maintain_original_method_order);
   JvmtiExport::set_can_post_interpreter_events(interp_events);
   JvmtiExport::set_can_hotswap_or_post_breakpoint(
@@ -353,10 +343,13 @@
     avail.can_generate_all_class_hook_events);
   JvmtiExport::set_can_walk_any_space(
     avail.can_tag_objects);   // disable sharing in onload phase
+  // This controls whether the compilers keep extra locals live to
+  // improve the debugging experience, so only set it if the selected
+  // capabilities look like a debugger.
   JvmtiExport::set_can_access_local_variables(
-    avail.can_access_local_variables  ||
-    avail.can_redefine_classes ||
-    avail.can_retransform_classes);
+    avail.can_access_local_variables ||
+    avail.can_generate_breakpoint_events ||
+    avail.can_generate_frame_pop_events);
   JvmtiExport::set_can_post_on_exceptions(
     avail.can_generate_exception_events ||
     avail.can_generate_frame_pop_events ||
--- a/src/share/vm/prims/methodHandles.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/prims/methodHandles.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2008-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -82,6 +82,10 @@
   NULL
 };
 
+// Adapters.
+MethodHandlesAdapterBlob* MethodHandles::_adapter_code      = NULL;
+int                       MethodHandles::_adapter_code_size = StubRoutines::method_handles_adapters_code_size;
+
 jobject MethodHandles::_raise_exception_method;
 
 #ifdef ASSERT
@@ -95,6 +99,41 @@
 }
 #endif
 
+
+//------------------------------------------------------------------------------
+// MethodHandles::generate_adapters
+//
+void MethodHandles::generate_adapters() {
+  if (!EnableMethodHandles || SystemDictionary::MethodHandle_klass() == NULL)  return;
+
+  assert(_adapter_code == NULL, "generate only once");
+
+  ResourceMark rm;
+  TraceTime timer("MethodHandles adapters generation", TraceStartupTime);
+  _adapter_code = MethodHandlesAdapterBlob::create(_adapter_code_size);
+  if (_adapter_code == NULL)
+    vm_exit_out_of_memory(_adapter_code_size, "CodeCache: no room for MethodHandles adapters");
+  CodeBuffer code(_adapter_code->instructions_begin(), _adapter_code->instructions_size());
+
+  MethodHandlesAdapterGenerator g(&code);
+  g.generate();
+}
+
+
+//------------------------------------------------------------------------------
+// MethodHandlesAdapterGenerator::generate
+//
+void MethodHandlesAdapterGenerator::generate() {
+  // Generate generic method handle adapters.
+  for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST;
+       ek < MethodHandles::_EK_LIMIT;
+       ek = MethodHandles::EntryKind(1 + (int)ek)) {
+    StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek));
+    MethodHandles::generate_method_handle_stub(_masm, ek);
+  }
+}
+
+
 void MethodHandles::set_enabled(bool z) {
   if (_enabled != z) {
     guarantee(z && EnableMethodHandles, "can only enable once, and only if -XX:+EnableMethodHandles");
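
generate() above steps through EntryKind with an explicit cast because
pre-C++11 enums have no operator++. The same idiom in isolation, with
hypothetical names:

enum EntryKindLite { EK_FIRST = 0, EK_A = 0, EK_B, EK_C, EK_LIMIT };

static void visit_all_kinds(void (*visit)(EntryKindLite)) {
  for (EntryKindLite ek = EK_FIRST; ek < EK_LIMIT;
       ek = EntryKindLite(1 + (int)ek)) {
    visit(ek);   // re-cast from int on every step
  }
}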
--- a/src/share/vm/prims/methodHandles.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/prims/methodHandles.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2008-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -115,6 +115,10 @@
   static const char*        _entry_names[_EK_LIMIT+1];
   static jobject            _raise_exception_method;
 
+  // Adapters.
+  static MethodHandlesAdapterBlob* _adapter_code;
+  static int                       _adapter_code_size;
+
   static bool ek_valid(EntryKind ek)            { return (uint)ek < (uint)_EK_LIMIT; }
   static bool conv_op_valid(int op)             { return (uint)op < (uint)CONV_OP_LIMIT; }
 
@@ -133,6 +137,43 @@
     _entries[ek] = me;
   }
 
+  // Some adapter helper functions.
+  static void get_ek_bound_mh_info(EntryKind ek, BasicType& arg_type, int& arg_mask, int& arg_slots) {
+    switch (ek) {
+    case _bound_int_mh        : // fall-thru
+    case _bound_int_direct_mh : arg_type = T_INT;    arg_mask = _INSERT_INT_MASK;  break;
+    case _bound_long_mh       : // fall-thru
+    case _bound_long_direct_mh: arg_type = T_LONG;   arg_mask = _INSERT_LONG_MASK; break;
+    case _bound_ref_mh        : // fall-thru
+    case _bound_ref_direct_mh : arg_type = T_OBJECT; arg_mask = _INSERT_REF_MASK;  break;
+    default: ShouldNotReachHere();
+    }
+    arg_slots = type2size[arg_type];
+  }
+
+  static void get_ek_adapter_opt_swap_rot_info(EntryKind ek, int& swap_bytes, int& rotate) {
+    int swap_slots = 0;
+    switch (ek) {
+    case _adapter_opt_swap_1:     swap_slots = 1; rotate =  0; break;
+    case _adapter_opt_swap_2:     swap_slots = 2; rotate =  0; break;
+    case _adapter_opt_rot_1_up:   swap_slots = 1; rotate =  1; break;
+    case _adapter_opt_rot_1_down: swap_slots = 1; rotate = -1; break;
+    case _adapter_opt_rot_2_up:   swap_slots = 2; rotate =  1; break;
+    case _adapter_opt_rot_2_down: swap_slots = 2; rotate = -1; break;
+    default: ShouldNotReachHere();
+    }
+    // Return the size of the stack slots to move in bytes.
+    swap_bytes = swap_slots * Interpreter::stackElementSize();
+  }
+
+  static int get_ek_adapter_opt_spread_info(EntryKind ek) {
+    switch (ek) {
+    case _adapter_opt_spread_0: return  0;
+    case _adapter_opt_spread_1: return  1;
+    default                   : return -1;
+    }
+  }
+
   static methodOop raise_exception_method() {
     oop rem = JNIHandles::resolve(_raise_exception_method);
     assert(rem == NULL || rem->is_method(), "");
@@ -230,7 +271,10 @@
   // bit values for suppress argument to expand_MemberName:
   enum { _suppress_defc = 1, _suppress_name = 2, _suppress_type = 4 };
 
-  // called from InterpreterGenerator and StubGenerator
+  // Generate MethodHandles adapters.
+  static void generate_adapters();
+
+  // Called from InterpreterGenerator and MethodHandlesAdapterGenerator.
   static address generate_method_handle_interpreter_entry(MacroAssembler* _masm);
   static void generate_method_handle_stub(MacroAssembler* _masm, EntryKind ek);
 
@@ -385,13 +429,13 @@
   static void insert_arg_slots(MacroAssembler* _masm,
                                RegisterOrConstant arg_slots,
                                int arg_mask,
-                               Register rax_argslot,
-                               Register rbx_temp, Register rdx_temp);
+                               Register argslot_reg,
+                               Register temp_reg, Register temp2_reg);
 
   static void remove_arg_slots(MacroAssembler* _masm,
                                RegisterOrConstant arg_slots,
-                               Register rax_argslot,
-                               Register rbx_temp, Register rdx_temp);
+                               Register argslot_reg,
+                               Register temp_reg, Register temp2_reg);
 };
 
 
@@ -447,3 +491,14 @@
 
 address MethodHandles::from_compiled_entry(EntryKind ek) { return entry(ek)->from_compiled_entry(); }
 address MethodHandles::from_interpreted_entry(EntryKind ek) { return entry(ek)->from_interpreted_entry(); }
+
+
+//------------------------------------------------------------------------------
+// MethodHandlesAdapterGenerator
+//
+class MethodHandlesAdapterGenerator : public StubCodeGenerator {
+public:
+  MethodHandlesAdapterGenerator(CodeBuffer* code) : StubCodeGenerator(code) {}
+
+  void generate();
+};
--- a/src/share/vm/prims/unsafe.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/prims/unsafe.cpp	Thu May 13 18:26:35 2010 -0700
@@ -29,6 +29,10 @@
 #include "incls/_precompiled.incl"
 #include "incls/_unsafe.cpp.incl"
 
+HS_DTRACE_PROBE_DECL3(hotspot, thread__park__begin, uintptr_t, int, long long);
+HS_DTRACE_PROBE_DECL1(hotspot, thread__park__end, uintptr_t);
+HS_DTRACE_PROBE_DECL1(hotspot, thread__unpark, uintptr_t);
+
 #define MAX_OBJECT_SIZE \
   ( arrayOopDesc::header_size(T_DOUBLE) * HeapWordSize \
     + ((julong)max_jint * sizeof(double)) )
@@ -1083,8 +1087,10 @@
 
 UNSAFE_ENTRY(void, Unsafe_Park(JNIEnv *env, jobject unsafe, jboolean isAbsolute, jlong time))
   UnsafeWrapper("Unsafe_Park");
+  HS_DTRACE_PROBE3(hotspot, thread__park__begin, thread->parker(), (int) isAbsolute, time);
   JavaThreadParkedState jtps(thread, time != 0);
   thread->parker()->park(isAbsolute != 0, time);
+  HS_DTRACE_PROBE1(hotspot, thread__park__end, thread->parker());
 UNSAFE_END
 
 UNSAFE_ENTRY(void, Unsafe_Unpark(JNIEnv *env, jobject unsafe, jobject jthread))
@@ -1116,6 +1122,7 @@
     }
   }
   if (p != NULL) {
+    HS_DTRACE_PROBE1(hotspot, thread__unpark, p);
     p->unpark();
   }
 UNSAFE_END
--- a/src/share/vm/runtime/arguments.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/runtime/arguments.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1220,6 +1220,11 @@
   if (!FLAG_IS_DEFAULT(CMSParPromoteBlocksToClaim) || !FLAG_IS_DEFAULT(OldPLABWeight)) {
     CFLS_LAB::modify_initialization(OldPLABSize, OldPLABWeight);
   }
+  if (PrintGCDetails && Verbose) {
+    tty->print_cr("MarkStackSize: %uk  MarkStackSizeMax: %uk",
+      MarkStackSize / K, MarkStackSizeMax / K);
+    tty->print_cr("ConcGCThreads: %u", ConcGCThreads);
+  }
 }
 #endif // KERNEL
 
@@ -1356,6 +1361,25 @@
   if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
     FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
   }
+
+  if (FLAG_IS_DEFAULT(MarkStackSize)) {
+    FLAG_SET_DEFAULT(MarkStackSize, 128 * TASKQUEUE_SIZE);
+  }
+  if (PrintGCDetails && Verbose) {
+    tty->print_cr("MarkStackSize: %uk  MarkStackSizeMax: %uk",
+      MarkStackSize / K, MarkStackSizeMax / K);
+    tty->print_cr("ConcGCThreads: %u", ConcGCThreads);
+  }
+
+  if (FLAG_IS_DEFAULT(GCTimeRatio) || GCTimeRatio == 0) {
+    // In G1, we want the default GC overhead goal to be higher than,
+    // say, in PS, so we set it here to 10%: a GCTimeRatio of 9 bounds
+    // GC time at 1 / (1 + 9) of total time. Otherwise the heap might
+    // be expanded more aggressively than we would like. In fact, even
+    // 10% seems not to be high enough in some cases (especially small
+    // GC stress tests whose main activity is allocation). We might
+    // consider increasing it further.
+    FLAG_SET_DEFAULT(GCTimeRatio, 9);
+  }
 }
 
 void Arguments::set_heap_size() {
@@ -1754,6 +1778,11 @@
     status = false;
   }
 
+  if (UseG1GC) {
+    status = status && verify_percentage(InitiatingHeapOccupancyPercent,
+                                         "InitiatingHeapOccupancyPercent");
+  }
+
   status = status && verify_interval(RefDiscoveryPolicy,
                                      ReferenceProcessor::DiscoveryPolicyMin,
                                      ReferenceProcessor::DiscoveryPolicyMax,
@@ -1812,6 +1841,29 @@
   return false;
 }
 
+bool Arguments::parse_uintx(const char* value,
+                            uintx* uintx_arg,
+                            uintx min_size) {
+
+  // Check the sign first since atomull() parses only unsigned values.
+  bool value_is_positive = !(*value == '-');
+
+  if (value_is_positive) {
+    julong n;
+    bool good_return = atomull(value, &n);
+    if (good_return) {
+      bool above_minimum = n >= min_size;
+      bool value_is_too_large = n > max_uintx;
+
+      if (above_minimum && !value_is_too_large) {
+        *uintx_arg = n;
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
 Arguments::ArgsRange Arguments::parse_memory_size(const char* s,
                                                   julong* long_arg,
                                                   julong min_size) {
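
The same validation rules as parse_uintx, sketched standalone with strtoull
in place of HotSpot's atomull() (the helper name is hypothetical and max
stands in for max_uintx):

#include <stdint.h>
#include <stdlib.h>

static bool parse_uintx_sketch(const char* value, uint64_t* out,
                               uint64_t min_size, uint64_t max) {
  if (value == NULL || *value == '-') return false;  // sign checked up front
  char* end = NULL;
  unsigned long long n = strtoull(value, &end, 10);
  if (end == value || *end != '\0') return false;    // not a plain number
  if (n < min_size || n > max) return false;         // outside [min_size, max]
  *out = (uint64_t)n;
  return true;
}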
@@ -2488,6 +2540,37 @@
       jio_fprintf(defaultStream::error_stream(),
                   "Please use -XX:YoungPLABSize in place of "
                   "-XX:ParallelGCToSpaceAllocBufferSize in the future\n");
+    } else if (match_option(option, "-XX:CMSMarkStackSize=", &tail) ||
+               match_option(option, "-XX:G1MarkStackSize=", &tail)) {
+      julong stack_size = 0;
+      ArgsRange errcode = parse_memory_size(tail, &stack_size, 1);
+      if (errcode != arg_in_range) {
+        jio_fprintf(defaultStream::error_stream(),
+                    "Invalid mark stack size: %s\n", option->optionString);
+        describe_range_error(errcode);
+        return JNI_EINVAL;
+      }
+      FLAG_SET_CMDLINE(uintx, MarkStackSize, stack_size);
+    } else if (match_option(option, "-XX:CMSMarkStackSizeMax=", &tail)) {
+      julong max_stack_size = 0;
+      ArgsRange errcode = parse_memory_size(tail, &max_stack_size, 1);
+      if (errcode != arg_in_range) {
+        jio_fprintf(defaultStream::error_stream(),
+                    "Invalid maximum mark stack size: %s\n",
+                    option->optionString);
+        describe_range_error(errcode);
+        return JNI_EINVAL;
+      }
+      FLAG_SET_CMDLINE(uintx, MarkStackSizeMax, max_stack_size);
+    } else if (match_option(option, "-XX:ParallelMarkingThreads=", &tail) ||
+               match_option(option, "-XX:ParallelCMSThreads=", &tail)) {
+      uintx conc_threads = 0;
+      if (!parse_uintx(tail, &conc_threads, 1)) {
+        jio_fprintf(defaultStream::error_stream(),
+                    "Invalid concurrent threads: %s\n", option->optionString);
+        return JNI_EINVAL;
+      }
+      FLAG_SET_CMDLINE(uintx, ConcGCThreads, conc_threads);
     } else if (match_option(option, "-XX:", &tail)) { // -XX:xxxx
       // Skip -XX:Flags= since that case has already been handled
       if (strncmp(tail, "Flags=", strlen("Flags=")) != 0) {
@@ -2819,6 +2902,12 @@
   }
 #endif // _LP64
 
+  // MethodHandles code does not support TaggedStackInterpreter.
+  if (EnableMethodHandles && TaggedStackInterpreter) {
+    warning("TaggedStackInterpreter is not supported by MethodHandles code.  Disabling TaggedStackInterpreter.");
+    TaggedStackInterpreter = false;
+  }
+
   // Check the GC selections again.
   if (!check_gc_consistency()) {
     return JNI_EINVAL;
--- a/src/share/vm/runtime/arguments.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/runtime/arguments.hpp	Thu May 13 18:26:35 2010 -0700
@@ -351,6 +351,12 @@
   static ArgsRange check_memory_size(julong size, julong min_size);
   static ArgsRange parse_memory_size(const char* s, julong* long_arg,
                                      julong min_size);
+  // Parse a string for an unsigned integer.  Returns true if value
+  // is an unsigned integer greater than or equal to the minimum
+  // parameter passed and returns the value in uintx_arg.  Returns
+  // false otherwise, with uintx_arg undefined.
+  static bool parse_uintx(const char* value, uintx* uintx_arg,
+                          uintx min_size);
 
   // methods to build strings from individual args
   static void build_jvm_args(const char* arg);
--- a/src/share/vm/runtime/globals.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/runtime/globals.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1052,7 +1052,8 @@
           "Use SSE2 MOVDQU instruction for Arraycopy")                      \
                                                                             \
   product(intx, FieldsAllocationStyle, 1,                                   \
-          "0 - type based with oops first, 1 - with oops last")             \
+          "0 - type based with oops first, 1 - with oops last, "            \
+          "2 - oops in super and sub classes are together")                 \
                                                                             \
   product(bool, CompactFields, true,                                        \
           "Allocate nonstatic fields in gaps between previous fields")      \
@@ -1245,9 +1246,6 @@
   product(uintx, ParallelGCThreads, 0,                                      \
           "Number of parallel threads parallel gc will use")                \
                                                                             \
-  product(uintx, ParallelCMSThreads, 0,                                     \
-          "Max number of threads CMS will use for concurrent work")         \
-                                                                            \
   develop(bool, ParallelOldGCSplitALot, false,                              \
           "Provoke splitting (copying data from a young gen space to"       \
           "multiple destination spaces)")                                   \
@@ -1258,8 +1256,8 @@
   develop(bool, TraceRegionTasksQueuing, false,                             \
           "Trace the queuing of the region tasks")                          \
                                                                             \
-  product(uintx, ParallelMarkingThreads, 0,                                 \
-          "Number of marking threads concurrent gc will use")               \
+  product(uintx, ConcGCThreads, 0,                                          \
+          "Number of threads concurrent gc will use")                       \
                                                                             \
   product(uintx, YoungPLABSize, 4096,                                       \
           "Size of young gen promotion labs (in HeapWords)")                \
@@ -1535,11 +1533,11 @@
   develop(bool, CMSOverflowEarlyRestoration, false,                         \
           "Whether preserved marks should be restored early")               \
                                                                             \
-  product(uintx, CMSMarkStackSize, NOT_LP64(32*K) LP64_ONLY(4*M),           \
-          "Size of CMS marking stack")                                      \
-                                                                            \
-  product(uintx, CMSMarkStackSizeMax, NOT_LP64(4*M) LP64_ONLY(512*M),       \
-          "Max size of CMS marking stack")                                  \
+  product(uintx, MarkStackSize, NOT_LP64(32*K) LP64_ONLY(4*M),              \
+          "Size of marking stack")                                          \
+                                                                            \
+  product(uintx, MarkStackSizeMax, NOT_LP64(4*M) LP64_ONLY(512*M),          \
+          "Max size of marking stack")                                      \
                                                                             \
   notproduct(bool, CMSMarkStackOverflowALot, false,                         \
           "Whether we should simulate frequent marking stack / work queue"  \
@@ -1724,6 +1722,13 @@
           "Percentage CMS generation occupancy to start a CMS collection "  \
           "cycle. A negative value means that CMSTriggerRatio is used")     \
                                                                             \
+  product(uintx, InitiatingHeapOccupancyPercent, 45,                        \
+          "Percentage of the (entire) heap occupancy to start a "           \
+          "concurrent GC cycle. It us used by GCs that trigger a "          \
+          "concurrent GC cycle based on the occupancy of the entire heap, " \
+          "not just one of the generations (e.g., G1). A value of 0 "       \
+          "denotes 'do constant GC cycles'.")                               \
+                                                                            \
   product(intx, CMSInitiatingPermOccupancyFraction, -1,                     \
           "Percentage CMS perm generation occupancy to start a "            \
           "CMScollection cycle. A negative value means that "               \
@@ -1791,6 +1796,10 @@
   product(uintx, PreserveMarkStackSize, 1024,                               \
           "Size for stack used in promotion failure handling")              \
                                                                             \
+  develop(uintx, ObjArrayMarkingStride, 512,                                \
+          "Number of ObjArray elements to push onto the marking stack"      \
+          "before pushing a continuation entry")                            \
+                                                                            \
   product_pd(bool, UseTLAB, "Use thread-local object allocation")           \
                                                                             \
   product_pd(bool, ResizeTLAB,                                              \
@@ -2285,6 +2294,10 @@
           "print safepoint statistics only when safepoint takes"            \
           " more than PrintSafepointSatisticsTimeout in millis")            \
                                                                             \
+  product(bool, TraceSafepointCleanupTime, false,                           \
+          "print the break down of clean up tasks performed during"         \
+          " safepoint")                                                     \
+                                                                            \
   develop(bool, InlineAccessors, true,                                      \
           "inline accessor methods (get/set)")                              \
                                                                             \
@@ -2490,10 +2503,6 @@
   notproduct(bool, TraceSpilling, false,                                    \
           "Trace spilling")                                                 \
                                                                             \
-  develop(bool, DeutschShiffmanExceptions, true,                            \
-          "Fast check to find exception handler for precisely typed "       \
-          "exceptions")                                                     \
-                                                                            \
   product(bool, SplitIfBlocks, true,                                        \
           "Clone compares and control flow through merge points to fold "   \
           "some branches")                                                  \
@@ -2699,7 +2708,8 @@
   product(intx,  AllocatePrefetchStyle, 1,                                  \
           "0 = no prefetch, "                                               \
           "1 = prefetch instructions for each allocation, "                 \
-          "2 = use TLAB watermark to gate allocation prefetch")             \
+          "2 = use TLAB watermark to gate allocation prefetch, "            \
+          "3 = use BIS instruction on Sparc for allocation prefetch")       \
                                                                             \
   product(intx,  AllocatePrefetchDistance, -1,                              \
           "Distance to prefetch ahead of allocation pointer")               \
@@ -2925,7 +2935,7 @@
   product(uintx, OldSize, ScaleForWordSize(4*M),                            \
           "Initial tenured generation size (in bytes)")                     \
                                                                             \
-  product(uintx, NewSize, ScaleForWordSize(4*M),                            \
+  product(uintx, NewSize, ScaleForWordSize(1*M),                            \
           "Initial new generation size (in bytes)")                         \
                                                                             \
   product(uintx, MaxNewSize, max_uintx,                                     \
@@ -3102,6 +3112,9 @@
   develop_pd(intx, CodeEntryAlignment,                                      \
           "Code entry alignment for generated code (in bytes)")             \
                                                                             \
+  product_pd(intx, OptoLoopAlignment,                                       \
+          "Align inner loops to zero relative to this modulus")             \
+                                                                            \
   product_pd(uintx, InitialCodeCacheSize,                                   \
           "Initial code cache size (in bytes)")                             \
                                                                             \
--- a/src/share/vm/runtime/init.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/runtime/init.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -118,6 +118,9 @@
   javaClasses_init();  // must happen after vtable initialization
   stubRoutines_init2(); // note: StubRoutines need 2-phase init
 
+  // Generate MethodHandles adapters.
+  MethodHandles::generate_adapters();
+
   // Although we'd like to, we can't easily do a heap verify
   // here because the main thread isn't yet a JavaThread, so
   // its TLAB may not be made parseable from the usual interfaces.
--- a/src/share/vm/runtime/mutexLocker.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/runtime/mutexLocker.cpp	Thu May 13 18:26:35 2010 -0700
@@ -70,6 +70,7 @@
 Monitor* CMark_lock                   = NULL;
 Monitor* ZF_mon                       = NULL;
 Monitor* Cleanup_mon                  = NULL;
+Mutex*   CMRegionStack_lock           = NULL;
 Mutex*   SATB_Q_FL_lock               = NULL;
 Monitor* SATB_Q_CBL_mon               = NULL;
 Mutex*   Shared_SATB_Q_lock           = NULL;
@@ -167,6 +168,7 @@
     def(CMark_lock                 , Monitor, nonleaf,     true ); // coordinate concurrent mark thread
     def(ZF_mon                     , Monitor, leaf,        true );
     def(Cleanup_mon                , Monitor, nonleaf,     true );
+    def(CMRegionStack_lock         , Mutex,   leaf,        true );
     def(SATB_Q_FL_lock             , Mutex  , special,     true );
     def(SATB_Q_CBL_mon             , Monitor, nonleaf,     true );
     def(Shared_SATB_Q_lock         , Mutex,   nonleaf,     true );
--- a/src/share/vm/runtime/mutexLocker.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/runtime/mutexLocker.hpp	Thu May 13 18:26:35 2010 -0700
@@ -63,6 +63,7 @@
 extern Monitor* CMark_lock;                      // used for concurrent mark thread coordination
 extern Monitor* ZF_mon;                          // used for G1 conc zero-fill.
 extern Monitor* Cleanup_mon;                     // used for G1 conc cleanup.
+extern Mutex*   CMRegionStack_lock;              // used for protecting accesses to the CM region stack
 extern Mutex*   SATB_Q_FL_lock;                  // Protects SATB Q
                                                  // buffer free list.
 extern Monitor* SATB_Q_CBL_mon;                  // Protects SATB Q
--- a/src/share/vm/runtime/os.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/runtime/os.hpp	Thu May 13 18:26:35 2010 -0700
@@ -218,6 +218,9 @@
 
   static bool   guard_memory(char* addr, size_t bytes);
   static bool   unguard_memory(char* addr, size_t bytes);
+  static bool   create_stack_guard_pages(char* addr, size_t bytes);
+  static bool   remove_stack_guard_pages(char* addr, size_t bytes);
+
   static char*  map_memory(int fd, const char* file_name, size_t file_offset,
                            char *addr, size_t bytes, bool read_only = false,
                            bool allow_exec = false);
--- a/src/share/vm/runtime/safepoint.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/runtime/safepoint.cpp	Thu May 13 18:26:35 2010 -0700
@@ -30,8 +30,8 @@
 
 SafepointSynchronize::SynchronizeState volatile SafepointSynchronize::_state = SafepointSynchronize::_not_synchronized;
 volatile int  SafepointSynchronize::_waiting_to_block = 0;
-jlong SafepointSynchronize::_last_safepoint = 0;
 volatile int SafepointSynchronize::_safepoint_counter = 0;
+long  SafepointSynchronize::_end_of_last_safepoint = 0;
 static volatile int PageArmed = 0 ;        // safepoint polling page is RO|RW vs PROT_NONE
 static volatile int TryingToBlock = 0 ;    // proximate value -- for advisory use only
 static bool timeout_error_printed = false;
@@ -42,7 +42,10 @@
   Thread* myThread = Thread::current();
   assert(myThread->is_VM_thread(), "Only VM thread may execute a safepoint");
 
-  _last_safepoint = os::javaTimeNanos();
+  if (PrintSafepointStatistics || PrintSafepointStatisticsTimeout > 0) {
+    _safepoint_begin_time = os::javaTimeNanos();
+    _ts_of_current_safepoint = tty->time_stamp().seconds();
+  }
 
 #ifndef SERIALGC
   if (UseConcMarkSweepGC) {
@@ -320,6 +323,11 @@
 
   // Call stuff that needs to be run when a safepoint is just about to be completed
   do_cleanup_tasks();
+
+  if (PrintSafepointStatistics) {
+    // Record how much time was spent on the above cleanup tasks
+    update_statistics_on_cleanup_end(os::javaTimeNanos());
+  }
   }
 }
 
@@ -411,6 +419,9 @@
     ConcurrentGCThread::safepoint_desynchronize();
   }
 #endif // SERIALGC
+  // Record this time so the VMThread can keep track of how much time has
+  // elapsed since the last safepoint.
+  _end_of_last_safepoint = os::javaTimeMillis();
 }
 
 bool SafepointSynchronize::is_cleanup_needed() {
@@ -445,24 +456,23 @@
 
 // Various cleaning tasks that should be done periodically at safepoints
 void SafepointSynchronize::do_cleanup_tasks() {
-  jlong cleanup_time;
-
-  // Update fat-monitor pool, since this is a safepoint.
-  if (TraceSafepoint) {
-    cleanup_time = os::javaTimeNanos();
+  {
+    TraceTime t1("deflating idle monitors", TraceSafepointCleanupTime);
+    ObjectSynchronizer::deflate_idle_monitors();
   }
 
-  ObjectSynchronizer::deflate_idle_monitors();
-  InlineCacheBuffer::update_inline_caches();
+  {
+    TraceTime t2("updating inline caches", TraceSafepointCleanupTime);
+    InlineCacheBuffer::update_inline_caches();
+  }
+
   if(UseCounterDecay && CounterDecay::is_decay_needed()) {
+    TraceTime t3("decaying counter", TraceSafepointCleanupTime);
     CounterDecay::decay();
   }
+
+  TraceTime t4("sweeping nmethods", TraceSafepointCleanupTime);
   NMethodSweeper::sweep();
-
-  if (TraceSafepoint) {
-    tty->print_cr("do_cleanup_tasks takes "INT64_FORMAT_W(6) "ms",
-                  (os::javaTimeNanos() - cleanup_time) / MICROUNITS);
-  }
 }
 
 
@@ -979,17 +989,32 @@
 //                     Statistics & Instrumentations
 //
 SafepointSynchronize::SafepointStats*  SafepointSynchronize::_safepoint_stats = NULL;
+jlong  SafepointSynchronize::_safepoint_begin_time = 0;
 int    SafepointSynchronize::_cur_stat_index = 0;
 julong SafepointSynchronize::_safepoint_reasons[VM_Operation::VMOp_Terminating];
 julong SafepointSynchronize::_coalesced_vmop_count = 0;
 jlong  SafepointSynchronize::_max_sync_time = 0;
+jlong  SafepointSynchronize::_max_vmop_time = 0;
+float  SafepointSynchronize::_ts_of_current_safepoint = 0.0f;
 
-// last_safepoint_start_time records the start time of last safepoint.
-static jlong  last_safepoint_start_time = 0;
-static jlong  sync_end_time = 0;
+static jlong  cleanup_end_time = 0;
 static bool   need_to_track_page_armed_status = false;
 static bool   init_done = false;
 
+// Helper method to print the header.
+static void print_header() {
+  tty->print("         vmop                    "
+             "[threads: total initially_running wait_to_block]    ");
+  tty->print("[time: spin block sync cleanup vmop] ");
+
+  // No page-armed status is printed if the page is always armed.
+  if (need_to_track_page_armed_status) {
+    tty->print("page_armed ");
+  }
+
+  tty->print_cr("page_trap_count");
+}
+
 void SafepointSynchronize::deferred_initialize_stat() {
   if (init_done) return;
 
@@ -1016,19 +1041,6 @@
   if (UseCompilerSafepoints && DeferPollingPageLoopCount >= 0) {
     need_to_track_page_armed_status = true;
   }
-
-  tty->print("     vmop_name               "
-             "[threads: total initially_running wait_to_block] ");
-  tty->print("[time: spin block sync] "
-             "[vmop_time  time_elapsed] ");
-
-  // no page armed status printed out if it is always armed.
-  if (need_to_track_page_armed_status) {
-    tty->print("page_armed ");
-  }
-
-  tty->print_cr("page_trap_count");
-
   init_done = true;
 }
 
@@ -1036,6 +1048,8 @@
   assert(init_done, "safepoint statistics array hasn't been initialized");
   SafepointStats *spstat = &_safepoint_stats[_cur_stat_index];
 
+  spstat->_time_stamp = _ts_of_current_safepoint;
+
   VM_Operation *op = VMThread::vm_operation();
   spstat->_vmop_type = (op != NULL ? op->type() : -1);
   if (op != NULL) {
@@ -1054,14 +1068,6 @@
   }  else {
     spstat->_time_to_spin = 0;
   }
-
-  if (last_safepoint_start_time == 0) {
-    spstat->_time_elapsed_since_last_safepoint = 0;
-  } else {
-    spstat->_time_elapsed_since_last_safepoint = _last_safepoint -
-      last_safepoint_start_time;
-  }
-  last_safepoint_start_time = _last_safepoint;
 }
 
 void SafepointSynchronize::update_statistics_on_spin_end() {
@@ -1097,18 +1103,31 @@
   // Records the end time of sync which will be used to calculate the total
   // vm operation time. Again, the real time spent in syncing will be deducted
   // from the start of the sync time later when end_statistics is called.
-  spstat->_time_to_sync = end_time - _last_safepoint;
+  spstat->_time_to_sync = end_time - _safepoint_begin_time;
   if (spstat->_time_to_sync > _max_sync_time) {
     _max_sync_time = spstat->_time_to_sync;
   }
-  sync_end_time = end_time;
+
+  spstat->_time_to_do_cleanups = end_time;
+}
+
+void SafepointSynchronize::update_statistics_on_cleanup_end(jlong end_time) {
+  SafepointStats *spstat = &_safepoint_stats[_cur_stat_index];
+
+  // Record how long the cleanup tasks took.
+  spstat->_time_to_do_cleanups = end_time - spstat->_time_to_do_cleanups;
+
+  cleanup_end_time = end_time;
 }
 
 void SafepointSynchronize::end_statistics(jlong vmop_end_time) {
   SafepointStats *spstat = &_safepoint_stats[_cur_stat_index];
 
   // Update the vm operation time.
-  spstat->_time_to_exec_vmop = vmop_end_time -  sync_end_time;
+  spstat->_time_to_exec_vmop = vmop_end_time -  cleanup_end_time;
+  if (spstat->_time_to_exec_vmop > _max_vmop_time) {
+    _max_vmop_time = spstat->_time_to_exec_vmop;
+  }
   // Only the sync time longer than the specified
   // PrintSafepointStatisticsTimeout will be printed out right away.
   // By default, it is -1 meaning all samples will be put into the list.
@@ -1119,40 +1138,42 @@
   } else {
     // The safepoint statistics will be printed out when the _safepoint_stats
     // array fills up.
-    if (_cur_stat_index != PrintSafepointStatisticsCount - 1) {
-      _cur_stat_index ++;
-    } else {
+    if (_cur_stat_index == PrintSafepointStatisticsCount - 1) {
       print_statistics();
       _cur_stat_index = 0;
-      tty->print_cr("");
+    } else {
+      _cur_stat_index++;
     }
   }
 }
 
 void SafepointSynchronize::print_statistics() {
-  int index;
   SafepointStats* sstats = _safepoint_stats;
 
-  for (index = 0; index <= _cur_stat_index; index++) {
+  for (int index = 0; index <= _cur_stat_index; index++) {
+    if (index % 30 == 0) {
+      print_header();
+    }
     sstats = &_safepoint_stats[index];
-    tty->print("%-28s       ["
+    tty->print("%.3f: ", sstats->_time_stamp);
+    tty->print("%-26s       ["
                INT32_FORMAT_W(8)INT32_FORMAT_W(11)INT32_FORMAT_W(15)
-               "]   ",
+               "    ]    ",
                sstats->_vmop_type == -1 ? "no vm operation" :
                VM_Operation::name(sstats->_vmop_type),
                sstats->_nof_total_threads,
                sstats->_nof_initial_running_threads,
                sstats->_nof_threads_wait_to_block);
     // "/ MICROUNITS " is to convert the unit from nanos to millis.
-    tty->print("       ["
-               INT64_FORMAT_W(6)INT64_FORMAT_W(6)INT64_FORMAT_W(6)
-               "]     "
-               "["INT64_FORMAT_W(6)INT64_FORMAT_W(9) "]          ",
+    tty->print("  ["
+               INT64_FORMAT_W(6)INT64_FORMAT_W(6)
+               INT64_FORMAT_W(6)INT64_FORMAT_W(6)
+               INT64_FORMAT_W(6)"    ]  ",
                sstats->_time_to_spin / MICROUNITS,
                sstats->_time_to_wait_to_block / MICROUNITS,
                sstats->_time_to_sync / MICROUNITS,
-               sstats->_time_to_exec_vmop / MICROUNITS,
-               sstats->_time_elapsed_since_last_safepoint / MICROUNITS);
+               sstats->_time_to_do_cleanups / MICROUNITS,
+               sstats->_time_to_exec_vmop / MICROUNITS);
 
     if (need_to_track_page_armed_status) {
       tty->print(INT32_FORMAT"         ", sstats->_page_armed);
@@ -1174,7 +1195,7 @@
   // don't print it out.
   // Approximate the vm op time.
   _safepoint_stats[_cur_stat_index]._time_to_exec_vmop =
-    os::javaTimeNanos() - sync_end_time;
+    os::javaTimeNanos() - cleanup_end_time;
 
   if ( PrintSafepointStatisticsTimeout < 0 ||
        spstat->_time_to_sync > PrintSafepointStatisticsTimeout * MICROUNITS) {
@@ -1203,6 +1224,9 @@
                 _coalesced_vmop_count);
   tty->print_cr("Maximum sync time  "INT64_FORMAT_W(5)" ms",
                 _max_sync_time / MICROUNITS);
+  tty->print_cr("Maximum vm operation time (except for Exit VM operation)  "
+                INT64_FORMAT_W(5)" ms",
+                _max_vmop_time / MICROUNITS);
 }
 
 // ------------------------------------------------------------------------------------------------
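
The statistics above go into a fixed-size sample array: once PrintSafepointStatisticsCount samples have accumulated, the whole batch is printed and the index wraps back to zero, with print_statistics() re-emitting the column header every 30 rows. A minimal standalone sketch of that wrap-around bookkeeping, assuming a capacity of 300 (the real value comes from the PrintSafepointStatisticsCount flag):

    // Sketch only; kCount stands in for PrintSafepointStatisticsCount.
    #include <cstdio>

    static const int kCount = 300;   // assumed sample capacity
    static int cur_stat_index = 0;

    static void on_sample_recorded() {
      if (cur_stat_index == kCount - 1) {
        printf("flushing %d samples\n", kCount);   // print_statistics() analogue
        cur_stat_index = 0;                        // reuse the array
      } else {
        cur_stat_index++;
      }
    }

    int main() {
      for (int i = 0; i < 1000; i++) on_sample_recorded();
      return 0;
    }
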
--- a/src/share/vm/runtime/safepoint.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/runtime/safepoint.hpp	Thu May 13 18:26:35 2010 -0700
@@ -65,6 +65,7 @@
   };
 
   typedef struct {
+    float  _time_stamp;                        // time stamp, in seconds, of when the current safepoint occurred
     int    _vmop_type;                         // type of VM operation triggers the safepoint
     int    _nof_total_threads;                 // total number of Java threads
     int    _nof_initial_running_threads;       // total number of initially seen running threads
@@ -73,14 +74,14 @@
     int    _nof_threads_hit_page_trap;         // total number of threads hitting the page trap
     jlong  _time_to_spin;                      // total time in millis spent in spinning
     jlong  _time_to_wait_to_block;             // total time in millis spent waiting for threads to block
+    jlong  _time_to_do_cleanups;               // total time in millis spent performing cleanups
     jlong  _time_to_sync;                      // total time in millis spent in getting to _synchronized
     jlong  _time_to_exec_vmop;                 // total time in millis spent in vm operation itself
-    jlong  _time_elapsed_since_last_safepoint; // time elasped since last safepoint
   } SafepointStats;
 
  private:
   static volatile SynchronizeState _state;     // Threads might read this flag directly, without acquiring the Threads_lock
-  static volatile int _waiting_to_block;       // No. of threads we are waiting for to block.
+  static volatile int _waiting_to_block;       // number of threads we are waiting on to block
 
   // This counter is used for fast versions of jni_Get<Primitive>Field.
   // An even value means there is no ongoing safepoint operations.
@@ -91,19 +92,22 @@
 public:
   static volatile int _safepoint_counter;
 private:
-
-  static jlong   _last_safepoint;      // Time of last safepoint
+  static long       _end_of_last_safepoint;     // Time of last safepoint in milliseconds
 
   // statistics
-  static SafepointStats*  _safepoint_stats;     // array of SafepointStats struct
-  static int              _cur_stat_index;      // current index to the above array
-  static julong           _safepoint_reasons[]; // safepoint count for each VM op
-  static julong           _coalesced_vmop_count;// coalesced vmop count
-  static jlong            _max_sync_time;       // maximum sync time in nanos
+  static jlong            _safepoint_begin_time;     // time when safepoint begins
+  static SafepointStats*  _safepoint_stats;          // array of SafepointStats struct
+  static int              _cur_stat_index;           // current index to the above array
+  static julong           _safepoint_reasons[];      // safepoint count for each VM op
+  static julong           _coalesced_vmop_count;     // coalesced vmop count
+  static jlong            _max_sync_time;            // maximum sync time in nanos
+  static jlong            _max_vmop_time;            // maximum vm operation time in nanos
+  static float            _ts_of_current_safepoint;  // time stamp of current safepoint in seconds
 
   static void begin_statistics(int nof_threads, int nof_running);
   static void update_statistics_on_spin_end();
   static void update_statistics_on_sync_end(jlong end_time);
+  static void update_statistics_on_cleanup_end(jlong end_time);
   static void end_statistics(jlong end_time);
   static void print_statistics();
   inline static void inc_page_trap_count() {
@@ -140,7 +144,9 @@
   static void handle_polling_page_exception(JavaThread *thread);
 
   // VM Thread interface for determining safepoint rate
-  static long last_non_safepoint_interval()               { return os::javaTimeMillis() - _last_safepoint; }
+  static long last_non_safepoint_interval() {
+    return os::javaTimeMillis() - _end_of_last_safepoint;
+  }
   static bool is_cleanup_needed();
   static void do_cleanup_tasks();
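
Note that last_non_safepoint_interval() now measures from the end of the previous safepoint, in milliseconds. A hedged sketch of how a caller such as the VM thread might use it to decide on a periodic cleanup safepoint (treating GuaranteedSafepointInterval as the threshold is an assumption, not something this patch shows):

    // Sketch, not the actual VMThread loop; threshold flag is assumed.
    bool should_force_cleanup_safepoint() {
      return SafepointSynchronize::is_cleanup_needed() &&
             SafepointSynchronize::last_non_safepoint_interval() >
                 GuaranteedSafepointInterval;
    }
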
 
--- a/src/share/vm/runtime/sharedRuntime.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/runtime/sharedRuntime.cpp	Thu May 13 18:26:35 2010 -0700
@@ -256,14 +256,19 @@
 // The continuation address is the entry point of the exception handler of the
 // previous frame depending on the return address.
 
-address SharedRuntime::raw_exception_handler_for_return_address(address return_address) {
+address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thread, address return_address) {
   assert(frame::verify_return_pc(return_address), "must be a return pc");
 
+  // Reset MethodHandle flag.
+  thread->set_is_method_handle_return(false);
+
   // the fastest case first
   CodeBlob* blob = CodeCache::find_blob(return_address);
   if (blob != NULL && blob->is_nmethod()) {
     nmethod* code = (nmethod*)blob;
     assert(code != NULL, "nmethod must be present");
+    // Check if the return address is a MethodHandle call site.
+    thread->set_is_method_handle_return(code->is_method_handle_return(return_address));
     // native nmethods don't have exception handlers
     assert(!code->is_native_method(), "no exception handler");
     assert(code->header_begin() != code->exception_begin(), "no exception handler");
@@ -289,6 +294,8 @@
     if (blob->is_nmethod()) {
       nmethod* code = (nmethod*)blob;
       assert(code != NULL, "nmethod must be present");
+      // Check if the return address is a MethodHandle call site.
+      thread->set_is_method_handle_return(code->is_method_handle_return(return_address));
       assert(code->header_begin() != code->exception_begin(), "no exception handler");
       return code->exception_begin();
     }
@@ -309,10 +316,11 @@
 }
 
 
-JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(address return_address))
-  return raw_exception_handler_for_return_address(return_address);
+JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(JavaThread* thread, address return_address))
+  return raw_exception_handler_for_return_address(thread, return_address);
 JRT_END
 
+
 address SharedRuntime::get_poll_stub(address pc) {
   address stub;
   // Look up the code blob
@@ -466,14 +474,11 @@
   }
 
 #ifdef COMPILER1
-  if (nm->is_compiled_by_c1() && t == NULL && handler_bci == -1) {
-    // Exception is not handled by this frame so unwind.  Note that
-    // this is not the same as how C2 does this.  C2 emits a table
-    // entry that dispatches to the unwind code in the nmethod.
-    return NULL;
+  if (t == NULL && nm->is_compiled_by_c1()) {
+    assert(nm->unwind_handler_begin() != NULL, "");
+    return nm->unwind_handler_begin();
   }
-#endif /* COMPILER1 */
-
+#endif
 
   if (t == NULL) {
     tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d", ret_pc, handler_bci);
@@ -587,7 +592,7 @@
           // 3. Implicit null exception in nmethod
 
           if (!cb->is_nmethod()) {
-            guarantee(cb->is_adapter_blob(),
+            guarantee(cb->is_adapter_blob() || cb->is_method_handles_adapter_blob(),
                       "exception happened outside interpreter, nmethods and vtable stubs (1)");
             // There is no handler here, so we will simply unwind.
             return StubRoutines::throw_NullPointerException_at_call_entry();
@@ -892,12 +897,13 @@
   RegisterMap cbl_map(thread, false);
   frame caller_frame = thread->last_frame().sender(&cbl_map);
 
-  CodeBlob* cb = caller_frame.cb();
-  guarantee(cb != NULL && cb->is_nmethod(), "must be called from nmethod");
+  CodeBlob* caller_cb = caller_frame.cb();
+  guarantee(caller_cb != NULL && caller_cb->is_nmethod(), "must be called from nmethod");
+  nmethod* caller_nm = caller_cb->as_nmethod_or_null();
   // make sure caller is not getting deoptimized
   // and removed before we are done with it.
   // CLEANUP - with lazy deopt shouldn't need this lock
-  nmethodLocker caller_lock((nmethod*)cb);
+  nmethodLocker caller_lock(caller_nm);
 
 
   // determine call info & receiver
@@ -929,6 +935,13 @@
   }
 #endif
 
+  // JSR 292
+  // If the resolved method is a MethodHandle invoke target, the call
+  // site must be a MethodHandle call site.
+  if (callee_method->is_method_handle_invoke()) {
+    assert(caller_nm->is_method_handle_return(caller_frame.pc()), "must be MH call site");
+  }
+
   // Compute entry points. This might require generation of C2I converter
   // frames, so we cannot be holding any locks here. Furthermore, the
   // computation of the entry points is independent of patching the call.  We
@@ -940,13 +953,12 @@
   StaticCallInfo static_call_info;
   CompiledICInfo virtual_call_info;
 
-
   // Make sure the callee nmethod does not get deoptimized and removed before
   // we are done patching the code.
-  nmethod* nm = callee_method->code();
-  nmethodLocker nl_callee(nm);
+  nmethod* callee_nm = callee_method->code();
+  nmethodLocker nl_callee(callee_nm);
 #ifdef ASSERT
-  address dest_entry_point = nm == NULL ? 0 : nm->entry_point(); // used below
+  address dest_entry_point = callee_nm == NULL ? 0 : callee_nm->entry_point(); // used below
 #endif
 
   if (is_virtual) {
@@ -2077,7 +2089,6 @@
 
 // ---------------------------------------------------------------------------
 // Implementation of AdapterHandlerLibrary
-const char* AdapterHandlerEntry::name = "I2C/C2I adapters";
 AdapterHandlerTable* AdapterHandlerLibrary::_adapters = NULL;
 AdapterHandlerEntry* AdapterHandlerLibrary::_abstract_method_handler = NULL;
 const int AdapterHandlerLibrary_size = 16*K;
@@ -2129,7 +2140,7 @@
   ResourceMark rm;
 
   NOT_PRODUCT(int code_size);
-  BufferBlob *B = NULL;
+  AdapterBlob* B = NULL;
   AdapterHandlerEntry* entry = NULL;
   AdapterFingerPrint* fingerprint = NULL;
   {
@@ -2179,7 +2190,7 @@
 
     // Create I2C & C2I handlers
 
-    BufferBlob*  buf = buffer_blob(); // the temporary code buffer in CodeCache
+    BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
     if (buf != NULL) {
       CodeBuffer buffer(buf->instructions_begin(), buf->instructions_size());
       short buffer_locs[20];
@@ -2208,7 +2219,7 @@
       }
 #endif
 
-      B = BufferBlob::create(AdapterHandlerEntry::name, &buffer);
+      B = AdapterBlob::create(&buffer);
       NOT_PRODUCT(code_size = buffer.code_size());
     }
     if (B == NULL) {
@@ -2240,7 +2251,7 @@
     jio_snprintf(blob_id,
                  sizeof(blob_id),
                  "%s(%s)@" PTR_FORMAT,
-                 AdapterHandlerEntry::name,
+                 B->name(),
                  fingerprint->as_string(),
                  B->instructions_begin());
     VTune::register_stub(blob_id, B->instructions_begin(), B->instructions_end());
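
The new JavaThread* parameter exists so the per-thread MethodHandle-return flag is maintained in one place: the lookup clears it on entry and sets it only when the return pc turns out to be a MethodHandle call site, so a stale value from an earlier exception cannot leak through. A minimal sketch of that reset-then-set discipline (names follow the patch; this is not the full function body):

    // Sketch of the contract implemented above.
    address handler_for(JavaThread* thread, nmethod* nm, address ret_pc) {
      thread->set_is_method_handle_return(false);   // always reset first
      thread->set_is_method_handle_return(nm->is_method_handle_return(ret_pc));
      return nm->exception_begin();
    }
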
--- a/src/share/vm/runtime/sharedRuntime.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/runtime/sharedRuntime.hpp	Thu May 13 18:26:35 2010 -0700
@@ -96,10 +96,9 @@
   static jdouble dexp(jdouble x);
   static jdouble dpow(jdouble x, jdouble y);
 
-
   // exception handling across interpreter/compiler boundaries
-  static address raw_exception_handler_for_return_address(address return_address);
-  static address exception_handler_for_return_address(address return_address);
+  static address raw_exception_handler_for_return_address(JavaThread* thread, address return_address);
+  static address exception_handler_for_return_address(JavaThread* thread, address return_address);
 
 #ifndef SERIALGC
   // G1 write barriers
@@ -568,9 +567,6 @@
   AdapterHandlerEntry();
 
  public:
-  // The name we give all buffer blobs
-  static const char* name;
-
   address get_i2c_entry()            { return _i2c_entry; }
   address get_c2i_entry()            { return _c2i_entry; }
   address get_c2i_unverified_entry() { return _c2i_unverified_entry; }
--- a/src/share/vm/runtime/thread.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/runtime/thread.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1637,6 +1637,9 @@
     JNIHandleBlock::release_block(block);
   }
 
+  // These have to be removed while this is still a valid thread.
+  remove_stack_guard_pages();
+
   if (UseTLAB) {
     tlab().make_parsable(true);  // retire TLAB, if any
   }
@@ -2134,7 +2137,7 @@
   int allocate = os::allocate_stack_guard_pages();
   // warning("Guarding at " PTR_FORMAT " for len " SIZE_FORMAT "\n", low_addr, len);
 
-  if (allocate && !os::commit_memory((char *) low_addr, len)) {
+  if (allocate && !os::create_stack_guard_pages((char *) low_addr, len)) {
     warning("Attempt to allocate stack guard pages failed.");
     return;
   }
@@ -2155,7 +2158,7 @@
   size_t len = (StackYellowPages + StackRedPages) * os::vm_page_size();
 
   if (os::allocate_stack_guard_pages()) {
-    if (os::uncommit_memory((char *) low_addr, len)) {
+    if (os::remove_stack_guard_pages((char *) low_addr, len)) {
       _stack_guard_state = stack_guard_unused;
     } else {
       warning("Attempt to deallocate stack guard pages failed.");
--- a/src/share/vm/runtime/thread.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/runtime/thread.hpp	Thu May 13 18:26:35 2010 -0700
@@ -772,7 +772,7 @@
   volatile address _exception_pc;                // PC where exception happened
   volatile address _exception_handler_pc;        // PC for handler of exception
   volatile int     _exception_stack_size;        // Size of frame where exception happened
-  volatile int     _is_method_handle_exception;  // True if the current exception PC is at a MethodHandle call.
+  volatile int     _is_method_handle_return;     // true (== 1) if the current exception PC is a MethodHandle call site.
 
   // support for compilation
   bool    _is_compiling;                         // is true if a compilation is active in this thread (one compilation per thread possible)
@@ -1108,13 +1108,13 @@
   int      exception_stack_size() const          { return _exception_stack_size; }
   address  exception_pc() const                  { return _exception_pc; }
   address  exception_handler_pc() const          { return _exception_handler_pc; }
-  int      is_method_handle_exception() const    { return _is_method_handle_exception; }
+  bool     is_method_handle_return() const       { return _is_method_handle_return == 1; }
 
   void set_exception_oop(oop o)                  { _exception_oop = o; }
   void set_exception_pc(address a)               { _exception_pc = a; }
   void set_exception_handler_pc(address a)       { _exception_handler_pc = a; }
   void set_exception_stack_size(int size)        { _exception_stack_size = size; }
-  void set_is_method_handle_exception(int value) { _is_method_handle_exception = value; }
+  void set_is_method_handle_return(bool value)   { _is_method_handle_return = value ? 1 : 0; }
 
   // Stack overflow support
   inline size_t stack_available(address cur_sp);
@@ -1188,7 +1188,7 @@
   static ByteSize exception_pc_offset()          { return byte_offset_of(JavaThread, _exception_pc        ); }
   static ByteSize exception_handler_pc_offset()  { return byte_offset_of(JavaThread, _exception_handler_pc); }
   static ByteSize exception_stack_size_offset()  { return byte_offset_of(JavaThread, _exception_stack_size); }
-  static ByteSize is_method_handle_exception_offset() { return byte_offset_of(JavaThread, _is_method_handle_exception); }
+  static ByteSize is_method_handle_return_offset() { return byte_offset_of(JavaThread, _is_method_handle_return); }
   static ByteSize stack_guard_state_offset()     { return byte_offset_of(JavaThread, _stack_guard_state   ); }
   static ByteSize suspend_flags_offset()         { return byte_offset_of(JavaThread, _suspend_flags       ); }
 
--- a/src/share/vm/runtime/vframeArray.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/runtime/vframeArray.cpp	Thu May 13 18:26:35 2010 -0700
@@ -223,7 +223,7 @@
         break;
       case Deoptimization::Unpack_exception:
         // exception is pending
-        pc = SharedRuntime::raw_exception_handler_for_return_address(pc);
+        pc = SharedRuntime::raw_exception_handler_for_return_address(thread, pc);
         // [phh] We're going to end up in some handler or other, so it doesn't
         // matter what mdp we point to.  See exception_handler_for_exception()
         // in interpreterRuntime.cpp.
--- a/src/share/vm/runtime/vmThread.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/runtime/vmThread.cpp	Thu May 13 18:26:35 2010 -0700
@@ -25,6 +25,10 @@
 # include "incls/_precompiled.incl"
 # include "incls/_vmThread.cpp.incl"
 
+HS_DTRACE_PROBE_DECL3(hotspot, vmops__request, char *, uintptr_t, int);
+HS_DTRACE_PROBE_DECL3(hotspot, vmops__begin, char *, uintptr_t, int);
+HS_DTRACE_PROBE_DECL3(hotspot, vmops__end, char *, uintptr_t, int);
+
 // Dummy VM operation to act as first element in our circular double-linked list
 class VM_Dummy: public VM_Operation {
   VMOp_Type type() const { return VMOp_Dummy; }
@@ -132,6 +136,10 @@
 //-----------------------------------------------------------------
 // High-level interface
 bool VMOperationQueue::add(VM_Operation *op) {
+
+  HS_DTRACE_PROBE3(hotspot, vmops__request, op->name(), strlen(op->name()),
+                   op->evaluation_mode());
+
   // Encapsulates VM queue policy. Currently, that
   // only involves putting them on the right list
   if (op->evaluate_at_safepoint()) {
@@ -325,7 +333,11 @@
 
   {
     PerfTraceTime vm_op_timer(perf_accumulated_vm_operation_time());
+    HS_DTRACE_PROBE3(hotspot, vmops__begin, op->name(), strlen(op->name()),
+                     op->evaluation_mode());
     op->evaluate();
+    HS_DTRACE_PROBE3(hotspot, vmops__end, op->name(), strlen(op->name()),
+                     op->evaluation_mode());
   }
 
   // Last access of info in _cur_vm_operation!
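
The three probes fire when an operation is queued (vmops__request; the double underscore conventionally surfaces as vmops-request in DTrace), when the VM thread starts evaluating it, and when evaluation finishes; each carries the operation name, the name's length, and the evaluation mode. A conceptual C++ sketch of the bracketing once the macros expand (probe_fire is a hypothetical stand-in, not a HotSpot function):

    // Conceptual expansion only; probe_fire is hypothetical.
    void evaluate_with_probes(VM_Operation* op) {
      probe_fire("vmops-begin", op->name(), strlen(op->name()),
                 op->evaluation_mode());
      op->evaluate();
      probe_fire("vmops-end", op->name(), strlen(op->name()),
                 op->evaluation_mode());
    }
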
--- a/src/share/vm/services/dtraceAttacher.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/services/dtraceAttacher.cpp	Thu May 13 18:26:35 2010 -0700
@@ -135,4 +135,9 @@
   }
 }
 
+void DTrace::set_monitor_dprobes(bool flag) {
+  // explicit setting of DTraceMonitorProbes flag
+  set_bool_flag("DTraceMonitorProbes", flag);
+}
+
 #endif /* SOLARIS */
--- a/src/share/vm/services/dtraceAttacher.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/services/dtraceAttacher.hpp	Thu May 13 18:26:35 2010 -0700
@@ -41,4 +41,6 @@
   static void detach_all_clients();
   // set ExtendedDTraceProbes flag
   static void set_extended_dprobes(bool value);
+  // set DTraceMonitorProbes flag
+  static void set_monitor_dprobes(bool value);
 };
--- a/src/share/vm/services/g1MemoryPool.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/services/g1MemoryPool.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2007-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,7 +45,7 @@
 
 // See the comment at the top of g1MemoryPool.hpp
 size_t G1MemoryPoolSuper::eden_space_used(G1CollectedHeap* g1h) {
-  size_t young_list_length = g1h->young_list_length();
+  size_t young_list_length = g1h->young_list()->length();
   size_t eden_used = young_list_length * HeapRegion::GrainBytes;
   size_t survivor_used = survivor_space_used(g1h);
   eden_used = subtract_up_to_zero(eden_used, survivor_used);
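
eden_space_used() estimates eden as the whole young list minus the survivor share, clamped at zero by subtract_up_to_zero(). A worked example with assumed numbers (not from the patch): 40 regions on the young list with a 1 MB GrainBytes gives 40 MB; if survivors account for 8 MB, eden is reported as 32 MB.

    // Worked example with assumed values.
    size_t young_list_length = 40;
    size_t grain_bytes       = 1024 * 1024;      // assumed HeapRegion::GrainBytes
    size_t survivor_used     = 8 * grain_bytes;
    size_t eden_used         = young_list_length * grain_bytes;        // 40 MB
    eden_used = eden_used > survivor_used ? eden_used - survivor_used
                                          : 0;                         // 32 MB
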
--- a/src/share/vm/services/management.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/services/management.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1537,7 +1537,6 @@
     global->type = JMM_VMGLOBAL_TYPE_JSTRING;
   } else {
     global->type = JMM_VMGLOBAL_TYPE_UNKNOWN;
-    assert(false, "Unsupported VMGlobal Type");
     return false;
   }
 
--- a/src/share/vm/utilities/globalDefinitions.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/utilities/globalDefinitions.hpp	Thu May 13 18:26:35 2010 -0700
@@ -827,6 +827,8 @@
 #define       badHeapWord       (::badHeapWordVal)
 #define       badJNIHandle      ((oop)::badJNIHandleVal)
 
+// Default TaskQueue size is 16K (32-bit) or 128K (64-bit)
+#define TASKQUEUE_SIZE (NOT_LP64(1<<14) LP64_ONLY(1<<17))
 
 //----------------------------------------------------------------------------------------------------
 // Utility functions for bitfield manipulations
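
Both sizes are powers of two, which is what lets the queue index arithmetic use masking: for any power of two N, x & (N - 1) equals x % N. A quick standalone check of the values the macro selects:

    #include <cstdio>

    int main() {
      printf("32-bit queue: %d entries\n", 1 << 14);   // 16384  (16K)
      printf("64-bit queue: %d entries\n", 1 << 17);   // 131072 (128K)
      unsigned n = 1u << 14;
      printf("mask check: %u\n", 16385u & (n - 1));    // 1, i.e. 16385 % 16384
      return 0;
    }
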
--- a/src/share/vm/utilities/ostream.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/utilities/ostream.cpp	Thu May 13 18:26:35 2010 -0700
@@ -363,7 +363,7 @@
   return _log_file != NULL;
 }
 
-static const char* make_log_name(const char* log_name, const char* force_directory, char* buf) {
+static const char* make_log_name(const char* log_name, const char* force_directory) {
   const char* basename = log_name;
   char file_sep = os::file_separator()[0];
   const char* cp;
@@ -374,6 +374,27 @@
   }
   const char* nametail = log_name;
 
+  // Compute buffer length
+  size_t buffer_length;
+  if (force_directory != NULL) {
+    buffer_length = strlen(force_directory) + strlen(os::file_separator()) +
+                    strlen(basename) + 1;
+  } else {
+    buffer_length = strlen(log_name) + 1;
+  }
+
+  const char* star = strchr(basename, '*');
+  int star_pos = (star == NULL) ? -1 : (star - nametail);
+
+  char pid[32];
+  if (star_pos >= 0) {
+    jio_snprintf(pid, sizeof(pid), "%u", os::current_process_id());
+    buffer_length += strlen(pid);
+  }
+
+  // Create big enough buffer.
+  char *buf = NEW_C_HEAP_ARRAY(char, buffer_length);
+
   strcpy(buf, "");
   if (force_directory != NULL) {
     strcat(buf, force_directory);
@@ -381,14 +402,11 @@
     nametail = basename;       // completely skip directory prefix
   }
 
-  const char* star = strchr(basename, '*');
-  int star_pos = (star == NULL) ? -1 : (star - nametail);
-
   if (star_pos >= 0) {
     // convert foo*bar.log to foo123bar.log
     int buf_pos = (int) strlen(buf);
     strncpy(&buf[buf_pos], nametail, star_pos);
-    sprintf(&buf[buf_pos + star_pos], "%u", os::current_process_id());
+    strcpy(&buf[buf_pos + star_pos], pid);
     nametail += star_pos + 1;  // skip prefix and star
   }
 
@@ -399,20 +417,23 @@
 void defaultStream::init_log() {
   // %%% Need a MutexLocker?
   const char* log_name = LogFile != NULL ? LogFile : "hotspot.log";
-  char buf[O_BUFLEN*2];
-  const char* try_name = make_log_name(log_name, NULL, buf);
+  const char* try_name = make_log_name(log_name, NULL);
   fileStream* file = new(ResourceObj::C_HEAP) fileStream(try_name);
   if (!file->is_open()) {
     // Try again to open the file.
     char warnbuf[O_BUFLEN*2];
-    sprintf(warnbuf, "Warning:  Cannot open log file: %s\n", try_name);
+    jio_snprintf(warnbuf, sizeof(warnbuf),
+                 "Warning:  Cannot open log file: %s\n", try_name);
     // Note:  This feature is for maintainer use only.  No need for L10N.
     jio_print(warnbuf);
-    try_name = make_log_name("hs_pid*.log", os::get_temp_directory(), buf);
-    sprintf(warnbuf, "Warning:  Forcing option -XX:LogFile=%s\n", try_name);
+    FREE_C_HEAP_ARRAY(char, try_name);
+    try_name = make_log_name("hs_pid*.log", os::get_temp_directory());
+    jio_snprintf(warnbuf, sizeof(warnbuf),
+                 "Warning:  Forcing option -XX:LogFile=%s\n", try_name);
     jio_print(warnbuf);
     delete file;
     file = new(ResourceObj::C_HEAP) fileStream(try_name);
+    FREE_C_HEAP_ARRAY(char, try_name);
   }
   if (file->is_open()) {
     _log_file = file;
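
make_log_name() now computes the exact buffer length up front (directory, separator, basename, plus the pid digits when the pattern contains '*') and allocates on the C heap instead of filling a caller-supplied stack buffer. A standalone sketch of the '*'-to-pid expansion, e.g. hs_pid*.log becoming hs_pid1234.log (illustrative names; not the HotSpot function itself):

    #include <cstdio>
    #include <cstring>
    #include <cstdlib>

    char* expand_star(const char* pattern, unsigned pid) {
      char pidbuf[32];
      snprintf(pidbuf, sizeof(pidbuf), "%u", pid);
      const char* star = strchr(pattern, '*');
      size_t len = strlen(pattern) + (star ? strlen(pidbuf) : 0) + 1;
      char* buf = (char*) malloc(len);        // sized exactly, as above
      if (star == NULL) { strcpy(buf, pattern); return buf; }
      size_t prefix = (size_t)(star - pattern);
      memcpy(buf, pattern, prefix);           // "hs_pid"
      buf[prefix] = '\0';
      strcat(buf, pidbuf);                    // "hs_pid1234"
      strcat(buf, star + 1);                  // "hs_pid1234.log"
      return buf;
    }
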
--- a/src/share/vm/utilities/taskqueue.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/utilities/taskqueue.cpp	Thu May 13 18:26:35 2010 -0700
@@ -31,10 +31,6 @@
 uint ParallelTaskTerminator::_total_peeks = 0;
 #endif
 
-bool TaskQueueSuper::peek() {
-  return _bottom != _age.top();
-}
-
 int TaskQueueSetSuper::randomParkAndMiller(int *seed0) {
   const int a =      16807;
   const int m = 2147483647;
@@ -180,6 +176,13 @@
   }
 }
 
+#ifdef ASSERT
+bool ObjArrayTask::is_valid() const {
+  return _obj != NULL && _obj->is_objArray() && _index > 0 &&
+    _index < objArrayOop(_obj)->length();
+}
+#endif // ASSERT
+
 bool RegionTaskQueueWithOverflow::is_empty() {
   return (_region_queue.size() == 0) &&
          (_overflow_stack->length() == 0);
--- a/src/share/vm/utilities/taskqueue.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/utilities/taskqueue.hpp	Thu May 13 18:26:35 2010 -0700
@@ -22,6 +22,7 @@
  *
  */
 
+template <unsigned int N>
 class TaskQueueSuper: public CHeapObj {
 protected:
   // Internal type for indexing the queue; also used for the tag.
@@ -30,10 +31,7 @@
   // The first free element after the last one pushed (mod N).
   volatile uint _bottom;
 
-  enum {
-    N = 1 << NOT_LP64(14) LP64_ONLY(17), // Queue size: 16K or 128K
-    MOD_N_MASK = N - 1                   // To compute x mod N efficiently.
-  };
+  enum { MOD_N_MASK = N - 1 };
 
   class Age {
   public:
@@ -84,12 +82,12 @@
 
   // Returns a number in the range [0..N).  If the result is "N-1", it should be
   // interpreted as 0.
-  uint dirty_size(uint bot, uint top) {
+  uint dirty_size(uint bot, uint top) const {
     return (bot - top) & MOD_N_MASK;
   }
 
   // Returns the size corresponding to the given "bot" and "top".
-  uint size(uint bot, uint top) {
+  uint size(uint bot, uint top) const {
     uint sz = dirty_size(bot, top);
     // Has the queue "wrapped", so that bottom is less than top?  There's a
     // complicated special case here.  A pair of threads could perform pop_local
@@ -111,17 +109,17 @@
 public:
   TaskQueueSuper() : _bottom(0), _age() {}
 
-  // Return "true" if the TaskQueue contains any tasks.
-  bool peek();
+  // Return true if the TaskQueue contains any tasks.
+  bool peek() { return _bottom != _age.top(); }
 
   // Return an estimate of the number of elements in the queue.
   // The "careful" version admits the possibility of pop_local/pop_global
   // races.
-  uint size() {
+  uint size() const {
     return size(_bottom, _age.top());
   }
 
-  uint dirty_size() {
+  uint dirty_size() const {
     return dirty_size(_bottom, _age.top());
   }
 
@@ -132,16 +130,36 @@
 
   // Maximum number of elements allowed in the queue.  This is two less
   // than the actual queue size, for somewhat complicated reasons.
-  uint max_elems() { return N - 2; }
+  uint max_elems() const { return N - 2; }
+
+  // Total size of queue.
+  static const uint total_size() { return N; }
 };
 
-template<class E> class GenericTaskQueue: public TaskQueueSuper {
+template<class E, unsigned int N = TASKQUEUE_SIZE>
+class GenericTaskQueue: public TaskQueueSuper<N> {
+protected:
+  typedef typename TaskQueueSuper<N>::Age Age;
+  typedef typename TaskQueueSuper<N>::idx_t idx_t;
+
+  using TaskQueueSuper<N>::_bottom;
+  using TaskQueueSuper<N>::_age;
+  using TaskQueueSuper<N>::increment_index;
+  using TaskQueueSuper<N>::decrement_index;
+  using TaskQueueSuper<N>::dirty_size;
+
+public:
+  using TaskQueueSuper<N>::max_elems;
+  using TaskQueueSuper<N>::size;
+
 private:
   // Slow paths for push, pop_local.  (pop_global has no fast path.)
   bool push_slow(E t, uint dirty_n_elems);
   bool pop_local_slow(uint localBot, Age oldAge);
 
 public:
+  typedef E element_type;
+
   // Initializes the queue to empty.
   GenericTaskQueue();
 
@@ -172,19 +190,19 @@
   volatile E* _elems;
 };
 
-template<class E>
-GenericTaskQueue<E>::GenericTaskQueue():TaskQueueSuper() {
+template<class E, unsigned int N>
+GenericTaskQueue<E, N>::GenericTaskQueue() {
   assert(sizeof(Age) == sizeof(size_t), "Depends on this.");
 }
 
-template<class E>
-void GenericTaskQueue<E>::initialize() {
+template<class E, unsigned int N>
+void GenericTaskQueue<E, N>::initialize() {
   _elems = NEW_C_HEAP_ARRAY(E, N);
   guarantee(_elems != NULL, "Allocation failed.");
 }
 
-template<class E>
-void GenericTaskQueue<E>::oops_do(OopClosure* f) {
+template<class E, unsigned int N>
+void GenericTaskQueue<E, N>::oops_do(OopClosure* f) {
   // tty->print_cr("START OopTaskQueue::oops_do");
   uint iters = size();
   uint index = _bottom;
@@ -200,21 +218,21 @@
   // tty->print_cr("END OopTaskQueue::oops_do");
 }
 
-
-template<class E>
-bool GenericTaskQueue<E>::push_slow(E t, uint dirty_n_elems) {
+template<class E, unsigned int N>
+bool GenericTaskQueue<E, N>::push_slow(E t, uint dirty_n_elems) {
   if (dirty_n_elems == N - 1) {
     // Actually means 0, so do the push.
     uint localBot = _bottom;
-    _elems[localBot] = t;
+    // g++ complains if the volatile result of the assignment is unused.
+    const_cast<E&>(_elems[localBot] = t);
     OrderAccess::release_store(&_bottom, increment_index(localBot));
     return true;
   }
   return false;
 }
 
-template<class E>
-bool GenericTaskQueue<E>::
+template<class E, unsigned int N>
+bool GenericTaskQueue<E, N>::
 pop_local_slow(uint localBot, Age oldAge) {
   // This queue was observed to contain exactly one element; either this
   // thread will claim it, or a competing "pop_global".  In either case,
@@ -246,8 +264,8 @@
   return false;
 }
 
-template<class E>
-bool GenericTaskQueue<E>::pop_global(E& t) {
+template<class E, unsigned int N>
+bool GenericTaskQueue<E, N>::pop_global(E& t) {
   Age oldAge = _age.get();
   uint localBot = _bottom;
   uint n_elems = size(localBot, oldAge.top());
@@ -255,7 +273,7 @@
     return false;
   }
 
-  t = _elems[oldAge.top()];
+  const_cast<E&>(t = _elems[oldAge.top()]);
   Age newAge(oldAge);
   newAge.increment();
   Age resAge = _age.cmpxchg(newAge, oldAge);
@@ -266,8 +284,8 @@
   return resAge == oldAge;
 }
 
-template<class E>
-GenericTaskQueue<E>::~GenericTaskQueue() {
+template<class E, unsigned int N>
+GenericTaskQueue<E, N>::~GenericTaskQueue() {
   FREE_C_HEAP_ARRAY(E, _elems);
 }
 
@@ -280,16 +298,18 @@
   virtual bool peek() = 0;
 };
 
-template<class E> class GenericTaskQueueSet: public TaskQueueSetSuper {
+template<class T>
+class GenericTaskQueueSet: public TaskQueueSetSuper {
 private:
   uint _n;
-  GenericTaskQueue<E>** _queues;
+  T** _queues;
 
 public:
+  typedef typename T::element_type E;
+
   GenericTaskQueueSet(int n) : _n(n) {
-    typedef GenericTaskQueue<E>* GenericTaskQueuePtr;
+    typedef T* GenericTaskQueuePtr;
     _queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n);
-    guarantee(_queues != NULL, "Allocation failure.");
     for (int i = 0; i < n; i++) {
       _queues[i] = NULL;
     }
@@ -299,9 +319,9 @@
   bool steal_best_of_2(uint queue_num, int* seed, E& t);
   bool steal_best_of_all(uint queue_num, int* seed, E& t);
 
-  void register_queue(uint i, GenericTaskQueue<E>* q);
+  void register_queue(uint i, T* q);
 
-  GenericTaskQueue<E>* queue(uint n);
+  T* queue(uint n);
 
   // The thread with queue number "queue_num" (and whose random number seed
   // is at "seed") is trying to steal a task from some other queue.  (It
@@ -313,27 +333,27 @@
   bool peek();
 };
 
-template<class E>
-void GenericTaskQueueSet<E>::register_queue(uint i, GenericTaskQueue<E>* q) {
+template<class T> void
+GenericTaskQueueSet<T>::register_queue(uint i, T* q) {
   assert(i < _n, "index out of range.");
   _queues[i] = q;
 }
 
-template<class E>
-GenericTaskQueue<E>* GenericTaskQueueSet<E>::queue(uint i) {
+template<class T> T*
+GenericTaskQueueSet<T>::queue(uint i) {
   return _queues[i];
 }
 
-template<class E>
-bool GenericTaskQueueSet<E>::steal(uint queue_num, int* seed, E& t) {
+template<class T> bool
+GenericTaskQueueSet<T>::steal(uint queue_num, int* seed, E& t) {
   for (uint i = 0; i < 2 * _n; i++)
     if (steal_best_of_2(queue_num, seed, t))
       return true;
   return false;
 }
 
-template<class E>
-bool GenericTaskQueueSet<E>::steal_best_of_all(uint queue_num, int* seed, E& t) {
+template<class T> bool
+GenericTaskQueueSet<T>::steal_best_of_all(uint queue_num, int* seed, E& t) {
   if (_n > 2) {
     int best_k;
     uint best_sz = 0;
@@ -356,8 +376,8 @@
   }
 }
 
-template<class E>
-bool GenericTaskQueueSet<E>::steal_1_random(uint queue_num, int* seed, E& t) {
+template<class T> bool
+GenericTaskQueueSet<T>::steal_1_random(uint queue_num, int* seed, E& t) {
   if (_n > 2) {
     uint k = queue_num;
     while (k == queue_num) k = randomParkAndMiller(seed) % _n;
@@ -372,8 +392,8 @@
   }
 }
 
-template<class E>
-bool GenericTaskQueueSet<E>::steal_best_of_2(uint queue_num, int* seed, E& t) {
+template<class T> bool
+GenericTaskQueueSet<T>::steal_best_of_2(uint queue_num, int* seed, E& t) {
   if (_n > 2) {
     uint k1 = queue_num;
     while (k1 == queue_num) k1 = randomParkAndMiller(seed) % _n;
@@ -394,8 +414,8 @@
   }
 }
 
-template<class E>
-bool GenericTaskQueueSet<E>::peek() {
+template<class T>
+bool GenericTaskQueueSet<T>::peek() {
   // Try all the queues.
   for (uint j = 0; j < _n; j++) {
     if (_queues[j]->peek())
@@ -465,14 +485,16 @@
 #endif
 };
 
-template<class E> inline bool GenericTaskQueue<E>::push(E t) {
+template<class E, unsigned int N> inline bool
+GenericTaskQueue<E, N>::push(E t) {
   uint localBot = _bottom;
   assert((localBot >= 0) && (localBot < N), "_bottom out of range.");
   idx_t top = _age.top();
   uint dirty_n_elems = dirty_size(localBot, top);
-  assert((dirty_n_elems >= 0) && (dirty_n_elems < N), "n_elems out of range.");
+  assert(dirty_n_elems < N, "n_elems out of range.");
   if (dirty_n_elems < max_elems()) {
-    _elems[localBot] = t;
+    // g++ complains if the volatile result of the assignment is unused.
+    const_cast<E&>(_elems[localBot] = t);
     OrderAccess::release_store(&_bottom, increment_index(localBot));
     return true;
   } else {
@@ -480,7 +502,8 @@
   }
 }
 
-template<class E> inline bool GenericTaskQueue<E>::pop_local(E& t) {
+template<class E, unsigned int N> inline bool
+GenericTaskQueue<E, N>::pop_local(E& t) {
   uint localBot = _bottom;
   // This value cannot be N-1.  That can only occur as a result of
   // the assignment to bottom in this method.  If it does, this method
@@ -494,7 +517,7 @@
   // This is necessary to prevent any read below from being reordered
   // before the store just above.
   OrderAccess::fence();
-  t = _elems[localBot];
+  const_cast<E&>(t = _elems[localBot]);
   // This is a second read of "age"; the "size()" above is the first.
   // If there's still at least one element in the queue, based on the
   // "_bottom" and "age" we've read, then there can be no interference with
@@ -511,17 +534,23 @@
 }
 
 typedef oop Task;
-typedef GenericTaskQueue<Task>         OopTaskQueue;
-typedef GenericTaskQueueSet<Task>      OopTaskQueueSet;
+typedef GenericTaskQueue<Task>            OopTaskQueue;
+typedef GenericTaskQueueSet<OopTaskQueue> OopTaskQueueSet;
 
-
-#define COMPRESSED_OOP_MASK  1
+#ifdef _MSC_VER
+#pragma warning(push)
+// warning C4522: multiple assignment operators specified
+#pragma warning(disable:4522)
+#endif
 
 // This is a container class for either an oop* or a narrowOop*.
 // Both are pushed onto a task queue and the consumer will test is_narrow()
 // to determine which should be processed.
 class StarTask {
   void*  _holder;        // either union oop* or narrowOop*
+
+  enum { COMPRESSED_OOP_MASK = 1 };
+
  public:
   StarTask(narrowOop* p) {
     assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
@@ -537,20 +566,61 @@
     return (narrowOop*)((uintptr_t)_holder & ~COMPRESSED_OOP_MASK);
   }
 
-  // Operators to preserve const/volatile in assignments required by gcc
-  void operator=(const volatile StarTask& t) volatile { _holder = t._holder; }
+  StarTask& operator=(const StarTask& t) {
+    _holder = t._holder;
+    return *this;
+  }
+  volatile StarTask& operator=(const volatile StarTask& t) volatile {
+    _holder = t._holder;
+    return *this;
+  }
 
   bool is_narrow() const {
     return (((uintptr_t)_holder & COMPRESSED_OOP_MASK) != 0);
   }
 };
 
-typedef GenericTaskQueue<StarTask>     OopStarTaskQueue;
-typedef GenericTaskQueueSet<StarTask>  OopStarTaskQueueSet;
+class ObjArrayTask
+{
+public:
+  ObjArrayTask(oop o = NULL, int idx = 0): _obj(o), _index(idx) { }
+  ObjArrayTask(oop o, size_t idx): _obj(o), _index(int(idx)) {
+    assert(idx <= size_t(max_jint), "too big");
+  }
+  ObjArrayTask(const ObjArrayTask& t): _obj(t._obj), _index(t._index) { }
+
+  ObjArrayTask& operator =(const ObjArrayTask& t) {
+    _obj = t._obj;
+    _index = t._index;
+    return *this;
+  }
+  volatile ObjArrayTask&
+  operator =(const volatile ObjArrayTask& t) volatile {
+    _obj = t._obj;
+    _index = t._index;
+    return *this;
+  }
+
+  inline oop obj()   const { return _obj; }
+  inline int index() const { return _index; }
+
+  DEBUG_ONLY(bool is_valid() const); // Tasks to be pushed/popped must be valid.
+
+private:
+  oop _obj;
+  int _index;
+};
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
+typedef GenericTaskQueue<StarTask>            OopStarTaskQueue;
+typedef GenericTaskQueueSet<OopStarTaskQueue> OopStarTaskQueueSet;
 
 typedef size_t RegionTask;  // index for region
-typedef GenericTaskQueue<RegionTask>    RegionTaskQueue;
-typedef GenericTaskQueueSet<RegionTask> RegionTaskQueueSet;
+typedef GenericTaskQueue<RegionTask>         RegionTaskQueue;
+typedef GenericTaskQueueSet<RegionTaskQueue> RegionTaskQueueSet;
 
 class RegionTaskQueueWithOverflow: public CHeapObj {
  protected:
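
With the capacity now a template parameter (defaulting to TASKQUEUE_SIZE) and the set parameterized by the queue type, an alternative capacity is just another instantiation; N must remain a power of two so that MOD_N_MASK = N - 1 still works for the index arithmetic. A hedged illustration with hypothetical typedef names, mirroring the ones above:

    // Hypothetical instantiation; not part of the patch.
    typedef GenericTaskQueue<ObjArrayTask, 1 << 10>     SmallObjArrayTaskQueue;
    typedef GenericTaskQueueSet<SmallObjArrayTaskQueue> SmallObjArrayTaskQueueSet;
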
--- a/src/share/vm/utilities/vmError.cpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/utilities/vmError.cpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -212,6 +212,51 @@
   return buf;
 }
 
+void VMError::print_stack_trace(outputStream* st, JavaThread* jt,
+                                char* buf, int buflen, bool verbose) {
+#ifdef ZERO
+  if (jt->zero_stack()->sp() && jt->top_zero_frame()) {
+    // StackFrameStream uses the frame anchor, which may not have
+    // been set up.  This can be done at any time in Zero, however,
+    // so if it hasn't been set up then we just set it up now and
+    // clear it again when we're done.
+    bool has_last_Java_frame = jt->has_last_Java_frame();
+    if (!has_last_Java_frame)
+      jt->set_last_Java_frame();
+    st->print("Java frames:");
+
+    // If the top frame is a Shark frame and the frame anchor isn't
+    // set up then it's possible that the information in the frame
+    // is garbage: it could be from a previous decache, or it could
+    // simply have never been written.  So we print a warning...
+    StackFrameStream sfs(jt);
+    if (!has_last_Java_frame && !sfs.is_done()) {
+      if (sfs.current()->zeroframe()->is_shark_frame()) {
+        st->print(" (TOP FRAME MAY BE JUNK)");
+      }
+    }
+    st->cr();
+
+    // Print the frames
+    for(int i = 0; !sfs.is_done(); sfs.next(), i++) {
+      sfs.current()->zero_print_on_error(i, st, buf, buflen);
+      st->cr();
+    }
+
+    // Reset the frame anchor if necessary
+    if (!has_last_Java_frame)
+      jt->reset_last_Java_frame();
+  }
+#else
+  if (jt->has_last_Java_frame()) {
+    st->print_cr("Java frames: (J=compiled Java code, j=interpreted, Vv=VM code)");
+    for(StackFrameStream sfs(jt); !sfs.is_done(); sfs.next()) {
+      sfs.current()->print_on_error(st, buf, buflen, verbose);
+      st->cr();
+    }
+  }
+#endif // ZERO
+}
 
 // This is the main function to report a fatal error. Only one thread can
 // call this function, so we don't need to worry about MT-safety. But it's
@@ -457,49 +502,7 @@
   STEP(130, "(printing Java stack)" )
 
      if (_verbose && _thread && _thread->is_Java_thread()) {
-       JavaThread* jt = (JavaThread*)_thread;
-#ifdef ZERO
-       if (jt->zero_stack()->sp() && jt->top_zero_frame()) {
-         // StackFrameStream uses the frame anchor, which may not have
-         // been set up.  This can be done at any time in Zero, however,
-         // so if it hasn't been set up then we just set it up now and
-         // clear it again when we're done.
-         bool has_last_Java_frame = jt->has_last_Java_frame();
-         if (!has_last_Java_frame)
-           jt->set_last_Java_frame();
-         st->print("Java frames:");
-
-         // If the top frame is a Shark frame and the frame anchor isn't
-         // set up then it's possible that the information in the frame
-         // is garbage: it could be from a previous decache, or it could
-         // simply have never been written.  So we print a warning...
-         StackFrameStream sfs(jt);
-         if (!has_last_Java_frame && !sfs.is_done()) {
-           if (sfs.current()->zeroframe()->is_shark_frame()) {
-             st->print(" (TOP FRAME MAY BE JUNK)");
-           }
-         }
-         st->cr();
-
-         // Print the frames
-         for(int i = 0; !sfs.is_done(); sfs.next(), i++) {
-           sfs.current()->zero_print_on_error(i, st, buf, sizeof(buf));
-           st->cr();
-         }
-
-         // Reset the frame anchor if necessary
-         if (!has_last_Java_frame)
-           jt->reset_last_Java_frame();
-       }
-#else
-       if (jt->has_last_Java_frame()) {
-         st->print_cr("Java frames: (J=compiled Java code, j=interpreted, Vv=VM code)");
-         for(StackFrameStream sfs(jt); !sfs.is_done(); sfs.next()) {
-           sfs.current()->print_on_error(st, buf, sizeof(buf));
-           st->cr();
-         }
-       }
-#endif // ZERO
+       print_stack_trace(st, (JavaThread*)_thread, buf, sizeof(buf));
      }
 
   STEP(135, "(printing target Java thread stack)" )
@@ -509,13 +512,7 @@
        JavaThread*  jt = ((NamedThread *)_thread)->processed_thread();
        if (jt != NULL) {
          st->print_cr("JavaThread " PTR_FORMAT " (nid = " UINTX_FORMAT ") was being processed", jt, jt->osthread()->thread_id());
-         if (jt->has_last_Java_frame()) {
-           st->print_cr("Java frames: (J=compiled Java code, j=interpreted, Vv=VM code)");
-           for(StackFrameStream sfs(jt); !sfs.is_done(); sfs.next()) {
-             sfs.current()->print_on_error(st, buf, sizeof(buf), true);
-             st->cr();
-           }
-         }
+         print_stack_trace(st, jt, buf, sizeof(buf), true);
        }
      }
 
@@ -807,8 +804,8 @@
       if (fd == -1) {
         // try temp directory
         const char * tmpdir = os::get_temp_directory();
-        jio_snprintf(buffer, sizeof(buffer), "%shs_err_pid%u.log",
-                     (tmpdir ? tmpdir : ""), os::current_process_id());
+        jio_snprintf(buffer, sizeof(buffer), "%s%shs_err_pid%u.log",
+                     tmpdir, os::file_separator(), os::current_process_id());
         fd = open(buffer, O_WRONLY | O_CREAT | O_TRUNC, 0666);
       }
 
--- a/src/share/vm/utilities/vmError.hpp	Mon May 10 19:45:24 2010 -0700
+++ b/src/share/vm/utilities/vmError.hpp	Thu May 13 18:26:35 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -70,6 +70,10 @@
   // generate an error report
   void report(outputStream* st);
 
+  // generate a stack trace
+  static void print_stack_trace(outputStream* st, JavaThread* jt,
+                                char* buf, int buflen, bool verbose = false);
+
   // accessor
   const char* message()         { return _message; }
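
Factoring the stack printer into a static helper lets STEP 130 (the crashing thread) and STEP 135 (a target thread being processed) share one implementation, with verbosity as a flag. A hedged usage sketch following the call sites above:

    // Sketch; st, jt and O_BUFLEN as at the call sites in vmError.cpp.
    char buf[O_BUFLEN];
    VMError::print_stack_trace(st, jt, buf, sizeof(buf));         // terse
    VMError::print_stack_trace(st, jt, buf, sizeof(buf), true);   // verbose frames
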
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6431242/Test.java	Thu May 13 18:26:35 2010 -0700
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 6431242
+ * @run main/othervm -server -XX:+PrintCompilation Test
+ */
+
+public class Test {
+
+  int    _len      = 8;
+  int[]  _arr_i    = new int[_len];
+  long[] _arr_l    = new long[_len];
+
+  int[]  _arr_i_cp = new int [_len];
+  long[] _arr_l_cp = new long [_len];
+
+  int _k     = 0x12345678;
+  int _j     = 0;
+  int _ir    = 0x78563412;
+  int _ir1   = 0x78563413;
+  int _ir2   = 0x79563412;
+
+  long _m    = 0x123456789abcdef0L;
+  long _l    = 0L;
+  long _lr   = 0xf0debc9a78563412L;
+  long _lr1  = 0xf0debc9a78563413L;
+  long _lr2  = 0xf1debc9a78563412L;
+
+  void init() {
+    for (int i=0; i<_arr_i.length; i++) {
+      _arr_i[i] = _k;
+      _arr_l[i] = _m;
+    }
+  }
+
+  public int test_int_reversed(int i) {
+    return Integer.reverseBytes(i);
+  }
+
+  public long test_long_reversed(long i) {
+    return Long.reverseBytes(i);
+  }
+
+  public void test_copy_ints(int[] dst, int[] src) {
+    for(int i=0; i<src.length; i++) {
+      dst[i] = Integer.reverseBytes(src[i]);
+    }
+  }
+
+  public void test_copy_ints_reversed(int[] dst, int[] src) {
+    for (int i=0; i<src.length; i++) {
+      dst[i] = 1 + Integer.reverseBytes(src[i]);
+    }
+  }
+
+  public void test_copy_ints_store_reversed(int[] dst, int[] src) {
+    for(int i=0; i<src.length; i++) {
+      dst[i] = Integer.reverseBytes(1 + src[i]);
+    }
+  }
+
+  public void test_copy_longs(long[] dst, long[] src) {
+    for(int i=0; i<src.length; i++) {
+      dst[i] = Long.reverseBytes(src[i]);
+    }
+  }
+
+  public void test_copy_longs_reversed(long[] dst, long[] src) {
+    for (int i=0; i<src.length; i++) {
+      dst[i] = 1 + Long.reverseBytes(src[i]);
+    }
+  }
+
+  public void test_copy_longs_store_reversed(long[] dst, long[] src) {
+    for(int i=0; i<src.length; i++) {
+      dst[i] = Long.reverseBytes(1 + src[i]);
+    }
+  }
+
+  public void test() throws Exception {
+    int up_limit=90000;
+
+
+    //test single
+
+    for (int loop=0; loop<up_limit; loop++) {
+      _j = test_int_reversed(_k);
+      if (_j != _ir ) {
+        throw new Exception("Interger.reverseBytes failed " + _j + " iter " + loop);
+      }
+      _l = test_long_reversed(_m);
+      if (_l != _lr ) {
+        throw new Exception("Long.reverseBytes failed " + _l + " iter " + loop);
+      }
+    }
+
+    // test scalar load/store
+    for (int loop=0; loop<up_limit; loop++) {
+
+      test_copy_ints(_arr_i_cp, _arr_i);
+      for (int j=0; j< _arr_i.length; j++) {
+        if (_arr_i_cp[j] != _ir) {
+          throw new Exception("Interger.reverseBytes failed test_copy_ints iter " + loop);
+        }
+      }
+
+      test_copy_ints_reversed(_arr_i_cp, _arr_i);
+      for (int j=0; j< _arr_i.length; j++) {
+        if (_arr_i_cp[j] != _ir1) {
+          throw new Exception("Interger.reverseBytes failed test_copy_ints_reversed iter " + loop);
+        }
+      }
+      test_copy_ints_store_reversed(_arr_i_cp, _arr_i);
+      for (int j=0; j< _arr_i.length; j++) {
+        if (_arr_i_cp[j] != _ir2) {
+          throw new Exception("Interger.reverseBytes failed test_copy_ints_store_reversed iter " + loop);
+        }
+      }
+
+      test_copy_longs(_arr_l_cp, _arr_l);
+      for (int j=0; j< _arr_i.length; j++) {
+        if (_arr_l_cp[j] != _lr) {
+          throw new Exception("Long.reverseBytes failed test_copy_longs iter " + loop);
+        }
+      }
+      test_copy_longs_reversed(_arr_l_cp, _arr_l);
+      for (int j=0; j< _arr_i.length; j++) {
+        if (_arr_l_cp[j] != _lr1) {
+          throw new Exception("Long.reverseBytes failed test_copy_longs_reversed iter " + loop);
+        }
+      }
+      test_copy_longs_store_reversed(_arr_l_cp, _arr_l);
+      for (int j=0; j< _arr_i.length; j++) {
+        if (_arr_l_cp[j] != _lr2) {
+          throw new Exception("Long.reverseBytes failed test_copy_longs_store_reversed iter " + loop);
+        }
+      }
+
+    }
+  }
+
+  public static void main(String args[]) {
+    try {
+      Test t = new Test();
+      t.init();
+      t.test();
+      System.out.println("Passed");
+    } catch (Exception e) {
+      e.printStackTrace();
+      System.out.println("Failed");
+    }
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6663854/Test6663854.java	Thu May 13 18:26:35 2010 -0700
@@ -0,0 +1,1521 @@
+/*
+ * Copyright 2010 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6663854
+ * @summary assert(n != __null,"Bad immediate dominator info.") in C2 with -Xcomp
+ *
+ * @run main/othervm -Xcomp Test6663854
+ */
+
+// This is a randomly generated test that exposed a crash so don't try
+// to make sense of what it's doing.  The output produced is likely
+// to be stable but it is not being checked as part of this test.
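+// The pass criterion is simply that the VM compiles and runs all of this
+// under -Xcomp without tripping the C2 "Bad immediate dominator info."
+// assert named in @summary; the toString()/Printer dumps presumably exist
+// only to keep the computed state observable for debugging.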
+
+final class Test6663854_Class_0 {
+    final long var_1 = ((byte)1.2510591E38F & - ~ - - - ~3554133935918431232L) << 'g';
+    final static long var_2 = 662144491976981504L;
+    static byte[] var_3;
+    static byte var_4;
+    static float var_5;
+    final short var_6 = 4156;
+    double var_7;
+    char var_8 = 'F';
+    static long var_9;
+    char var_10;
+
+
+    public Test6663854_Class_0()
+    {
+        var_7 = (var_9 = (var_4 = (var_4 = (byte)~ -var_1)));
+        var_7 = (var_4 = (var_4 = (byte)(var_9 = 690685817))) + ~var_2;
+        long var_17 = 1755837857030316032L;
+        var_8 *= (var_7 = var_6);
+        {
+            var_4 = (var_4 = (byte)var_6);
+        }
+        var_5 = (var_4 = (var_4 = (var_4 = (var_4 = (byte)var_17))));
+        if (false)
+        {
+            var_5 = var_6;
+            var_4 = (var_4 = (var_4 = (var_4 = (var_4 = (var_4 = (var_4 = (byte)3.2446162E38F))))));
+        }
+        else
+        {
+            var_8 &= false ? var_6 : var_6;
+        }
+        ((new Test6663854_Class_0[(byte)+ (var_5 = (var_4 = (byte)'Q'))])[var_4 = (byte)396008820]).var_8++;
+        var_5 = (var_8 ^= var_6 >>> - (var_4 = (byte)var_6)) >= 360526660 ? var_8 : var_8;
+    }
+
+
+
+    protected Object clone()
+    {
+        char var_11 = 'E';
+        var_7 = 1.2181972357945285E308 + var_6 - + (var_7 = 1269180234) * 6.244754764669101E307;
+        var_7 = (var_4 = (var_4 = (var_4 = (var_4 = (var_4 = (byte)var_11)))));
+        {
+            var_4 = (var_4 = (var_4 = (byte)1016847968));
+            (false & true ? "nmot" : "m").charAt((false || !true ? var_6 : var_6) * var_6);
+            var_8 |= var_8-- == (long)(var_7 = (byte)var_2 & ~var_2) ? (var_4 = (var_4 = (var_4 = (var_4 = (byte)1.5012703E38F)))) : (var_4 = (byte)4795604615834685440L);
+            ++var_11;
+            var_5 = (var_9 = (int)var_6 | 302324412 - 1720735728);
+            "yfkasl".replaceAll("xlvn" + "peedfph", "awfbuujts");
+        }
+        var_9 = false ? var_6 : var_6;
+        {
+            final boolean var_12 = false;
+        }
+        var_5 = var_2;
+        {
+            var_9 = ((new byte[(byte)var_6][(byte)1.3719104187525612E308])[var_4 = (byte)var_1])[var_4 = (var_4 = (var_4 = (var_4 = (byte)2.2549062E38F)))] + 889886326;
+        }
+        var_9 = (var_2 | ~var_1) & 848602225;
+        {
+            var_4 = (var_4 = (var_4 = (var_4 = (byte)var_6)));
+        }
+        var_7 = var_6;
+        var_9 = var_6;
+        var_5 = (var_4 = (var_4 = (byte)var_8));
+        var_7 = 964691433430251520L ^ var_6;
+        var_5 = ~2571981928559810560L;
+        return "yvetn".endsWith("dtgstxcu".toLowerCase()) | true | true ? (4.2416016638902373E307 != -3.1295498440444366E307 ? "p" : "ars") : (new Object[(byte)var_1])[(byte)(var_9 = 7519039949758987264L)];
+    }
+
+    public boolean equals(Object obj)
+    {
+        var_5 = 1445825238;
+        {
+            var_7 = 2.221982E38F;
+        }
+        {
+            var_9 = var_6;
+        }
+        var_7 = true ? (var_8 ^= (byte)3588201925057082368L) : (var_7 = var_8);
+        short var_13 = var_6;
+        var_4 = (var_4 = (var_4 = (var_4 = (var_4 = (byte)'L'))));
+        var_13--;
+        return true | !false;
+    }
+
+
+    protected char[] func_0()
+    {
+        if (!true ? !false : false)
+        {
+            var_8 %= -(new double[(byte)197311342][(byte)5.6183004E36F])[(byte)(var_5 = (var_9 = 'l'))][(byte)(var_7 = var_6)] * (1.9583867E38F * + (var_4 = (byte)1566742425));
+            var_8 >>= ~6333520277515092992L | var_8;
+        }
+        else
+        {
+            new String();
+        }
+        {
+            var_7 = (byte)5882830303456225280L % ((var_9 = var_6) << var_1);
+        }
+        var_7 = var_6;
+        float var_14;
+        var_14 = (var_4 = (var_4 = (byte)1081376784));
+        var_8 -= 3.1929636E38F;
+        var_7 = 1.5931970715760934E308;
+        var_4 = true | (!false | (var_7 = 2.4773615E37F) < (byte)(short)825088022) ? (var_4 = (byte)4.3114896E37F) : (byte)var_8;
+        final int var_15 = var_6 >> 1197848918;
+        var_9 = var_15;
+        var_14 = 983699379;
+        var_9 = var_6;
+        return (new char[var_4 = (byte)3.593425789671245E307][(byte)1.6242754453980546E308])[var_4 = (byte)(var_9 = (var_4 = (byte)var_1))];
+    }
+
+    final void func_1(final int arg_0)
+    {
+        var_5 = 6370513305314412544L == -5.201821E37F ? 2.5220462E38F : (var_5 = (byte)'f');
+        ((new Test6663854_Class_0[(byte)1250580004024059904L][(byte)(short)var_8])[(byte)(var_8 ^= 68680455462355968L)][var_4 = (var_4 = (byte)var_2)]).var_8 *= (char)(byte)(var_6 * var_6) <= - (var_7 = (var_7 = var_6)) ? ~5194741848806877184L : var_8;
+        var_5 = !"tshhykoap".endsWith("q") ? arg_0 : 'K';
+        var_8 *= (var_8 -= (var_5 = - (var_5 = var_8)));
+        int var_16;
+    }
+
+    public String toString()
+    {
+        String result =  "[\n";
+        result += "Test6663854_Class_0.var_8 = "; result += Test6663854.Printer.print(var_8);
+        result += "\n";
+        result += "Test6663854_Class_0.var_10 = "; result += Test6663854.Printer.print(var_10);
+        result += "\n";
+        result += "Test6663854_Class_0.var_3 = "; result += Test6663854.Printer.print(var_3);
+        result += "\n";
+        result += "Test6663854_Class_0.var_1 = "; result += Test6663854.Printer.print(var_1);
+        result += "\n";
+        result += "Test6663854_Class_0.var_2 = "; result += Test6663854.Printer.print(var_2);
+        result += "\n";
+        result += "Test6663854_Class_0.var_9 = "; result += Test6663854.Printer.print(var_9);
+        result += "\n";
+        result += "Test6663854_Class_0.var_7 = "; result += Test6663854.Printer.print(var_7);
+        result += "\n";
+        result += "Test6663854_Class_0.var_5 = "; result += Test6663854.Printer.print(var_5);
+        result += "\n";
+        result += "Test6663854_Class_0.var_6 = "; result += Test6663854.Printer.print(var_6);
+        result += "\n";
+        result += "Test6663854_Class_0.var_4 = "; result += Test6663854.Printer.print(var_4);
+        result += "";
+        result += "\n]";
+        return result;
+    }
+}
+
+
+interface Test6663854_Interface_1 {
+    public boolean func_0();
+    public String func_1(final String arg_0, final long arg_1, byte arg_2, char[][] arg_3);
+}
+
+
+interface Test6663854_Interface_2 extends Test6663854_Interface_1 {
+}
+
+
+final class Test6663854_Class_3 implements Test6663854_Interface_2, Test6663854_Interface_1 {
+    Object var_18;
+    long var_19;
+    static double var_20;
+    final static float var_21 = 2.0644203E38F;
+    Test6663854_Class_0 var_22;
+    final static byte var_23 = 110;
+    static boolean var_24 = false;
+
+
+    public Test6663854_Class_3()
+    {
+        Test6663854_Class_0.var_5 = new Test6663854_Class_0().var_6;
+        var_22 = (var_22 = (new Test6663854_Class_0[var_23])[var_23]);
+        Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = var_23));
+        char var_37;
+        var_19 = var_23;
+    }
+
+
+    public boolean func_0()
+    {
+        if (var_24 = var_24)
+        {
+            Test6663854_Class_0.var_4 = var_24 ^ (var_24 || (var_24 = false)) ? var_23 : (Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = var_23));
+        }
+        else
+        {
+            ((Test6663854_Class_0)(var_18 = new Test6663854_Interface_1[Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = var_23)][var_23])).var_7 = ((var_22 = (var_22 = (Test6663854_Class_0)(var_18 = "uwt"))).var_8 >>>= (Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = var_23)));
+            var_24 |= !false;
+        }
+        var_18 = (var_18 = "ymmk");
+        if (var_24)
+        {
+            long var_25;
+            ((Test6663854_Class_0)(var_18 = "wsxqujs")).var_8 >>= 'F';
+            var_24 ^= false;
+        }
+        else
+        {
+            {
+                (var_22 = (Test6663854_Class_0)(var_18 = (var_18 = (var_18 = "lrgiwpwet")))).var_8++;
+            }
+            {
+                var_18 = new String[Test6663854_Class_0.var_4 = var_23];
+            }
+            {
+                new String();
+            }
+            ((var_24 |= var_24) | true ^ false ? (var_22 = (Test6663854_Class_0)(var_18 = "rpmudju")) : (var_22 = (new Test6663854_Class_0[var_23])[var_23])).var_8--;
+            var_24 ^= var_24;
+            var_19 = 206416809;
+            var_20 = 1581809112;
+            Test6663854_Class_0 var_26;
+            Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = var_23);
+        }
+        byte var_27 = 29;
+        short var_28 = 24653;
+        var_22 = (var_22 = (var_22 = (Test6663854_Class_0)(var_18 = "umagt")));
+        return var_24;
+    }
+
+    public String func_1(final String arg_0, final long arg_1, byte arg_2, char[][] arg_3)
+    {
+        {
+            final Test6663854_Class_0 var_29 = (new Test6663854_Class_0[var_23])[arg_2];
+        }
+        var_18 = (var_18 = (var_22 = (Test6663854_Class_0)(var_18 = arg_0)));
+        ++(var_22 = (Test6663854_Class_0)(var_18 = (Test6663854_Class_0)(var_18 = (var_18 = new char[var_23])))).var_8;
+        {
+            (var_22 = (var_22 = (Test6663854_Class_0)(var_18 = "xcj"))).var_8 |= arg_1 * (short)6.671469496987355E307;
+            Test6663854_Class_0.var_5 = false ? var_23 : 8012291795221583872L;
+            (var_24 ? (var_22 = (var_22 = (Test6663854_Class_0)(var_18 = arg_0))) : (var_22 = (Test6663854_Class_0)(var_18 = arg_3))).var_8--;
+        }
+        ((new String[var_23][var_23])[var_23][var_23]).compareTo(arg_0);
+        var_18 = (var_18 = arg_0);
+        var_24 &= (var_24 ^= true) & ! !false;
+        {
+            var_18 = "lok";
+        }
+        float var_30 = 2.0346904E38F;
+        var_30 += ((Test6663854_Class_0)(var_18 = new float[var_23])).var_6 + 210775691;
+        final double var_31 = 8.865908414454469E307;
+        var_20 = 'm';
+        var_22 = (Test6663854_Class_0)(var_18 = arg_0);
+        return arg_0;
+    }
+
+
+    protected Object clone()
+    {
+        ((Test6663854_Class_0)(var_18 = new short[var_23])).var_7 = 'M';
+        if (var_24)
+        {
+            var_24 = false;
+            String var_32 = "luigad";
+            ((Test6663854_Class_0.var_5 = 1.2920056E38F) > (Test6663854_Class_0.var_4 = var_23) ? (Test6663854_Class_0)(var_18 = new Test6663854_Interface_1[var_23][var_23]) : (var_22 = (var_22 = (Test6663854_Class_0)(var_18 = new byte[var_23])))).var_10 = 'A';
+            (var_22 = (var_22 = (var_22 = (new Test6663854_Class_0[var_23][var_23])[var_23][var_23]))).var_8 /= ((var_22 = (Test6663854_Class_0)(var_18 = (new short[var_23][var_23][var_23])[var_23])).var_7 = 'h');
+            (var_32 = var_32).toString();
+            var_32.length();
+            var_18 = (var_22 = (var_22 = new Test6663854_Class_0()));
+        }
+        else
+        {
+            Test6663854_Class_0.var_4 = var_23;
+            var_22 = (var_22 = (Test6663854_Class_0)(var_18 = "wbeuae"));
+            float var_33;
+            --(var_22 = (var_22 = (var_22 = (Test6663854_Class_0)(var_18 = (var_18 = "irl"))))).var_8;
+        }
+        Test6663854_Class_0.var_4 = var_23;
+        Test6663854_Class_0.var_5 = true ? ((Test6663854_Class_0)(var_18 = (var_18 = new boolean[Test6663854_Class_0.var_4 = var_23]))).var_8 : 'n';
+        var_24 = (var_24 ^= ((var_22 = (Test6663854_Class_0)(var_18 = "g")).var_6 > ((Test6663854_Class_0)(var_18 = "")).var_6 ? var_24 : true) ? false : (var_24 = var_24)) ? var_24 : false;
+        var_22 = (var_22 = (var_22 = (Test6663854_Class_0)(var_18 = new Test6663854_Interface_2[var_23])));
+        return var_18 = "qrlonljqj";
+    }
+
+    public boolean equals(Object obj)
+    {
+        Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = var_23)));
+        Test6663854_Class_0.var_5 = var_23 % (var_22 = (var_22 = (Test6663854_Class_0)obj)).var_6;
+        Test6663854_Class_0.var_5 = (((new Test6663854_Class_0[var_23][var_23][var_23])[var_23])[var_24 | (var_24 &= var_24) ? var_23 : (Test6663854_Class_0.var_4 = var_23)][var_23]).var_8;
+        var_22 = false ? (var_22 = (var_22 = (Test6663854_Class_0)obj)) : (var_22 = new Test6663854_Class_0());
+        Test6663854_Class_0.var_5 = ((new Test6663854_Class_0[var_23])[Test6663854_Class_0.var_4 = var_23]).var_8;
+        if (! !var_24)
+        {
+            var_18 = "";
+            (var_22 = (var_22 = (var_22 = (new Test6663854_Class_0[var_23])[Test6663854_Class_0.var_4 = var_23]))).var_8 >>= true ? ~ (Test6663854_Class_0.var_4 = var_23) : (var_22 = (Test6663854_Class_0)obj).var_8++;
+            (var_24 ? (Test6663854_Class_0)(var_18 = "") : (Test6663854_Class_0)obj).var_8 <<= var_23;
+            Test6663854_Class_0.var_4 = var_23;
+            var_20 = (var_22 = (var_22 = (var_22 = (var_22 = (Test6663854_Class_0)obj)))).var_8;
+        }
+        else
+        {
+            var_19 = ((Test6663854_Class_0)obj).var_6;
+        }
+        (var_24 ? "qyukxpui" : (new String[var_23])[Test6663854_Class_0.var_4 = var_23]).substring(612084215);
+        Test6663854_Class_0.var_5 = 'B';
+        if (!false || false)
+        {
+            Test6663854_Class_0.var_4 = true & var_24 ? var_23 : var_23;
+        }
+        else
+        {
+            "eksoig".lastIndexOf('b' > (var_22 = (Test6663854_Class_0)obj).var_8 | 4782397447545636864L >= (short)'[' ? "qusgbf" : "kvmylvct");
+        }
+        Test6663854_Class_0.var_5 = (Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = var_23)) << var_23;
+        int var_34;
+        var_24 = (var_24 &= (var_24 &= true));
+        var_22 = (var_22 = (var_22 = (var_22 = (Test6663854_Class_0)obj)));
+        Test6663854_Interface_1 var_35 = var_24 & ((var_24 &= var_24) ^ var_24) ? (new Test6663854_Interface_1[var_23])[var_23] : (new Test6663854_Interface_1[var_23][var_23])[var_23][var_23];
+        long var_36 = 1042482863045573632L;
+        return var_24;
+    }
+
+
+    public String toString()
+    {
+        String result =  "[\n";
+        result += "Test6663854_Class_3.var_19 = "; result += Test6663854.Printer.print(var_19);
+        result += "\n";
+        result += "Test6663854_Class_3.var_20 = "; result += Test6663854.Printer.print(var_20);
+        result += "\n";
+        result += "Test6663854_Class_3.var_21 = "; result += Test6663854.Printer.print(var_21);
+        result += "\n";
+        result += "Test6663854_Class_3.var_23 = "; result += Test6663854.Printer.print(var_23);
+        result += "\n";
+        result += "Test6663854_Class_3.var_24 = "; result += Test6663854.Printer.print(var_24);
+        result += "\n";
+        result += "Test6663854_Class_3.var_22 = "; result += Test6663854.Printer.print(var_22);
+        result += "\n";
+        result += "Test6663854_Class_3.var_18 = "; result += Test6663854.Printer.print(var_18);
+        result += "";
+        result += "\n]";
+        return result;
+    }
+}
+
+
+final class Test6663854_Class_4 implements Test6663854_Interface_1, Test6663854_Interface_2 {
+    long var_38 = (Test6663854_Class_3.var_24 |= Test6663854_Class_3.var_23 < (short)+3417996718812544000L) ? ~543562136204028928L : 1593726438;
+    char var_39;
+    static Test6663854_Interface_2 var_40;
+    Test6663854_Class_3 var_41;
+    final short var_42 = false ? Test6663854_Class_3.var_23 : (short)~Test6663854_Class_0.var_2;
+    byte var_43;
+    static Test6663854_Interface_1[] var_44;
+    final char var_45 = 4.321821176880639E307 < var_42 ? 'o' : 'v';
+
+
+    public Test6663854_Class_4()
+    {
+        Test6663854_Class_0.var_5 = 2137855185;
+        "nueqthqo".length();
+        {
+            Test6663854_Class_0.var_9 = (Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23));
+            Object var_61;
+        }
+        String var_62 = "rirgfphe";
+        ((new Test6663854_Class_3[Test6663854_Class_3.var_23])[Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23]).var_22 = ((var_41 = (var_41 = new Test6663854_Class_3())).var_22 = new Test6663854_Class_0());
+        {
+            Test6663854_Class_0.var_5 = ((new Test6663854_Class_4[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]).var_42;
+            new Test6663854_Class_0();
+            var_41 = (var_41 = new Test6663854_Class_3());
+            new Test6663854_Class_0().var_8 %= var_38;
+            (var_41 = ((new Test6663854_Class_3[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]).var_22 = new Test6663854_Class_0();
+            Test6663854_Class_0.var_4 = (Test6663854_Class_3.var_24 |= true) ? (Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23) : Test6663854_Class_3.var_23;
+            Test6663854_Interface_1 var_63 = ((new Test6663854_Interface_1[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[var_43 = (var_43 = Test6663854_Class_3.var_23)])[Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23];
+            (var_41 = (var_41 = (var_41 = (var_41 = (Test6663854_Class_3)var_63)))).var_18 = var_62;
+        }
+        if (Test6663854_Class_3.var_24 ^= false)
+        {
+            var_40 = (var_40 = new Test6663854_Class_3());
+        }
+        else
+        {
+            var_62 = "agwqc";
+            Test6663854_Class_3.var_24 ^= ((Test6663854_Class_3.var_24 ^= Test6663854_Class_3.var_24 && Test6663854_Class_3.var_24) & false ? false : !Test6663854_Class_3.var_24) ? false | (Test6663854_Class_3.var_24 &= Test6663854_Class_3.var_24) : (Test6663854_Class_3.var_24 &= (Test6663854_Class_3.var_24 |= ! (Test6663854_Class_3.var_24 |= true)));
+            var_38 *= (Test6663854_Class_3.var_20 = (Test6663854_Class_0.var_5 = var_42 + ((Test6663854_Class_3.var_20 = 5378782303770527744L) <= var_42 ? var_42 : var_42)));
+        }
+        var_38 &= ((var_41 = new Test6663854_Class_3()).var_22 = new Test6663854_Class_0()).var_6;
+        var_62 = var_62;
+        (var_41 = new Test6663854_Class_3()).var_18 = ((new Object[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23][Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = (var_43 = Test6663854_Class_3.var_23))];
+        final short var_64 = (short)'C';
+        ((var_41 = new Test6663854_Class_3()).var_22 = new Test6663854_Class_0()).var_8 /= ((var_41 = new Test6663854_Class_3()).var_22 = new Test6663854_Class_0()).var_6;
+    }
+
+
+    public boolean func_0()
+    {
+        final boolean var_46 = false;
+        return Test6663854_Class_3.var_24;
+    }
+
+    public String func_1(final String arg_0, final long arg_1, byte arg_2, char[][] arg_3)
+    {
+        {
+            --(false ? new Test6663854_Class_0() : new Test6663854_Class_0()).var_8;
+            "d".substring(Test6663854_Class_3.var_23 - (arg_2 /= + (Test6663854_Class_3.var_20 = 1483190006) * (Test6663854_Class_3.var_20 = ~Test6663854_Class_3.var_23)), 496015226);
+            {
+                var_41 = new Test6663854_Class_3();
+            }
+            var_38 ^= (! ((Test6663854_Class_3.var_24 = Test6663854_Class_3.var_24) && (new boolean[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]) ? arg_1 : (var_39 = var_45)) / var_45;
+            var_38 -= (Test6663854_Class_3.var_20 = (new short[Test6663854_Class_3.var_23])[arg_2]) != (arg_2 ^= new Test6663854_Class_0().var_6 & - (var_38 /= (Test6663854_Class_0.var_5 = var_38))) ? (Test6663854_Class_0.var_5 = - -Test6663854_Class_3.var_21) : arg_2;
+            var_41 = new Test6663854_Class_3();
+            var_40 = Test6663854_Class_3.var_24 ? new Test6663854_Class_3() : new Test6663854_Class_3();
+        }
+        int var_47;
+        (var_41 = new Test6663854_Class_3()).var_18 = "qvph";
+        {
+            ++arg_2;
+        }
+        final Test6663854_Class_3 var_48 = new Test6663854_Class_3();
+        {
+            var_48.var_22 = (Test6663854_Class_0)((var_41 = var_48).var_18 = (var_48.var_22 = (Test6663854_Class_0)(var_48.var_18 = new String[Test6663854_Class_3.var_23])));
+            {
+                final float[] var_49 = (new float[Test6663854_Class_3.var_23][arg_2])[arg_2];
+            }
+            "lcfxrlilw".replace(var_45, (char)(false ? (short)2998890687978943488L : var_42));
+            arg_0.lastIndexOf("mx");
+            final Test6663854_Class_0 var_50 = var_48.var_22 = (var_48.var_22 = (Test6663854_Class_0)((var_41 = var_48).var_18 = var_48));
+            new String();
+            ((Test6663854_Class_3)(var_40 = (var_40 = (var_41 = var_48)))).var_22 = var_50;
+            var_50.var_8 |= arg_2;
+            Test6663854_Class_3.var_20 = + - (Test6663854_Class_3.var_20 = var_42) - var_45 * Test6663854_Class_3.var_21;
+            double var_51 = false ? 1.6389923323715952E308 : (var_39 = var_45);
+            (((new Test6663854_Class_3[arg_2][arg_2])[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]).var_18 = (var_48.var_18 = new Test6663854_Class_0[arg_2 %= 2090054678][arg_2]);
+        }
+        {
+            var_47 = (arg_2 |= (((new Test6663854_Class_4[arg_2][arg_2][arg_2])[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23]).var_42);
+        }
+        ((new Test6663854_Class_4[arg_2][Test6663854_Class_3.var_23])[++arg_2][arg_2 <<= 'c']).var_41 = var_48;
+        var_41 = var_48;
+        Test6663854_Class_3.var_24 = Test6663854_Class_3.var_24 & Test6663854_Class_3.var_24;
+        final boolean var_52 = false;
+        {
+            var_40 = (var_41 = (var_41 = var_48));
+            (false ? var_48 : var_48).var_18 = ((new Test6663854_Class_0[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23][arg_2])[arg_2])[++arg_2][(byte)203097731];
+            var_41 = (var_41 = var_48);
+        }
+        var_38 |= (short)Test6663854_Class_3.var_23;
+        var_40 = var_48;
+        {
+            arg_0.indexOf("lnf", var_47 = (((Test6663854_Class_0)(var_48.var_18 = arg_0)).var_8 <<= (var_39 = var_45)));
+        }
+        final boolean var_53 = true & 1.4130067467800934E308 < (arg_2 |= 'W');
+        var_48.var_18 = (var_41 = var_48);
+        (var_48.var_22 = new Test6663854_Class_0()).var_7 = Test6663854_Class_3.var_23;
+        Test6663854_Class_3.var_24 ^= (Test6663854_Class_3.var_24 || var_52) ^ ("axg".startsWith(arg_0, var_47 = (var_47 = var_45)) | var_52);
+        {
+            Test6663854_Class_0.var_5 = 275192701;
+        }
+        Test6663854_Interface_2 var_54;
+        if (true)
+        {
+            var_54 = (var_41 = (var_41 = (var_41 = var_48)));
+        }
+        else
+        {
+            ((Test6663854_Class_0)((var_41 = var_48).var_18 = (var_41 = var_48))).var_7 = var_42;
+        }
+        Test6663854_Class_0.var_5 = --arg_2;
+        ((new String[Test6663854_Class_3.var_23])[var_53 ? arg_2 : arg_2]).length();
+        return arg_0 + "qj";
+    }
+
+
+
+
+    final String func_0(byte arg_0, Test6663854_Class_0 arg_1, final Test6663854_Class_0 arg_2, final Object arg_3)
+    {
+        arg_2.var_7 = (new long[arg_0][arg_0])[Test6663854_Class_3.var_23][arg_0 >>>= 1960882886] % arg_0;
+        Object var_55 = (((new Test6663854_Class_4[Test6663854_Class_3.var_23])[--arg_0]).var_41 = new Test6663854_Class_3()).var_18 = (Test6663854_Class_3.var_24 = Test6663854_Class_3.var_24 ? (Test6663854_Class_3.var_24 |= (Test6663854_Class_3.var_24 &= true)) : Test6663854_Class_3.var_24 | Test6663854_Class_3.var_24) ? (var_41 = (var_41 = (Test6663854_Class_3)arg_3)) : arg_3;
+        var_38 >>= ((Test6663854_Class_3.var_24 = (Test6663854_Class_3.var_24 ^= Test6663854_Class_3.var_24)) | (Test6663854_Class_3.var_24 ^= Test6663854_Class_3.var_24) ? arg_2.var_6 : (arg_0 -= 1.2264686416488313E308)) != (((new Test6663854_Class_4[arg_0])[arg_0]).var_38 >>>= arg_2.var_8) ? Test6663854_Class_3.var_23 : arg_0;
+        arg_0 += 1826349110 / (Test6663854_Class_0.var_5 = arg_0) + Test6663854_Class_3.var_21;
+        (var_41 = !Test6663854_Class_3.var_24 ? (var_41 = (new Test6663854_Class_3[arg_0])[arg_0]) : (new Test6663854_Class_3[--arg_0])[arg_0]).var_22 = ((new Test6663854_Class_0[arg_0][(byte)9.18575257519393E307][Test6663854_Class_3.var_23])[arg_0])[Test6663854_Class_3.var_23][arg_0];
+        (var_41 = new Test6663854_Class_3()).var_22 = ((var_41 = (var_41 = (var_41 = (var_41 = (var_41 = (Test6663854_Class_3)arg_3))))).var_22 = (arg_1 = arg_2));
+        Test6663854_Class_3.var_20 = Test6663854_Class_3.var_21;
+        var_40 = (var_41 = (Test6663854_Class_3)(new Test6663854_Interface_2[Test6663854_Class_3.var_23])[arg_0]);
+        Test6663854_Class_0.var_5 = Test6663854_Class_3.var_24 || (Test6663854_Class_3.var_24 = (Test6663854_Class_3.var_24 &= true)) ? var_42 : arg_1.var_6;
+        Test6663854_Class_0.var_5 = arg_0;
+        {
+            arg_0 %= (short)var_45;
+        }
+        var_55 = arg_1;
+        var_40 = (var_41 = (Test6663854_Class_3.var_24 = false) ? (Test6663854_Class_3)var_55 : (Test6663854_Class_3)arg_3);
+        ((Test6663854_Class_3.var_24 &= (Test6663854_Class_3.var_20 = arg_0) == 2.2107098E38F) ? (Test6663854_Class_3)(new Test6663854_Interface_1[Test6663854_Class_3.var_23])[arg_0] : (Test6663854_Class_3)(var_55 = "unofratwy")).var_18 = var_55;
+        return "";
+    }
+
+    private static String func_1(final String arg_0, char arg_1, final boolean arg_2, Test6663854_Class_3 arg_3)
+    {
+        if (!false & Test6663854_Class_3.var_24)
+        {
+            char var_56 = 'C';
+        }
+        else
+        {
+            var_40 = (var_40 = (var_40 = arg_3));
+        }
+        ((new Test6663854_Class_0[Test6663854_Class_3.var_23][Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23])[Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23][Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23]).var_7 = 2.4396145E38F + (((new Test6663854_Class_4[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23][(byte)Test6663854_Class_0.var_2]).var_43 = Test6663854_Class_3.var_23);
+        if (Test6663854_Class_3.var_24)
+        {
+            arg_3 = arg_3;
+        }
+        else
+        {
+            arg_3 = (arg_3 = arg_3);
+        }
+        ((new Test6663854_Class_4[Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23))])[Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23]).var_38 /= '`';
+        arg_3 = (arg_3 = arg_3);
+        new Test6663854_Class_0().var_8++;
+        return "qd";
+    }
+
+    private Object func_2(short[][] arg_0, final Test6663854_Interface_2 arg_1)
+    {
+        boolean var_57 = false;
+        Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23;
+        var_40 = arg_1;
+        ((var_41 = (var_41 = (var_41 = (Test6663854_Class_3)arg_1))).var_22 = (Test6663854_Class_0)(((Test6663854_Class_3)arg_1).var_18 = "dxrwk")).var_7 = 'c';
+        if (true)
+        {
+            var_40 = (var_40 = arg_1);
+        }
+        else
+        {
+            var_39 = 'Z';
+        }
+        Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23;
+        var_41 = (Test6663854_Class_3)arg_1;
+        return (var_57 ? (Test6663854_Class_3.var_20 = Test6663854_Class_3.var_23) % 1271549437 : 1.1177259470512304E308) <= 1363316667 ? (new Test6663854_Class_3().var_18 = new String[var_43 = Test6663854_Class_3.var_23][Test6663854_Class_3.var_23]) : new Test6663854_Class_0();
+    }
+
+    private short func_3()
+    {
+        int var_58 = true ? var_42 : 1438975079;
+        float var_59 = Test6663854_Class_3.var_21;
+        var_43 = Test6663854_Class_3.var_23;
+        (var_41 = (Test6663854_Class_3)(new Test6663854_Interface_1[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]).var_22 = (Test6663854_Class_3.var_24 |= Test6663854_Class_3.var_24) ? new Test6663854_Class_0() : new Test6663854_Class_0();
+        var_58++;
+        return Test6663854_Class_3.var_24 ? var_42 : (var_43 = Test6663854_Class_3.var_23);
+    }
+
+    protected void func_4(final long arg_0, Test6663854_Class_0 arg_1, long arg_2)
+    {
+        ++arg_2;
+        (arg_1 = (arg_1 = (arg_1 = arg_1))).var_8 /= var_42;
+        --arg_2;
+        final Test6663854_Interface_1 var_60 = ((new Test6663854_Interface_1[Test6663854_Class_3.var_23][Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23])[var_43 = Test6663854_Class_3.var_23])[var_43 = Test6663854_Class_3.var_23];
+        (true ? (Test6663854_Class_3)var_60 : (Test6663854_Class_3)var_60).var_22 = (Test6663854_Class_0)((var_41 = (Test6663854_Class_3)(var_40 = (Test6663854_Class_3)var_60)).var_18 = (Test6663854_Class_3.var_24 ^= Test6663854_Class_3.var_23 == var_38) ? "xo" : "uocm");
+        var_38 >>= (((Test6663854_Class_3)(new Test6663854_Interface_2[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]).var_22 = (new Test6663854_Class_0[Test6663854_Class_3.var_23])[Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23)]).var_8++;
+    }
+
+    public String toString()
+    {
+        String result =  "[\n";
+        result += "Test6663854_Class_4.var_39 = "; result += Test6663854.Printer.print(var_39);
+        result += "\n";
+        result += "Test6663854_Class_4.var_45 = "; result += Test6663854.Printer.print(var_45);
+        result += "\n";
+        result += "Test6663854_Class_4.var_44 = "; result += Test6663854.Printer.print(var_44);
+        result += "\n";
+        result += "Test6663854_Class_4.var_38 = "; result += Test6663854.Printer.print(var_38);
+        result += "\n";
+        result += "Test6663854_Class_4.var_40 = "; result += Test6663854.Printer.print(var_40);
+        result += "\n";
+        result += "Test6663854_Class_4.var_42 = "; result += Test6663854.Printer.print(var_42);
+        result += "\n";
+        result += "Test6663854_Class_4.var_43 = "; result += Test6663854.Printer.print(var_43);
+        result += "\n";
+        result += "Test6663854_Class_4.var_41 = "; result += Test6663854.Printer.print(var_41);
+        result += "";
+        result += "\n]";
+        return result;
+    }
+}
+
+
+class Test6663854_Class_5 implements Test6663854_Interface_1 {
+    final static char var_65 = 'S';
+    static Test6663854_Interface_2 var_66;
+    static Test6663854_Class_3 var_67;
+
+
+    public Test6663854_Class_5()
+    {
+        {
+            ((new Test6663854_Class_4[Test6663854_Class_3.var_23][Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23)])[((Test6663854_Class_4)(new Object[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]).var_43 = Test6663854_Class_3.var_23][Test6663854_Class_3.var_23]).func_0();
+        }
+        Test6663854_Class_0.var_5 = 1409421143;
+        Test6663854_Class_3.var_24 &= (Test6663854_Class_3.var_24 = false);
+        Test6663854_Class_0.var_5 = 3.1496384E38F / (((new Test6663854_Class_4[Test6663854_Class_3.var_23])[Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23]).var_45 - (Test6663854_Class_3.var_23 + Test6663854_Class_3.var_23));
+        ((new Test6663854_Class_3[Test6663854_Class_3.var_23][Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23])[Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23][Test6663854_Class_3.var_23]).var_18 = (new Object[Test6663854_Class_3.var_23][Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23];
+        var_66 = (Test6663854_Class_4.var_40 = (Test6663854_Class_3.var_24 = (Test6663854_Class_3.var_24 = Test6663854_Class_3.var_24)) || false ? new Test6663854_Class_3() : new Test6663854_Class_3());
+    }
+
+
+    public boolean func_0()
+    {
+        {
+            Test6663854_Class_4.var_40 = (new Test6663854_Class_4[Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23];
+        }
+        Test6663854_Class_0.var_5 = (new char[Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23];
+        var_66 = (Test6663854_Class_4.var_40 = new Test6663854_Class_3());
+        Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23;
+        ((var_67 = (var_67 = new Test6663854_Class_3())).var_22 = (Test6663854_Class_0)(new Test6663854_Class_3().var_18 = (new Test6663854_Class_3().var_22 = (new Test6663854_Class_3().var_22 = new Test6663854_Class_0())))).var_8++;
+        Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23;
+        return (Test6663854_Class_3.var_24 &= Test6663854_Class_3.var_24) ? (Test6663854_Class_3.var_24 = Test6663854_Class_3.var_24) : (Test6663854_Class_3.var_24 = true);
+    }
+
+    public String func_1(final String arg_0, final long arg_1, byte arg_2, char[][] arg_3)
+    {
+        ((true | (Test6663854_Class_3.var_24 |= true)) ^ false ? (new Test6663854_Class_0[(byte)arg_1])[arg_2--] : new Test6663854_Class_0()).var_8 <<= 2320675830599883776L;
+        arg_2 -= ~ (Test6663854_Class_3.var_24 ? ~4954934861909065728L : 5155213238651986944L);
+        ((Test6663854_Class_4)((new Test6663854_Interface_1[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[arg_2 >>>= 1308733456])[Test6663854_Class_3.var_23]).var_43 = arg_2;
+        Test6663854_Class_0.var_5 = Test6663854_Class_3.var_21;
+        {
+            String var_68;
+            float var_69 = Test6663854_Class_3.var_21 * new Test6663854_Class_0().var_6;
+            ++arg_2;
+            Test6663854_Class_0.var_9 = arg_2;
+            --arg_2;
+            var_68 = arg_0;
+            final long[] var_70 = new long[arg_2 /= 8889610108908524544L];
+            Test6663854_Class_4 var_71;
+        }
+        new Test6663854_Class_4().var_38++;
+        Test6663854_Class_3 var_72;
+        (((new Test6663854_Class_4[arg_2 *= var_65][Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[arg_2])[arg_2 ^= arg_2][Test6663854_Class_3.var_23]).var_38--;
+        new Test6663854_Class_3();
+        (((new Test6663854_Class_4[arg_2 /= 7.692983E37F][Test6663854_Class_3.var_23])[arg_2])[Test6663854_Class_3.var_23]).var_38 |= arg_1;
+        Test6663854_Class_3.var_24 ^= "dlhn".startsWith(true ? "tad" : "bssdfvig", 1125165775) ? Test6663854_Class_3.var_24 : true;
+        Test6663854_Class_3.var_20 = new Test6663854_Class_0().var_8;
+        Test6663854_Class_4.var_40 = (Test6663854_Class_4.var_40 = (new Test6663854_Class_4[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]);
+        var_72 = (new Test6663854_Class_3[Test6663854_Class_3.var_23][arg_2])[arg_2][arg_2];
+        Test6663854_Class_0.var_5 = 8634870161778523136L;
+        {
+            Test6663854_Class_0.var_5 = ~4420139622226571264L;
+            Test6663854_Class_3.var_24 &= (var_72 = (var_67 = (new Test6663854_Class_3[arg_2][Test6663854_Class_3.var_23])[arg_2][arg_2])).func_0();
+            arg_2 >>= false ? 'H' : ']';
+        }
+        switch (arg_2)
+        {
+            case 72:
+                String var_73 = arg_0;
+                break;
+
+            default:
+
+        }
+        ((Test6663854_Class_0)((var_72 = (Test6663854_Class_3)(new Test6663854_Interface_1[Test6663854_Class_3.var_23])[arg_2]).var_18 = (Test6663854_Class_4)(new Test6663854_Interface_2[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23])).var_7 = ((new Test6663854_Class_0[arg_2])[arg_2]).var_6 + Test6663854_Class_3.var_23;
+        var_67 = (var_67 = (new Test6663854_Class_3[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[(byte)7.184837949183326E307][arg_2 ^= 2188582676874919936L]);
+        return ((Test6663854_Class_3.var_24 = new Test6663854_Class_0().var_6 >= (short)3.4944631475014644E307) ? Test6663854_Class_3.var_24 : (Test6663854_Class_3.var_24 ? (Test6663854_Class_3.var_24 = false) : !Test6663854_Class_3.var_24)) ? "y" : arg_0;
+    }
+
+
+
+
+    private final static float func_0(boolean arg_0)
+    {
+        {
+            new String();
+            {
+                new String();
+            }
+            ((Test6663854_Class_3)(new Object[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[(new byte[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]][Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23]).var_18 = (new Object[Test6663854_Class_3.var_23][Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23])[(byte)1.577204008065932E308][Test6663854_Class_3.var_23];
+            int var_74;
+            new String();
+            Test6663854_Class_0.var_5 = ((Test6663854_Class_0)(new Object[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]).var_1;
+            new String();
+            new String();
+            new String();
+            var_74 = '_';
+            new Test6663854_Class_0();
+            Test6663854_Class_3 var_75 = var_67 = (((new Test6663854_Class_4[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]).var_41 = (new Test6663854_Class_3[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23]);
+        }
+        if (Test6663854_Class_3.var_24 &= (Test6663854_Class_3.var_24 &= Test6663854_Class_3.var_24))
+        {
+            new String();
+            arg_0 &= ~ (((new Test6663854_Class_4[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]).var_38 /= Test6663854_Class_3.var_23) < 2.1129417E38F;
+        }
+        else
+        {
+            Test6663854_Class_3.var_24 &= false & arg_0 ? false : (Test6663854_Class_3.var_24 &= true);
+        }
+        new Test6663854_Class_0().var_8 ^= 682270015;
+        Test6663854_Class_4.var_40 = (Test6663854_Class_4)(new Test6663854_Interface_2[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23];
+        new Test6663854_Class_0();
+        new String();
+        new Test6663854_Class_0();
+        new Test6663854_Class_3().var_22 = new Test6663854_Class_0();
+        Test6663854_Class_4.var_40 = (new Test6663854_Class_4[Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23];
+        new Test6663854_Class_0().var_8++;
+        new Test6663854_Class_0();
+        Test6663854_Interface_1 var_76;
+        return Test6663854_Class_3.var_21;
+    }
+
+    public String toString()
+    {
+        String result =  "[\n";
+        result += "Test6663854_Class_5.var_65 = "; result += Test6663854.Printer.print(var_65);
+        result += "\n";
+        result += "Test6663854_Class_5.var_66 = "; result += Test6663854.Printer.print(var_66);
+        result += "\n";
+        result += "Test6663854_Class_5.var_67 = "; result += Test6663854.Printer.print(var_67);
+        result += "";
+        result += "\n]";
+        return result;
+    }
+}
+
+
+class Test6663854_Class_6 implements Test6663854_Interface_1, Test6663854_Interface_2 {
+    static long var_77;
+    final byte var_78 = Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23;
+    long var_79;
+    static Test6663854_Class_4 var_80;
+    static long var_81 = 1365276905537306624L;
+    static Test6663854_Class_3 var_82;
+
+
+    public Test6663854_Class_6()
+    {
+        Test6663854_Class_0.var_5 = Test6663854_Class_3.var_24 ? "bdouannkn".charAt(373674594) : (char)2612074035904901120L;
+        if (Test6663854_Class_3.var_24)
+        {
+            ++var_81;
+            Test6663854_Class_0.var_4 = (new byte[Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23)])[var_78];
+            var_81++;
+        }
+        else
+        {
+            (var_80 = (Test6663854_Class_4)(new Test6663854_Interface_1[Test6663854_Class_3.var_23])[Test6663854_Class_0.var_4 = var_78]).var_41 = (((Test6663854_Class_4)(new Object[Test6663854_Class_3.var_23])[var_78]).var_41 = new Test6663854_Class_3());
+            var_81--;
+            ((new Test6663854_Class_0[var_78])[var_78]).equals((Test6663854_Class_5.var_67 = (Test6663854_Class_3)(new Test6663854_Interface_2[var_78])[var_78]).var_18 = Test6663854_Class_3.var_23 <= var_78 ? "uvoxke" : new String());
+            {
+                Test6663854_Class_5.var_66 = (new Test6663854_Interface_2[var_78])[((new Test6663854_Class_6[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]).var_78];
+            }
+            "huxqcdmii".compareTo("owtdmma");
+            Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23)));
+            var_81--;
+        }
+        {
+            Test6663854_Class_5[][] var_85;
+        }
+        Test6663854_Class_5.var_66 = (new Test6663854_Interface_2[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23][Test6663854_Class_3.var_23];
+        if (Test6663854_Class_3.var_24 || Test6663854_Class_3.var_24)
+        {
+            var_82 = (new Test6663854_Class_3[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[var_78][Test6663854_Class_3.var_23];
+            Test6663854_Class_0.var_5 = ++var_81;
+            Test6663854_Class_0.var_5 = Test6663854_Class_3.var_24 ? 'n' : Test6663854_Class_5.var_65;
+        }
+        else
+        {
+            new Test6663854_Class_0();
+        }
+        Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23);
+        Test6663854_Class_5.var_66 = new Test6663854_Class_3();
+        Test6663854_Class_3 var_86 = (var_80 = (var_80 = (var_80 = (new Test6663854_Class_4[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]))).var_41 = (var_82 = (Test6663854_Class_5.var_67 = ((new Test6663854_Class_3[Test6663854_Class_3.var_23][var_78][Test6663854_Class_3.var_23])[var_78])[var_78][Test6663854_Class_3.var_23]));
+        Test6663854_Class_3.var_24 &= (Test6663854_Class_3.var_24 = Test6663854_Class_3.var_24) ? (Test6663854_Class_3.var_24 = Test6663854_Class_3.var_24) : (Test6663854_Class_3.var_24 &= Test6663854_Class_3.var_24);
+        var_86.var_22 = (new Test6663854_Class_0[var_78][Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23][var_78];
+        (Test6663854_Class_3.var_24 ? "evayayw" : "ndcq").startsWith("hwlik" + "tnhfsky");
+        var_86 = var_86;
+    }
+
+
+    public boolean func_0()
+    {
+        Test6663854_Class_4.var_40 = (Test6663854_Class_5.var_67 = (new Test6663854_Class_3[var_78])[Test6663854_Class_3.var_23]);
+        return Test6663854_Class_0.var_2 != (((new Test6663854_Class_0[var_78])[Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23]).var_7 = Test6663854_Class_3.var_23);
+    }
+
+    public String func_1(final String arg_0, final long arg_1, byte arg_2, char[][] arg_3)
+    {
+        Test6663854_Class_3.var_24 |= Test6663854_Class_3.var_24;
+        Test6663854_Class_0.var_5 = var_78;
+        char var_83;
+        (Test6663854_Class_5.var_67 = (Test6663854_Class_5.var_67 = (Test6663854_Class_3)(new Test6663854_Interface_1[var_78])[Test6663854_Class_3.var_23])).var_18 = new Test6663854_Class_0[arg_2];
+        Test6663854_Class_4 var_84;
+        var_81++;
+        return arg_0;
+    }
+
+
+
+    public String toString()
+    {
+        String result =  "[\n";
+        result += "Test6663854_Class_6.var_77 = "; result += Test6663854.Printer.print(var_77);
+        result += "\n";
+        result += "Test6663854_Class_6.var_79 = "; result += Test6663854.Printer.print(var_79);
+        result += "\n";
+        result += "Test6663854_Class_6.var_81 = "; result += Test6663854.Printer.print(var_81);
+        result += "\n";
+        result += "Test6663854_Class_6.var_78 = "; result += Test6663854.Printer.print(var_78);
+        result += "\n";
+        result += "Test6663854_Class_6.var_80 = "; result += Test6663854.Printer.print(var_80);
+        result += "\n";
+        result += "Test6663854_Class_6.var_82 = "; result += Test6663854.Printer.print(var_82);
+        result += "";
+        result += "\n]";
+        return result;
+    }
+}
+
+
+final class Test6663854_Class_7 extends Test6663854_Class_5 implements Test6663854_Interface_2 {
+    final float var_87 = 1.1671899E38F;
+    static char var_88 = var_65;
+    float var_89 = 2.166908E37F;
+
+
+    public Test6663854_Class_7()
+    {
+        var_88--;
+        (Test6663854_Class_5.var_67 = (new Test6663854_Class_3[Test6663854_Class_3.var_23])[Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23]).equals(((Test6663854_Class_3)(Test6663854_Class_4.var_40 = (new Test6663854_Class_4[Test6663854_Class_3.var_23])[Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23])).var_18 = "nycb");
+        Test6663854_Class_3.var_24 = (Test6663854_Class_3.var_24 |= Test6663854_Class_3.var_24 ^ false);
+        Test6663854_Class_3.var_24 ^= true;
+        var_88--;
+        Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (Test6663854_Class_4)(new Test6663854_Interface_1[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]);
+        if (false)
+        {
+            Test6663854_Class_4.var_40 = (new Test6663854_Interface_2[Test6663854_Class_3.var_23])[((new Test6663854_Class_6[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]).var_78];
+        }
+        else
+        {
+            --Test6663854_Class_6.var_81;
+        }
+        Test6663854_Class_6.var_81--;
+        ++Test6663854_Class_6.var_81;
+        if (Test6663854_Class_3.var_24 = !Test6663854_Class_3.var_24)
+        {
+            --var_88;
+            Test6663854_Class_5.var_66 = (new Test6663854_Class_4[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23];
+            Test6663854_Class_6.var_81 &= 1451013276;
+            var_88--;
+        }
+        else
+        {
+            var_88 >>= --var_88;
+        }
+        Test6663854_Class_3.var_24 &= Test6663854_Class_3.var_24;
+        Test6663854_Class_6.var_81--;
+        Test6663854_Class_6.var_81++;
+        --Test6663854_Class_6.var_81;
+        ((new Test6663854_Class_4[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23]).var_41 = new Test6663854_Class_3();
+    }
+
+
+
+
+    public final Test6663854_Class_4 func_0(int arg_0)
+    {
+        --arg_0;
+        {
+            switch ((char)var_89)
+            {
+                case 'R':
+
+            }
+            ++(Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (new Test6663854_Class_4[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]))).var_38;
+            ++var_88;
+            Test6663854_Class_5.var_66 = ~Test6663854_Class_6.var_81 % Test6663854_Class_6.var_81 != (Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23) ? (Test6663854_Class_6.var_82 = (var_67 = new Test6663854_Class_3())) : new Test6663854_Class_3();
+        }
+        (Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (Test6663854_Class_4)(new Test6663854_Interface_2[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])).var_38 &= new Test6663854_Class_0().var_6;
+        Test6663854_Interface_1 var_90 = ((new Test6663854_Interface_1[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23][Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23];
+        (Test6663854_Class_5.var_67 = (new Test6663854_Class_3[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]).var_22 = new Test6663854_Class_0();
+        if (true)
+        {
+            ((Test6663854_Class_3)(Test6663854_Interface_2)var_90).var_18 = (((Test6663854_Class_3)var_90).var_22 = new Test6663854_Class_0());
+        }
+        else
+        {
+            ++((Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (Test6663854_Class_4)var_90)).var_45 > 7.093005581228189E307 ? (Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (Test6663854_Class_4)var_90)) : (Test6663854_Class_4)(var_90 = (Test6663854_Class_3)var_90)).var_38;
+        }
+        var_67 = (Test6663854_Class_3.var_24 ^= true) ^ true ? (Test6663854_Class_6.var_82 = (Test6663854_Class_3)var_90) : (Test6663854_Class_3)var_90;
+        {
+            {
+                var_90 = (var_90 = (Test6663854_Class_5)var_90);
+            }
+            Test6663854_Class_3.var_24 ^= (Test6663854_Class_3.var_24 = (Test6663854_Class_3.var_24 ? !false : true) ? (Test6663854_Class_3.var_24 &= (Test6663854_Class_3.var_24 = (Test6663854_Class_3.var_24 = Test6663854_Class_3.var_24))) : ! !Test6663854_Class_3.var_24 && false);
+            Test6663854_Class_0 var_91;
+            Test6663854_Class_4.var_40 = (Test6663854_Class_3.var_24 |= (Test6663854_Class_3.var_24 |= Test6663854_Class_3.var_24) && (Test6663854_Class_3.var_24 |= Test6663854_Class_3.var_24) | ! (Test6663854_Class_3.var_24 ^= Test6663854_Class_3.var_24)) ? (+Test6663854_Class_3.var_21 != 4.6479454E37F ? (Test6663854_Class_6)(Test6663854_Interface_2)var_90 : (Test6663854_Class_6)var_90) : (new Test6663854_Class_6[Test6663854_Class_3.var_23])[((Test6663854_Class_6)(Test6663854_Class_5.var_66 = (Test6663854_Interface_2)var_90)).var_78];
+        }
+        arg_0 |= ((((Test6663854_Class_6.var_80 = (Test6663854_Class_4)var_90).var_41 = (((Test6663854_Class_4)var_90).var_41 = (Test6663854_Class_3)var_90)).var_22 = new Test6663854_Class_0()).var_8 += (Test6663854_Class_3.var_20 = new Test6663854_Class_0().var_1));
+        {
+            (Test6663854_Class_6.var_82 = (new Test6663854_Class_3[((Test6663854_Class_6)var_90).var_78])[Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23]).var_18 = ((Test6663854_Class_3.var_24 ^ (!false & Test6663854_Class_3.var_24 ? Test6663854_Class_3.var_24 : (Test6663854_Class_3.var_24 ^= true)) ? (Test6663854_Class_3)var_90 : (Test6663854_Class_3)(Test6663854_Interface_2)var_90).var_18 = ((Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (Test6663854_Class_4)var_90)).var_41 = (Test6663854_Class_3)(((Test6663854_Class_3)(Test6663854_Class_5.var_66 = (Test6663854_Class_3)var_90)).var_18 = (((Test6663854_Class_3)(Test6663854_Interface_2)var_90).var_18 = (Test6663854_Class_3)var_90))));
+        }
+        {
+            arg_0++;
+        }
+        Test6663854_Interface_2 var_92;
+        {
+            ((Test6663854_Class_5.var_67 = ((Test6663854_Class_6.var_80 = (Test6663854_Class_4)(var_92 = (Test6663854_Class_3)var_90)).var_41 = (Test6663854_Class_5.var_67 = (Test6663854_Class_3)var_90))).var_22 = ((Test6663854_Class_5.var_67 = (new Test6663854_Class_3[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]).var_22 = ((Test6663854_Class_6.var_82 = (((Test6663854_Class_4)var_90).var_41 = (Test6663854_Class_3)var_90)).var_22 = (Test6663854_Class_0)((Test6663854_Class_5.var_67 = (Test6663854_Class_3)var_90).var_18 = (Test6663854_Class_5)var_90)))).var_7 = Test6663854_Class_3.var_21;
+        }
+        arg_0 &= ((var_67 = (var_67 = new Test6663854_Class_3())).var_22 = (new Test6663854_Class_0[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]).var_1 / 2084775982;
+        Test6663854_Class_3.var_20 = (Test6663854_Class_3.var_24 = false) ^ true ? Test6663854_Class_0.var_2 : var_88;
+        return Test6663854_Class_6.var_80 = (new Test6663854_Class_4[Test6663854_Class_3.var_23])[((Test6663854_Class_6)var_90).var_78];
+    }
+
+    public String toString()
+    {
+        String result =  "[\n";
+        result += "Test6663854_Class_7.var_65 = "; result += Test6663854.Printer.print(var_65);
+        result += "\n";
+        result += "Test6663854_Class_7.var_88 = "; result += Test6663854.Printer.print(var_88);
+        result += "\n";
+        result += "Test6663854_Class_7.var_87 = "; result += Test6663854.Printer.print(var_87);
+        result += "\n";
+        result += "Test6663854_Class_7.var_89 = "; result += Test6663854.Printer.print(var_89);
+        result += "\n";
+        result += "Test6663854_Class_7.var_66 = "; result += Test6663854.Printer.print(var_66);
+        result += "\n";
+        result += "Test6663854_Class_7.var_67 = "; result += Test6663854.Printer.print(var_67);
+        result += "";
+        result += "\n]";
+        return result;
+    }
+}
+
+
+final class Test6663854_Class_8 implements Test6663854_Interface_1 {
+    long var_93 = ++Test6663854_Class_6.var_81;
+    short var_94 = (short)'H';
+    float var_95;
+    Test6663854_Interface_1 var_96;
+    final static float var_97 = 2.43397E38F;
+    final static long var_98 = 7461452158234510336L;
+    static Test6663854_Class_7[] var_99;
+
+
+    public Test6663854_Class_8()
+    {
+        Test6663854_Class_6 var_103;
+        Test6663854_Class_3.var_20 = (Test6663854_Class_3.var_20 = var_94 / ~Test6663854_Class_3.var_23);
+        if (false)
+        {
+            Test6663854_Class_3.var_24 = Test6663854_Class_3.var_24;
+        }
+        else
+        {
+            Test6663854_Class_3.var_20 = (Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23);
+            --Test6663854_Class_7.var_88;
+        }
+        final boolean[] var_104 = new boolean[Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23];
+        Test6663854_Class_3.var_24 &= false & (Test6663854_Class_3.var_24 = Test6663854_Class_3.var_24);
+    }
+
+
+    public boolean func_0()
+    {
+        Test6663854_Class_7.var_88--;
+        Test6663854_Class_7.var_88--;
+        (((new Test6663854_Class_0[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]).var_7 = 1.2337083709828518E307;
+        Test6663854_Class_7.var_88++;
+        if (Test6663854_Class_3.var_24)
+        {
+            byte var_100 = 25;
+            "lwmbav".toLowerCase();
+            Test6663854_Class_3.var_24 |= (Test6663854_Class_3.var_24 |= false) & ((Test6663854_Class_3.var_24 = true) ^ Test6663854_Class_3.var_24 ? (Test6663854_Class_3.var_24 ^= Test6663854_Class_3.var_24) : Test6663854_Class_3.var_24);
+        }
+        else
+        {
+            long var_101;
+            new Test6663854_Class_0().var_7 = (Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23);
+            Test6663854_Class_7.var_88++;
+            ++Test6663854_Class_6.var_81;
+            Test6663854_Class_7.var_88 |= 1083041050566936576L;
+            Test6663854_Class_7.var_88--;
+        }
+        var_94 ^= (var_94 = (var_94 *= 1.9072213520938263E307));
+        var_94++;
+        boolean var_102 = true;
+        ++Test6663854_Class_7.var_88;
+        Test6663854_Class_7.var_88 += (((new Test6663854_Class_7[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23]).var_87;
+        ((new Test6663854_Class_7[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]).var_89 /= (new double[Test6663854_Class_3.var_23])[Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23] * var_97;
+        return (Test6663854_Class_3.var_24 = (var_102 &= Test6663854_Class_3.var_24 & true)) ? false : Test6663854_Class_3.var_24;
+    }
+
+    public String func_1(final String arg_0, final long arg_1, byte arg_2, char[][] arg_3)
+    {
+        Test6663854_Class_7.var_88--;
+        --Test6663854_Class_7.var_88;
+        return arg_0;
+    }
+
+
+
+    private Test6663854_Class_0 func_0(Test6663854_Class_0 arg_0)
+    {
+        if (!Test6663854_Class_3.var_24)
+        {
+            Test6663854_Class_7.var_88 ^= (Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23) << ~Test6663854_Class_0.var_2;
+        }
+        else
+        {
+            (Test6663854_Class_6.var_82 = (Test6663854_Class_7.var_67 = (Test6663854_Class_3)(new Test6663854_Interface_1[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])).var_22 = (((new Test6663854_Class_3[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23]).var_22 = (arg_0 = arg_0));
+        }
+        if ((Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23))) < Test6663854_Class_3.var_21)
+        {
+            Test6663854_Class_3.var_24 ^= (Test6663854_Class_3.var_24 ^= (arg_0.var_6 ^ Test6663854_Class_7.var_88++) != (long)"y".codePointAt((int)var_94)) | false;
+        }
+        else
+        {
+            ++Test6663854_Class_7.var_88;
+        }
+        var_94 >>= (Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23);
+        {
+            Test6663854_Class_7.var_66 = (Test6663854_Class_6)(var_96 = new Test6663854_Class_3());
+        }
+        arg_0.var_8 = 'u';
+        var_94 ^= arg_0.var_8;
+        ++var_94;
+        Test6663854_Class_7.var_88++;
+        var_94 += (Test6663854_Class_3.var_20 = '^');
+        return arg_0;
+    }
+
+    public String toString()
+    {
+        String result =  "[\n";
+        result += "Test6663854_Class_8.var_99 = "; result += Test6663854.Printer.print(var_99);
+        result += "\n";
+        result += "Test6663854_Class_8.var_93 = "; result += Test6663854.Printer.print(var_93);
+        result += "\n";
+        result += "Test6663854_Class_8.var_98 = "; result += Test6663854.Printer.print(var_98);
+        result += "\n";
+        result += "Test6663854_Class_8.var_95 = "; result += Test6663854.Printer.print(var_95);
+        result += "\n";
+        result += "Test6663854_Class_8.var_97 = "; result += Test6663854.Printer.print(var_97);
+        result += "\n";
+        result += "Test6663854_Class_8.var_94 = "; result += Test6663854.Printer.print(var_94);
+        result += "\n";
+        result += "Test6663854_Class_8.var_96 = "; result += Test6663854.Printer.print(var_96);
+        result += "";
+        result += "\n]";
+        return result;
+    }
+}
+
+public class Test6663854 {
+    static short var_105 = 19709;
+    static int var_106 = (((Test6663854_Class_3.var_24 = (Test6663854_Class_3.var_24 = false)) | true) & (false || !false) ? ! (Test6663854_Class_3.var_24 |= Test6663854_Class_3.var_24) : Test6663854_Class_3.var_24 | false) ? (short)5430142769559462912L : Test6663854_Class_3.var_23 << Test6663854_Class_3.var_23;
+    boolean var_107 = false;
+
+
+    private static long func_0(final boolean arg_0)
+    {
+        {
+            Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (Test6663854_Class_4)(new Test6663854_Interface_2[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]);
+        }
+        var_105++;
+        ((new Test6663854_Class_7[Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23][Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23])[Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23)][((new Test6663854_Class_6[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]).var_78]).func_0();
+        byte var_108 = 107;
+        return Test6663854_Class_8.var_98 << ((new Test6663854_Class_4[var_108][Test6663854_Class_3.var_23])[var_108][Test6663854_Class_3.var_24 ? var_108 : var_108--]).var_42;
+    }
+
+    protected final long func_1(char[] arg_0, Object arg_1, String arg_2, final int arg_3)
+    {
+        (Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (Test6663854_Class_4)arg_1)))).var_43 = ((Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (Test6663854_Class_4)arg_1)))))).var_43 = (Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23));
+        {
+            Test6663854_Class_6.var_82 = ((Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (Test6663854_Class_4)arg_1)).var_41 = (((new Test6663854[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23]).var_107 ^= var_107) ? (new Test6663854_Class_3[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23] : (Test6663854_Class_3)arg_1);
+        }
+        Test6663854_Class_7.var_88 += (Test6663854_Class_6.var_81 >>>= (var_107 &= var_107) | var_107 ? (int)- ~Test6663854_Class_6.var_81 : Test6663854_Class_3.var_23);
+        short var_109 = 11276;
+        final Test6663854_Class_8 var_110 = (Test6663854_Class_3.var_24 ^ (Test6663854_Class_3.var_24 = !true) ? 5788412835121658880L : var_109--) * (298795405395535872L << (((Test6663854_Class_0)arg_1).var_1 << (Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23))) != (var_107 ? var_109 : var_105) ? (Test6663854_Class_8)(arg_1 = arg_1) : ((Test6663854_Class_3.var_24 &= ! !var_107) ? new Test6663854_Class_8() : new Test6663854_Class_8());
+        new Test6663854_Class_4().var_43 = Test6663854_Class_3.var_24 ^ Test6663854_Class_3.var_24 ^ Test6663854_Class_3.var_24 ? (Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23)) : (new byte[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23][Test6663854_Class_3.var_23];
+        Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (Test6663854_Class_4)arg_1)));
+        Test6663854_Class_3.var_20 = (var_105 ^= (var_106 *= ((Test6663854_Class_7)arg_1).var_87 - ~Test6663854_Class_3.var_23));
+        Test6663854_Class_5 var_111;
+        Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (Test6663854_Class_4)arg_1));
+        {
+            (arg_2 = "eiprceumt").compareTo(arg_2 = (arg_2 = "roae"));
+        }
+        if (false)
+        {
+            final Test6663854_Class_8 var_112 = var_110;
+            (true | (false && false) ? (Test6663854_Class_3)arg_1 : ((Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (Test6663854_Class_4)arg_1)).var_41 = (Test6663854_Class_6.var_82 = (Test6663854_Class_3)(var_110.var_96 = var_112)))).var_18 = var_110;
+        }
+        else
+        {
+            (Test6663854_Class_3.var_24 ? "jpunp" : "giappofq").concat(arg_2 = (arg_2 = arg_2));
+        }
+        {
+            Test6663854_Class_3.var_24 &= Test6663854_Class_3.var_24;
+        }
+        var_110.var_96 = (Test6663854_Class_7)(var_110.var_96 = (Test6663854_Class_7)arg_1);
+        {
+            ((var_107 ^= !var_107) && (!Test6663854_Class_3.var_24 | (Test6663854_Class_3.var_24 |= true) ? !false : !true) ? (Test6663854_Class_4)arg_1 : (Test6663854_Class_6.var_80 = (Test6663854_Class_4)arg_1)).var_41 = (((var_107 &= (var_107 &= (var_107 &= (Test6663854_Class_3.var_24 ^= (Test6663854_Class_3.var_24 = Test6663854_Class_3.var_24))))) ? (Test6663854_Class_6.var_80 = (Test6663854_Class_4)arg_1) : (Test6663854_Class_4)(Test6663854_Class_4.var_40 = (Test6663854_Class_7)arg_1)).var_41 = ((Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (Test6663854_Class_4)arg_1)))).var_41 = false || Test6663854_Class_3.var_24 ? (Test6663854_Class_3)arg_1 : (Test6663854_Class_3)arg_1));
+        }
+        Test6663854_Class_5.var_66 = false ? (new Test6663854_Class_6[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23] : new Test6663854_Class_6();
+        var_105++;
+        arg_1 = (var_111 = new Test6663854_Class_7());
+        ((true ? (Test6663854_Class_3)arg_1 : (Test6663854_Class_3)arg_1).var_22 = (new Test6663854_Class_0[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23]).var_8 >>= (var_106 /= ((Test6663854_Class_4)arg_1).var_42) - ((Test6663854_Class_3.var_24 ? (Test6663854_Class_4)arg_1 : (Test6663854_Class_4)arg_1).var_43 = ((Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (Test6663854_Class_4)arg_1))).var_43 = new Test6663854_Class_6().var_78));
+        return 5795172173382514688L | (((var_107 &= (Test6663854_Class_3.var_24 |= false) | Test6663854_Class_3.var_24) ? (Test6663854_Class_4)arg_1 : (Test6663854_Class_6.var_80 = (Test6663854_Class_4)arg_1)).var_43 = Test6663854_Class_3.var_23);
+    }
+
+    public static String execute()
+    {
+        try {
+            Test6663854 t = new Test6663854();
+            try { t.test(); }
+            catch(Throwable e) { }
+            try { return t.toString(); }
+            catch (Throwable e) { return "Error during result conversion to String"; }
+        } catch (Throwable e) { return "Error during test execution"; }
+    }
+
+    public static void main(String[] args)
+    {
+        try {
+            Test6663854 t = new Test6663854();
+            try { t.test(); }
+            catch(Throwable e) { }
+            try { System.out.println(t); }
+            catch(Throwable e) { }
+        } catch (Throwable e) { }
+    }
+
+    private void test()
+    {
+        if ((true & (false ? !false : true) || var_107 ? (var_105 <<= Test6663854_Class_5.var_65) / (Test6663854_Class_0.var_5 = var_105) : 2509076152709535744L) >= (true ? new Test6663854_Class_8().var_93 : (Test6663854_Class_3.var_20 = Test6663854_Class_3.var_23)))
+        {
+            (true ? (new Test6663854_Class_6[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23] : (Test6663854_Class_7.var_66 = new Test6663854_Class_6())).func_0();
+            (var_107 ? (new Test6663854_Class_4().var_41 = new Test6663854_Class_3()) : (Test6663854_Class_3)(new Test6663854_Class_3().var_18 = new Test6663854_Class_8())).var_18 = (new Test6663854_Class_4[new Test6663854_Class_4().var_43 = (Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23)])[Test6663854_Class_3.var_23];
+            {
+                ++var_106;
+                ((new Test6663854_Class_5().func_0() ? (Test6663854_Class_3.var_24 ^= true) : !false) ? (Test6663854_Class_7)new Test6663854_Class_5() : (Test6663854_Class_7)((Test6663854_Class_6.var_82 = new Test6663854_Class_3()).var_18 = (Test6663854_Class_6.var_80 = new Test6663854_Class_4()))).var_89 -= Test6663854_Class_3.var_23;
+            }
+            var_105 *= 4.59906108270682E307;
+            {
+                Test6663854_Class_0.var_4 = ((Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = new Test6663854_Class_4()))).var_43 = (Test6663854_Class_0.var_4 = false ? (Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23) : Test6663854_Class_3.var_23));
+            }
+            long var_113 = 0L;
+            var_106 %= (Test6663854_Class_7.var_88 = 'i');
+            for (short var_114 = ((Test6663854_Class_8)(new Test6663854_Interface_1[Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23))])[Test6663854_Class_3.var_23]).var_94; var_113 < 4 && false; "dmmntw".length())
+            {
+                var_114++;
+                var_113++;
+                Test6663854_Class_3.var_24 &= Test6663854_Class_3.var_24;
+                new String("kprvouugy");
+            }
+            {
+                new Test6663854_Class_8().var_94 /= (long)5.719961906225282E307;
+            }
+            double var_115 = 0;
+            Test6663854_Class_3.var_24 |= var_107;
+            do
+            {
+                Test6663854_Class_3.var_24 ^= (Test6663854_Class_3.var_24 |= !Test6663854_Class_3.var_24 && var_107);
+                var_115++;
+                var_106++;
+            } while (var_115 < 29 && (var_107 = (Test6663854_Class_3.var_24 |= Test6663854_Class_3.var_24)));
+            Test6663854_Class_7.var_66 = new Test6663854_Class_7();
+            Test6663854_Class_3.var_20 = (false ? (Test6663854_Class_6.var_80 = new Test6663854_Class_4()) : new Test6663854_Class_4()).var_42;
+        }
+        else
+        {
+            (((new Test6663854_Class_3[Test6663854_Class_3.var_23])[new Test6663854_Class_6().var_78]).var_22 = (new Test6663854_Class_3().var_22 = (new Test6663854_Class_3().var_22 = new Test6663854_Class_0()))).var_8 >>>= (var_106 &= new Test6663854_Class_0().var_8 << Test6663854_Class_7.var_65);
+        }
+        Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = new Test6663854_Class_4()));
+        {
+            Test6663854_Class_8 var_116;
+            (!true | (Test6663854_Class_3.var_24 &= true) ? new Test6663854_Class_7() : new Test6663854_Class_7()).var_89 /= Test6663854_Class_7.var_88--;
+            Test6663854_Class_6.var_81 &= ((new Test6663854_Class_6[Test6663854_Class_3.var_23][Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23]).var_78;
+        }
+        if (true ? var_107 : (Test6663854_Class_3.var_24 &= (Test6663854_Class_3.var_24 |= false)))
+        {
+            Test6663854_Class_3.var_24 |= (Test6663854_Class_3.var_24 &= (Test6663854_Class_3.var_24 ^= (Test6663854_Class_3.var_24 &= !true)));
+            Test6663854_Class_7.var_66 = (Test6663854_Class_7)new Test6663854_Class_5();
+            Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23)));
+            {
+                Test6663854_Class_7.var_88++;
+                float var_117 = 0F;
+                final long var_118 = 1388589135930756096L;
+                for (var_106++; ((Test6663854_Class_3.var_24 &= false) && (Test6663854_Class_3.var_24 |= true)) ^ true && (var_117 < 1 && Test6663854_Class_3.var_24); new Test6663854_Class_0().var_8 += !false || (new float[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23] <= Test6663854_Class_3.var_23 || Test6663854_Class_3.var_24 ? ((new Test6663854_Class_6[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]).var_78 : Test6663854_Class_3.var_23)
+                {
+                    Test6663854_Class_6.var_82 = (Test6663854_Class_7.var_67 = new Test6663854_Class_3());
+                    var_117++;
+                    Test6663854_Class_5.var_66 = Test6663854_Class_3.var_24 & !Test6663854_Class_3.var_24 ^ var_107 ^ new Test6663854_Class_0().var_8 == new Test6663854_Class_0().var_1 ? new Test6663854_Class_6() : new Test6663854_Class_6();
+                    (Test6663854_Class_6.var_80 = (new Test6663854_Class_4[Test6663854_Class_3.var_23])[Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23)]).var_43 = (Test6663854_Class_0.var_4 = ((Test6663854_Class_6)(new Object[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23]).var_78);
+                }
+                if (false & ((var_107 = true) || ! ((Test6663854_Class_3.var_24 ^= true || !var_107) ? false : Test6663854_Class_3.var_24 ^ var_107) ? (var_107 = Test6663854_Class_3.var_24) && Test6663854_Class_3.var_24 : true))
+                {
+                    "yvjk".toString();
+                }
+                else
+                {
+                    Test6663854_Class_4 var_119 = Test6663854_Class_6.var_80 = ((var_107 = true) ? (Test6663854_Class_6.var_81 |= var_106) : var_118) < (Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23))) ? (Test6663854_Class_4)(new Test6663854_Class_3().var_18 = new float[Test6663854_Class_3.var_23]) : (new Test6663854_Class_4[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23];
+                }
+                (false | (Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23) >= var_106 ? new Test6663854_Class_3() : (Test6663854_Class_7.var_67 = new Test6663854_Class_3())).var_18 = ((false ? (new Test6663854_Class_3[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23] : new Test6663854_Class_3()).var_22 = ((Test6663854_Class_7.var_67 = (Test6663854_Class_3)(new Test6663854_Class_3().var_18 = (Test6663854_Class_8)(new Test6663854_Class_8().var_96 = new Test6663854_Class_8()))).var_22 = ((Test6663854_Class_5.var_67 = new Test6663854_Class_3()).var_22 = new Test6663854_Class_0())));
+                short var_120;
+                (Test6663854_Class_6.var_80 = new Test6663854_Class_4()).var_41 = new Test6663854_Class_3();
+                ((new Test6663854_Class_8[Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23])[Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23]).var_96 = (Test6663854_Class_4.var_40 = !false & var_107 ? (Test6663854_Class_7)new Test6663854_Class_5() : new Test6663854_Class_7());
+            }
+            if ((Test6663854_Class_3.var_24 &= (Test6663854_Class_3.var_24 = Test6663854_Class_3.var_24)) || Test6663854_Class_3.var_24)
+            {
+                {
+                    var_106--;
+                    Test6663854_Class_6.var_81 -= 2.5265952E38F;
+                    Test6663854_Class_7.var_88++;
+                    Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (Test6663854_Class_4)(new Test6663854_Interface_1[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23]));
+                    Test6663854_Class_6.var_81++;
+                    (((new Test6663854_Class_7[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[(byte)(var_105 &= var_106)])[Test6663854_Class_3.var_23]).equals(((new Test6663854_Class_3[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]).var_18 = new Test6663854_Class_8());
+                }
+                ((((new Test6663854_Class_4[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]).var_41 = (Test6663854_Class_7.var_67 = (Test6663854_Class_7.var_67 = (new Test6663854_Class_3[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]))).var_18 = "mnxktbgv";
+                var_105++;
+                ((Test6663854_Class_3.var_24 &= Test6663854_Class_3.var_24) ? (new Test6663854_Class_3().var_22 = new Test6663854_Class_0()) : new Test6663854_Class_0()).var_7 = new Test6663854_Class_0().var_6;
+                var_105 = (var_105 >>>= Test6663854_Class_3.var_23);
+                ++var_105;
+                {
+                    Test6663854_Class_0.var_4 = ((Test6663854_Class_6.var_80 = (new Test6663854_Class_4[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]).var_43 = ((new Test6663854_Class_6[Test6663854_Class_3.var_23])[Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23)]).var_78);
+                    ((Test6663854_Class_8)(new Object[Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23]).var_96 = (Test6663854_Class_6.var_80 = (Test6663854_Class_4)(Test6663854_Class_5.var_66 = (new Test6663854_Class_6[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23]));
+                }
+                if (2125632862 >= Test6663854_Class_5.var_65)
+                {
+                    "nfdjgd".toLowerCase();
+                    ((new Test6663854_Class_8[Test6663854_Class_3.var_23])[Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23]).var_96 = new Test6663854_Class_8();
+                    Test6663854_Interface_2 var_121;
+                    new Test6663854_Class_0().var_7 = Test6663854_Class_8.var_97;
+                    --var_105;
+                    --Test6663854_Class_7.var_88;
+                    ((new Test6663854_Class_0[Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23)])[((Test6663854_Class_6)(Test6663854_Class_4.var_40 = (Test6663854_Class_3)(new Object[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23])).var_78]).var_7 = (((new Test6663854_Class_8[Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]).var_94 += (Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23));
+                }
+                else
+                {
+                    --Test6663854_Class_6.var_81;
+                }
+                if (false)
+                {
+                    Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23;
+                }
+                else
+                {
+                    var_105--;
+                    (true | (Test6663854_Class_3.var_24 |= var_107) ? (Test6663854_Class_7)(new Test6663854_Class_5[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23] : (new Test6663854_Class_7[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23]).var_89 *= 2.5396491E38F;
+                    var_107 = Test6663854_Class_3.var_24;
+                    var_105++;
+                }
+                Test6663854_Class_6.var_81--;
+                new Test6663854_Class_5();
+            }
+            else
+            {
+                Test6663854_Class_0 var_122;
+            }
+            var_107 = false;
+            (Test6663854_Class_3.var_24 ? new Test6663854_Class_7() : (Test6663854_Class_7)(new Test6663854_Class_3().var_18 = new Test6663854_Class_6())).var_89 *= (var_106 -= (new Test6663854_Class_3().var_22 = new Test6663854_Class_0()).var_6);
+            new Test6663854_Class_8().var_94 *= Test6663854_Class_7.var_88;
+            ((new String[new Test6663854_Class_6().var_78])[Test6663854_Class_3.var_23]).codePointAt(135817988);
+            final double var_123 = 7.395191963488875E307;
+            --var_105;
+            Test6663854_Class_7.var_88++;
+        }
+        else
+        {
+            (Test6663854_Class_6.var_80 = Test6663854_Class_3.var_24 ? (Test6663854_Class_6.var_80 = new Test6663854_Class_4()) : (Test6663854_Class_6.var_80 = new Test6663854_Class_4())).var_38 <<= (Test6663854_Class_7.var_88 <<= (new Test6663854_Class_4().var_43 = (Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23))) >>> Test6663854_Class_3.var_23);
+        }
+        int var_124 = 0;
+        Test6663854_Class_7 var_125;
+        float var_126 = 2.5216562E38F;
+        int var_127 = 0;
+        Test6663854_Class_7.var_66 = (Test6663854_Class_5.var_66 = (Test6663854_Class_4)(new Test6663854_Interface_1[Test6663854_Class_3.var_23])[Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23]);
+        while (var_127 < 1 && (Test6663854_Class_3.var_24 ? (var_107 |= var_107) : false))
+        {
+            short var_128;
+            var_127++;
+            var_125 = (var_125 = (var_125 = (var_125 = (Test6663854_Class_7)(new Test6663854_Interface_1[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23])));
+            "nkv".indexOf("ptrepiu" + "aljmjttym", var_106 << (new byte[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23]);
+        }
+        {
+            Test6663854_Class_7.var_66 = (Test6663854_Class_7.var_67 = (new Test6663854_Class_3[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]);
+        }
+        var_125 = (new Test6663854_Class_7[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23];
+        var_126 += var_106;
+        while (var_124 < 537 && true)
+        {
+            Test6663854_Class_7.var_88 %= var_105;
+            var_124++;
+            var_106++;
+            Test6663854_Class_5.var_66 = (Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (new Test6663854_Class_4[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23]));
+            final long var_129 = 3230407753980990464L;
+            new Test6663854_Class_8();
+            final Test6663854_Class_3 var_130 = (Test6663854_Class_3)((Test6663854_Class_3.var_24 |= !false) | (var_107 | !var_107) | (new Test6663854_Class_8().var_94++ >= 1015752753 | !true) ? (new Test6663854_Class_6[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23] : (new Test6663854_Interface_1[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23]);
+            Test6663854_Class_5.var_66 = (Test6663854_Class_7.var_66 = (Test6663854_Class_7.var_66 = (Test6663854_Class_7.var_67 = var_130)));
+        }
+        Test6663854_Class_7.var_66 = var_126 >= (Test6663854_Class_3.var_24 || (Test6663854_Class_3.var_24 = Test6663854_Class_3.var_24) ? var_106 : var_126) ? new Test6663854_Class_6() : new Test6663854_Class_6();
+        var_107 &= true;
+        (new Test6663854_Class_8().var_96 = (var_125 = (var_125 = (var_125 = new Test6663854_Class_7())))).func_0();
+        float var_131 = 0F;
+        Test6663854_Class_3 var_132 = new Test6663854_Class_3();
+        do
+        {
+            long var_133;
+            var_131++;
+            Test6663854_Interface_2 var_134 = Test6663854_Class_7.var_66 = var_132;
+        } while ((Test6663854_Class_3.var_24 = (Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23) == 1.2758309E38F) && (var_131 < 117 && ((new short[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23])[Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23)] == ((var_107 &= ! (Test6663854_Class_3.var_24 ^= false)) ^ ! !false ? new Test6663854_Class_0().var_6 : new Test6663854_Class_4().var_42)));
+        (var_107 ? var_132 : (Test6663854_Class_3)(Test6663854_Class_7.var_66 = var_132)).var_18 = (Test6663854_Class_0)((Test6663854_Class_5.var_67 = (Test6663854_Class_7.var_67 = (var_132 = var_132))).var_18 = ((new Test6663854_Class_8[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = (Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23))])[Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23]);
+        long var_135 = 0L;
+        (((new Test6663854_Class_8[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23])[Test6663854_Class_0.var_4 = (byte)+ (Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23)]).var_96 = var_132;
+        for (((var_107 = ((new Test6663854_Class_0[Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]).equals(var_132.var_18 = (Test6663854_Class_8)(new Test6663854_Class_8().var_96 = var_132))) ? "oedsntb" : "ouspr").concat("t"); var_135 < 27; (Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (new Test6663854_Class_4[Test6663854_Class_3.var_23])[Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23]))).var_38 %= (double)func_0(Test6663854_Class_3.var_24 = false ? !var_107 && ! !Test6663854_Class_3.var_24 : Test6663854_Class_3.var_24 ^ ! (Test6663854_Class_3.var_24 = Test6663854_Class_3.var_24)))
+        {
+            Test6663854_Class_6.var_80 = (Test6663854_Class_4)(new Test6663854_Class_8().var_96 = new Test6663854_Class_8());
+            var_135++;
+            Test6663854_Class_3.var_24 &= !true;
+            new Test6663854_Class_0().var_8 += Test6663854_Class_7.var_65;
+        }
+        Test6663854_Class_3 var_136 = var_132;
+        if ((var_107 = ((Test6663854_Class_4)(Test6663854_Class_7.var_66 = (var_125 = (Test6663854_Class_7)(Test6663854_Class_7.var_66 = var_136)))).func_0() ^ true) && (Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (Test6663854_Class_6.var_80 = (new Test6663854_Class_4[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]))).var_45 <= ((new Test6663854_Class_6[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23]).var_78)
+        {
+            var_126 %= ((var_107 = true) ? (var_105 *= 8272288534835139584L) : (var_105 |= var_105)) % new Test6663854_Class_7().var_89;
+            ((Test6663854_Class_3)(new Test6663854_Class_8().var_96 = (new Test6663854_Interface_1[Test6663854_Class_3.var_23])[Test6663854_Class_3.var_23])).var_18 = new Test6663854_Class_0();
+            var_105 <<= ++((Test6663854_Class_3.var_24 |= false) ? new Test6663854_Class_8() : (Test6663854_Class_8)(new Test6663854_Class_8().var_96 = new Test6663854_Class_4())).var_94 % (Test6663854_Class_0.var_2 - var_106);
+            Test6663854_Interface_2 var_137;
+            var_137 = new Test6663854_Class_4();
+            int var_138;
+        }
+        else
+        {
+            final Test6663854_Interface_2 var_139 = Test6663854_Class_4.var_40 = (var_136 = var_136);
+            new Test6663854_Class_8().var_93 -= new Test6663854_Class_6().var_78;
+            --((Test6663854_Class_3.var_24 ^= var_126 < Test6663854_Class_3.var_23 | !var_107) ? (new Test6663854_Class_8[Test6663854_Class_3.var_23][Test6663854_Class_3.var_23])[Test6663854_Class_0.var_4 = Test6663854_Class_3.var_23][new Test6663854_Class_6().var_78] : new Test6663854_Class_8()).var_93;
+            var_105 >>= ! (Test6663854_Class_3.var_24 = true) || ((Test6663854_Class_8)(! (Test6663854_Class_3.var_24 |= var_107) | (false ? Test6663854_Class_3.var_23 : new Test6663854_Class_6().var_78) != new Test6663854_Class_8().var_93 ? (Test6663854_Class_6.var_82 = var_136) : (new Test6663854_Class_8().var_96 = (var_132 = var_136)))).var_94 >= Test6663854_Class_6.var_81++ ? var_124 : '`';
+        }
+        var_126 /= var_105;
+        var_105 *= var_127;
+        var_107 ^= (var_107 = true);
+        {
+            Test6663854_Class_7.var_66 = (var_125 = (Test6663854_Class_7)(new Test6663854_Class_5[Test6663854_Class_3.var_23])[new Test6663854_Class_6().var_78]);
+        }
+        (var_136.var_22 = ((var_136 = var_132).var_22 = new Test6663854_Class_0())).var_7 = ((!false ? var_132 : (Test6663854_Class_7.var_67 = var_132)).var_22 = (((Test6663854_Class_3)(var_136.var_18 = var_136)).var_22 = (Test6663854_Class_0)(var_132.var_18 = "sgybwy"))).var_6;
+        ((Test6663854_Class_3.var_20 = var_105--) >= ~Test6663854_Class_3.var_23 ? new String() : "lgcfkbsw").replace(new Test6663854_Class_0().var_8 |= 't', false ? (Test6663854_Class_7.var_88 -= var_105) : 'q');
+        Test6663854_Class_7.var_88 <<= Test6663854_Class_7.var_88;
+    }
+    public String toString()
+    {
+        String result =  "[\n";
+        result += "Test6663854.var_105 = "; result += Printer.print(var_105);
+        result += "\n";
+        result += "Test6663854.var_107 = "; result += Printer.print(var_107);
+        result += "\n";
+        result += "Test6663854.var_106 = "; result += Printer.print(var_106);
+        result += "";
+        result += "\n]";
+        return result;
+    }
+    static class Printer
+    {
+        public static String print(boolean arg) { return String.valueOf(arg); }
+        public static String print(byte arg)    { return String.valueOf(arg); }
+        public static String print(short arg)   { return String.valueOf(arg); }
+        public static String print(char arg)    { return String.valueOf((int)arg); }
+        public static String print(int arg)     { return String.valueOf(arg); }
+        public static String print(long arg)    { return String.valueOf(arg); }
+        public static String print(float arg)   { return String.valueOf(arg); }
+        public static String print(double arg)  { return String.valueOf(arg); }
+
+
+        public static String print(Object arg)
+        {
+            return print_r(new java.util.Stack(), arg);
+        }
+
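+        // Recursively formats arg; the visitedObjects stack detects cycles in
+        // self-referential arrays and prints "<recursive>" instead of looping.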
+        private static String print_r(java.util.Stack visitedObjects, Object arg)
+        {
+            String result = "";
+            if (arg == null)
+                result += "null";
+            else
+            if (arg.getClass().isArray())
+            {
+                for (int i = 0; i < visitedObjects.size(); i++)
+                    if (visitedObjects.elementAt(i) == arg) return "<recursive>";
+
+                visitedObjects.push(arg);
+
+                final String delimiter = ", ";
+                result += "[";
+
+                if (arg instanceof Object[])
+                {
+                    Object[] array = (Object[]) arg;
+                    for (int i = 0; i < array.length; i++)
+                    {
+                        result += print_r(visitedObjects, array[i]);
+                        if (i < array.length - 1) result += delimiter;
+                    }
+                }
+                else
+                if (arg instanceof boolean[])
+                {
+                    boolean[] array = (boolean[]) arg;
+                    for (int i = 0; i < array.length; i++)
+                    {
+                        result += print(array[i]);
+                        if (i < array.length - 1) result += delimiter;
+                    }
+                }
+                else
+                if (arg instanceof byte[])
+                {
+                    byte[] array = (byte[]) arg;
+                    for (int i = 0; i < array.length; i++)
+                    {
+                        result += print(array[i]);
+                        if (i < array.length - 1) result += delimiter;
+                    }
+                }
+                else
+                if (arg instanceof short[])
+                {
+                    short[] array = (short[]) arg;
+                    for (int i = 0; i < array.length; i++)
+                    {
+                        result += print(array[i]);
+                        if (i < array.length - 1) result += delimiter;
+                    }
+                }
+                else
+                if (arg instanceof char[])
+                {
+                    char[] array = (char[]) arg;
+                    for (int i = 0; i < array.length; i++)
+                    {
+                        result += print(array[i]);
+                        if (i < array.length - 1) result += delimiter;
+                    }
+                }
+                else
+                if (arg instanceof int[])
+                {
+                    int[] array = (int[]) arg;
+                    for (int i = 0; i < array.length; i++)
+                    {
+                        result += print(array[i]);
+                        if (i < array.length - 1) result += delimiter;
+                    }
+                }
+                else
+                if (arg instanceof long[])
+                {
+                    long[] array = (long[]) arg;
+                    for (int i = 0; i < array.length; i++)
+                    {
+                        result += print(array[i]);
+                        if (i < array.length - 1) result += delimiter;
+                    }
+                }
+                else
+                if (arg instanceof float[])
+                {
+                    float[] array = (float[]) arg;
+                    for (int i = 0; i < array.length; i++)
+                    {
+                        result += print(array[i]);
+                        if (i < array.length - 1) result += delimiter;
+                    }
+                }
+                else
+                if (arg instanceof double[])
+                {
+                    double[] array = (double[]) arg;
+                    for (int i = 0; i < array.length; i++)
+                    {
+                        result += print(array[i]);
+                        if (i < array.length - 1) result += delimiter;
+                    }
+                }
+
+                result += "]";
+                visitedObjects.pop();
+
+            } else
+            {
+                result += arg.toString();
+            }
+
+            return result;
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6892265/Test.java	Thu May 13 18:26:35 2010 -0700
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6892265
+ * @summary System.arraycopy unable to reference elements beyond Integer.MAX_VALUE bytes
+ *
+ * @run main/othervm Test
+ */
+
+public class Test {
+  static  final int NCOPY = 1;
+  static  final int OVERFLOW = 1;
+  static  int[] src2 = new int[NCOPY];
+  static  int[] dst2;
+
+  static void test() {
+    int N;
+
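+    // N * 4 bytes lands just past Integer.MAX_VALUE, so the copy references
+    // elements whose byte offsets overflow a 32-bit signed value (the bug's trigger).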
+    N = Integer.MAX_VALUE/4 + OVERFLOW;
+    System.arraycopy(src2, 0, dst2, N, NCOPY);
+    System.arraycopy(dst2, N, src2, 0, NCOPY);
+  }
+
+  public static void main(String[] args) {
+    try {
+      dst2 = new int[NCOPY + Integer.MAX_VALUE/4 + OVERFLOW];
+    } catch (OutOfMemoryError e) {
+       System.exit(95); // Not enough memory
+    }
+    System.out.println("warmup");
+    for (int i=0; i <11000; i++) {
+      test();
+    }
+    System.out.println("start");
+    for (int i=0; i <1000; i++) {
+      test();
+    }
+    System.out.println("finish");
+  }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6930043/Test6930043.java	Thu May 13 18:26:35 2010 -0700
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2010 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6930043
+ * @summary C2: SIGSEGV in javasoft.sqe.tests.lang.arr017.arr01702.arr01702.loop_forw(II)I
+ *
+ * @run main Test6930043
+ */
+
+import java.io.PrintStream;
+
+public class Test6930043 {
+    int[] a;
+    int idx;
+
+    public int loop_back(int i, int i_0_) {
+        int i_1_ = 0;
+        int[] is = a;
+        if (is == null) return 0;
+        for (int i_2_ = i; i_2_ >= i_0_; i_2_--)
+            i_1_ += is[idx = i_2_];
+        return i_1_;
+    }
+
+    public int loop_forw(int start, int end) {
+        int result = 0;
+        int[] is = a;
+        if (is == null) return 0;
+        for (int index = start; index < end; index++)
+            result += is[index];
+            // result += is[idx = index];
+        return result;
+    }
+
+    public static void main(String[] strings) {
+        Test6930043 var_Test6930043 = new Test6930043();
+        var_Test6930043.a = new int[1000000];
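+        // Warm up loop_forw with in-range bounds before the out-of-range calls below.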
+        var_Test6930043.loop_forw(10, 999990);
+        var_Test6930043.loop_forw(10, 999990);
+        for (int i = 0; i < 3; i++) {
+            try {
+                if (var_Test6930043.loop_forw(-1, 999990) != 0) throw new InternalError();
+            } catch (ArrayIndexOutOfBoundsException e) { }
+        }
+        var_Test6930043.loop_back(999990, 10);
+        var_Test6930043.loop_back(999990, 10);
+        for (int i = 0; i < 3; i++) {
+            try {
+                if (var_Test6930043.loop_back(999990, -1) != 0) throw new InternalError();
+            } catch (ArrayIndexOutOfBoundsException e) { }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6932496/Test6932496.java	Thu May 13 18:26:35 2010 -0700
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2010 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6932496
+ * @summary incorrect deopt of jsr subroutine on 64 bit c1
+ *
+ * @compile -source 1.5 -target 1.5 -XDjsrlimit=0 Test6932496.java
+ * @run main/othervm -Xcomp -XX:CompileOnly=Test6932496.m Test6932496
+ */
+
+public class Test6932496 {
+    static class A {
+        volatile boolean flag = false;
+    }
+
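+    // The empty try with a non-empty finally is compiled as a jsr/ret subroutine
+    // once -XDjsrlimit=0 (see the @compile tag above) keeps javac from inlining it.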
+    static void m() {
+        try {
+        } finally {
+            A a = new A();
+            a.flag = true;
+        }
+    }
+
+
+    static public void main(String[] args) {
+        m();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6935535/Test.java	Thu May 13 18:26:35 2010 -0700
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2010 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/**
+ * @test
+ * @bug 6935535
+ * @summary String.indexOf() returns incorrect result on x86 with SSE4.2
+ *
+ * @run main/othervm -Xcomp Test
+ */
+
+public class Test {
+
+  static int IndexOfTest(String str) {
+    return str.indexOf("1111111111111xx1x");
+  }
+
+  public static void main(String args[]) {
+    String str = "1111111111111xx1111111111111xx1x";
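+    // Truncating to 31 chars cuts off the final 'x', so the needle no longer
+    // occurs and a correct indexOf must return -1.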
+    str = str.substring(0, 31);
+    int idx = IndexOfTest(str);
+    System.out.println("IndexOf(" + "1111111111111xx1x" + ") = " + idx + " in " + str);
+    if (idx != -1) {
+      System.exit(97);
+    }
+  }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6946040/TestCharShortByteSwap.java	Thu May 13 18:26:35 2010 -0700
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2010 Google, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 6946040
+ * @summary Tests Character/Short.reverseBytes and their intrinsics implementation in the server compiler
+ * @run main/othervm -Xbatch -server -XX:CompileOnly=.testChar,.testShort TestCharShortByteSwap
+ */
+
+// This test must run without any command line arguments.
+
+public class TestCharShortByteSwap {
+
+  private static short initShort(String[] args, short v) {
+    if (args.length > 0) {
+      try {
+        return (short) Integer.valueOf(args[0]).intValue();
+      } catch (NumberFormatException e) { }
+    }
+    return v;
+  }
+
+  private static char initChar(String[] args, char v) {
+    if (args.length > 0) {
+      try {
+        return (char) Integer.valueOf(args[0]).intValue();
+      } catch (NumberFormatException e) { }
+    }
+    return v;
+  }
+
+  private static void testChar(char a, char b) {
+    if (a != Character.reverseBytes(b)) {
+      throw new RuntimeException("FAIL: " + (int)a + " != Character.reverseBytes(" + (int)b + ")");
+    }
+    if (b != Character.reverseBytes(a)) {
+      throw new RuntimeException("FAIL: " + (int)b + " != Character.reverseBytes(" + (int)a + ")");
+    }
+  }
+
+  private static void testShort(short a, short b) {
+    if (a != Short.reverseBytes(b)) {
+      throw new RuntimeException("FAIL: " + (int)a + " != Short.reverseBytes(" + (int)b + ")");
+    }
+    if (b != Short.reverseBytes(a)) {
+      throw new RuntimeException("FAIL: " + (int)b + " != Short.reverseBytes(" + (int)a + ")");
+    }
+  }
+
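+  // Each pair of test values is a byte-swapped image of the other (0x0123 <-> 0x2301,
+  // 0xaabb <-> 0xbbaa), including values whose swapped short form is negative.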
+  public static void main(String[] args) {
+    for (int i = 0; i < 100000; ++i) { // Trigger compilation
+      char c1 = initChar(args, (char) 0x0123);
+      char c2 = initChar(args, (char) 0x2301);
+      char c3 = initChar(args, (char) 0xaabb);
+      char c4 = initChar(args, (char) 0xbbaa);
+      short s1 = initShort(args, (short) 0x0123);
+      short s2 = initShort(args, (short) 0x2301);
+      short s3 = initShort(args, (short) 0xaabb);
+      short s4 = initShort(args, (short) 0xbbaa);
+      testChar(c1, c2);
+      testChar(c3, c4);
+      testShort(s1, s2);
+      testShort(s3, s4);
+    }
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/6929067/T.java	Thu May 13 18:26:35 2010 -0700
@@ -0,0 +1,12 @@
+public class T
+{
+  public static boolean foo(boolean bar)
+  {
+    return bar;
+  }
+
+  public static void printIt()
+  {
+    System.out.println("Hello");
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/6929067/Test6929067.sh	Thu May 13 18:26:35 2010 -0700
@@ -0,0 +1,60 @@
+#!/bin/sh
+
+##
+## @test Test6929067.sh
+## @bug 6929067
+## @summary Stack guard pages should be removed when thread is detached
+## @run shell Test6929067.sh
+##
+
+if [ "${TESTSRC}" = "" ]
+then TESTSRC=.
+fi
+
+if [ "${TESTJAVA}" = "" ]
+then
+  PARENT=`dirname \`which java\``
+  TESTJAVA=`dirname ${PARENT}`
+  echo "TESTJAVA not set, selecting " ${TESTJAVA}
+  echo "If this is incorrect, try setting the variable manually."
+fi
+
+BIT_FLAG=""
+
+# set platform-dependent variables
+OS=`uname -s`
+case "$OS" in
+  Linux)
+    NULL=/dev/null
+    PS=":"
+    FS="/"
+    ;;
+  SunOS | Windows_* )
+    NULL=NUL
+    PS=";"
+    FS="\\"
+    echo "Test passed; only valid for Linux"
+    exit 0;
+    ;;
+  * )
+    echo "Unrecognized system!"
+    exit 1;
+    ;;
+esac
+
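+# Make the client VM's libjvm.so visible both for linking invoke.c below and at run time.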
+LD_LIBRARY_PATH=.:${TESTJAVA}/jre/lib/i386/client:/usr/openwin/lib:/usr/dt/lib:/usr/lib:$LD_LIBRARY_PATH
+export LD_LIBRARY_PATH
+
+THIS_DIR=`pwd`
+
+cp ${TESTSRC}${FS}invoke.c ${THIS_DIR}
+cp ${TESTSRC}${FS}T.java ${THIS_DIR}
+
+
+${TESTJAVA}${FS}bin${FS}java ${BIT_FLAG} -fullversion
+
+${TESTJAVA}${FS}bin${FS}javac T.java
+
+gcc -o invoke -I${TESTJAVA}/include -I${TESTJAVA}/include/linux invoke.c ${TESTJAVA}/jre/lib/i386/client/libjvm.so
+./invoke
+exit $?
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/6929067/invoke.c	Thu May 13 18:26:35 2010 -0700
@@ -0,0 +1,90 @@
+#include <assert.h>
+#include <stdio.h>
+#include <jni.h>
+#include <alloca.h>
+
+#include <pthread.h>
+
+union env_union
+{
+  void *void_env;
+  JNIEnv *jni_env;
+};
+
+union env_union tmp;
+JNIEnv* env;
+JavaVM* jvm;
+JavaVMInitArgs vm_args;
+JavaVMOption options[1];
+jclass class_id;
+jmethodID method_id;
+jint result;
+
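+/* Deep recursion that keeps a live alloca'd slot in every frame: it burns
+   native stack, so it faults if stale guard pages were left on this stack. */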
+long product(unsigned long n, unsigned long m) {
+    if (m == 1) {
+      return n;
+    } else {
+      int *p = alloca(sizeof (int));
+      *p = n;
+      return product (n, m-1) + *p;
+    }
+}
+
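+/* Attach, call into Java, detach, then recurse deeply in native code; run twice
+   to check that each detach removed the stack guard pages (bug 6929067). */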
+void *
+floobydust (void *p)
+{
+  (*jvm)->AttachCurrentThread(jvm, &tmp.void_env, NULL);
+  env = tmp.jni_env;
+
+  class_id = (*env)->FindClass (env, "T");
+  assert (class_id);
+
+  method_id = (*env)->GetStaticMethodID (env, class_id, "printIt", "()V");
+  assert (method_id);
+
+  (*env)->CallStaticVoidMethod (env, class_id, method_id, NULL);
+
+  (*jvm)->DetachCurrentThread(jvm);
+
+  printf("%ld\n", product(5000,5000));
+
+  (*jvm)->AttachCurrentThread(jvm, &tmp.void_env, NULL);
+  env = tmp.jni_env;
+
+  class_id = (*env)->FindClass (env, "T");
+  assert (class_id);
+
+  method_id = (*env)->GetStaticMethodID (env, class_id, "printIt", "()V");
+  assert (method_id);
+
+  (*env)->CallStaticVoidMethod (env, class_id, method_id, NULL);
+
+  (*jvm)->DetachCurrentThread(jvm);
+
+  printf("%ld\n", product(5000,5000));
+
+  return NULL;
+}
+
+int
+main (int argc, const char** argv)
+{
+  options[0].optionString = "-Xss320k";
+
+  vm_args.version = JNI_VERSION_1_2;
+  vm_args.ignoreUnrecognized = JNI_TRUE;
+  vm_args.options = options;
+  vm_args.nOptions = 1;
+
+  result = JNI_CreateJavaVM (&jvm, &tmp.void_env, &vm_args);
+  assert (result >= 0);
+
+  env = tmp.jni_env;
+
+  floobydust (NULL);
+
+  pthread_t thr;
+  pthread_create (&thr, NULL, floobydust, NULL);
+  pthread_join (thr, NULL);
+
+  return 0;
+}