changeset 2021:0915f9be781c

Merge
author trims
date Thu, 13 Jan 2011 22:54:23 -0800
parents 76d6282dcfe5 (current diff) ffd725ff6943 (diff)
children 75efcee5ac47
diffstat 79 files changed, 1057 insertions(+), 1104 deletions(-)
--- a/src/cpu/sparc/vm/assembler_sparc.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/cpu/sparc/vm/assembler_sparc.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -4104,7 +4104,7 @@
 
   ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t1);
   sub(top, t1, t1); // size of tlab's allocated portion
-  incr_allocated_bytes(t1, 0, t2);
+  incr_allocated_bytes(t1, t2, t3);
 
   // refill the tlab with an eden allocation
   bind(do_refill);
@@ -4138,19 +4138,14 @@
   delayed()->nop();
 }
 
-void MacroAssembler::incr_allocated_bytes(Register var_size_in_bytes,
-                                          int con_size_in_bytes,
-                                          Register t1) {
+void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes,
+                                          Register t1, Register t2) {
   // Bump total bytes allocated by this thread
   assert(t1->is_global(), "must be global reg"); // so all 64 bits are saved on a context switch
-  assert_different_registers(var_size_in_bytes, t1);
+  assert_different_registers(size_in_bytes.register_or_noreg(), t1, t2);
   // v8 support has gone the way of the dodo
   ldx(G2_thread, in_bytes(JavaThread::allocated_bytes_offset()), t1);
-  if (var_size_in_bytes->is_valid()) {
-    add(t1, var_size_in_bytes, t1);
-  } else {
-    add(t1, con_size_in_bytes, t1);
-  }
+  add(t1, ensure_simm13_or_reg(size_in_bytes, t2), t1);
   stx(t1, G2_thread, in_bytes(JavaThread::allocated_bytes_offset()));
 }
 
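The rewritten incr_allocated_bytes() above takes a RegisterOrConstant instead of a (register, constant) pair, so the register/immediate decision collapses into the single ensure_simm13_or_reg() operand. A minimal standalone sketch of that idea, using a hypothetical RegOrConst type standing in for HotSpot's RegisterOrConstant:

  #include <cstddef>

  // Illustrative value type: carries either a register index or an immediate.
  struct RegOrConst {
    int  reg;        // hypothetical register index; -1 means "use the constant"
    long constant;   // immediate value, used when reg == -1
  };

  static RegOrConst from_reg(int r)    { RegOrConst v; v.reg = r;  v.constant = 0; return v; }
  static RegOrConst from_const(long c) { RegOrConst v; v.reg = -1; v.constant = c; return v; }

  // One add path for both forms, mirroring
  //   add(t1, ensure_simm13_or_reg(size_in_bytes, t2), t1);
  static long bump_allocated_bytes(long current, RegOrConst size, const long* reg_file) {
    long delta = (size.reg >= 0) ? reg_file[size.reg] : size.constant;
    return current + delta;
  }
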
--- a/src/cpu/sparc/vm/assembler_sparc.hpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/cpu/sparc/vm/assembler_sparc.hpp	Thu Jan 13 22:54:23 2011 -0800
@@ -823,15 +823,23 @@
   };
 
   // test if x is within signed immediate range for nbits
-  static bool is_simm(int x, int nbits) { return -( 1 << nbits-1 )  <= x   &&   x  <  ( 1 << nbits-1 ); }
+  static bool is_simm(intptr_t x, int nbits) { return -( intptr_t(1) << nbits-1 )  <= x   &&   x  <  ( intptr_t(1) << nbits-1 ); }
 
   // test if -4096 <= x <= 4095
-  static bool is_simm13(int x) { return is_simm(x, 13); }
+  static bool is_simm13(intptr_t x) { return is_simm(x, 13); }
+
+  static bool is_in_wdisp_range(address a, address b, int nbits) {
+    intptr_t d = intptr_t(b) - intptr_t(a);
+    return is_simm(d, nbits + 2);
+  }
 
   // test if label is in simm16 range in words (wdisp16).
   bool is_in_wdisp16_range(Label& L) {
-    intptr_t d = intptr_t(pc()) - intptr_t(target(L));
-    return is_simm(d, 18);
+    return is_in_wdisp_range(target(L), pc(), 16);
+  }
+  // test if the distance between two addresses fits in simm30 range in words
+  static bool is_in_wdisp30_range(address a, address b) {
+    return is_in_wdisp_range(a, b, 30);
   }
 
   enum ASIs { // page 72, v9
@@ -1843,6 +1851,8 @@
   inline void jmp( Register s1, Register s2 );
   inline void jmp( Register s1, int simm13a, RelocationHolder const& rspec = RelocationHolder() );
 
+  // Check if the call target is out of wdisp30 range (relative to the code cache)
+  static inline bool is_far_target(address d);
   inline void call( address d,  relocInfo::relocType rt = relocInfo::runtime_call_type );
   inline void call( Label& L,   relocInfo::relocType rt = relocInfo::runtime_call_type );
   inline void callr( Register s1, Register s2 );
@@ -2389,7 +2399,8 @@
     Label&   slow_case                 // continuation point if fast allocation fails
   );
   void tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case);
-  void incr_allocated_bytes(Register var_size_in_bytes, int con_size_in_bytes, Register t1);
+  void incr_allocated_bytes(RegisterOrConstant size_in_bytes,
+                            Register t1, Register t2);
 
   // interface method calling
   void lookup_interface_method(Register recv_klass,
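The new is_simm()/is_in_wdisp_range() helpers above widen the displacement test to intptr_t and fold the word/byte conversion into a single nbits + 2 check, since SPARC instructions are 4-byte aligned. A hedged arithmetic sketch of that test, as standalone helpers rather than the Assembler class:

  #include <stdint.h>

  static bool fits_simm(intptr_t x, int nbits) {
    return -(intptr_t(1) << (nbits - 1)) <= x && x < (intptr_t(1) << (nbits - 1));
  }

  static bool in_wdisp_range(const void* a, const void* b, int nbits) {
    intptr_t byte_disp = intptr_t(b) - intptr_t(a);    // distance in bytes
    return fits_simm(byte_disp, nbits + 2);            // +2: the encoding counts 4-byte words
  }

With nbits = 30 this reproduces the old "disp == (intptr_t)(int32_t)disp" test that the next file removes from MacroAssembler::call().
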
--- a/src/cpu/sparc/vm/assembler_sparc.inline.hpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/cpu/sparc/vm/assembler_sparc.inline.hpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -588,10 +588,13 @@
 inline void MacroAssembler::jmp( Register s1, Register s2 ) { jmpl( s1, s2, G0 ); }
 inline void MacroAssembler::jmp( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, G0, rspec); }
 
+inline bool MacroAssembler::is_far_target(address d) {
+  return !is_in_wdisp30_range(d, CodeCache::low_bound()) || !is_in_wdisp30_range(d, CodeCache::high_bound());
+}
+
 // Call with a check to see if we need to deal with the added
 // expense of relocation and if we overflow the displacement
-// of the quick call instruction./
-// Check to see if we have to deal with relocations
+// of the quick call instruction.
 inline void MacroAssembler::call( address d, relocInfo::relocType rt ) {
 #ifdef _LP64
   intptr_t disp;
@@ -603,14 +606,12 @@
 
   // Is this address within range of the call instruction?
   // If not, use the expensive instruction sequence
-  disp = (intptr_t)d - (intptr_t)pc();
-  if ( disp != (intptr_t)(int32_t)disp ) {
+  if (is_far_target(d)) {
     relocate(rt);
     AddressLiteral dest(d);
     jumpl_to(dest, O7, O7);
-  }
-  else {
-    Assembler::call( d, rt );
+  } else {
+    Assembler::call(d, rt);
   }
 #else
   Assembler::call( d, rt );
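is_far_target() deliberately tests the destination against both ends of the code cache, so the near/far decision holds no matter where inside the cache the call instruction is eventually emitted. A small self-contained sketch of that check, with cache_low/cache_high as stand-ins for CodeCache::low_bound() and CodeCache::high_bound():

  #include <stdint.h>

  static bool reaches30(const void* from, const void* to) {
    intptr_t d = (intptr_t)to - (intptr_t)from;
    return d == (intptr_t)(int32_t)d;   // the 32-bit byte span of a wdisp30 call
  }

  static bool far_target(const void* dest, const void* cache_low, const void* cache_high) {
    // Near only if reachable from both ends of the code cache, and therefore
    // from any call site that could be emitted inside it.
    return !reaches30(cache_low, dest) || !reaches30(cache_high, dest);
  }
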
--- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2358,6 +2358,8 @@
          op->tmp3()->as_register()  == G4 &&
          op->tmp4()->as_register()  == O1 &&
          op->klass()->as_register() == G5, "must be");
+
+  LP64_ONLY( __ signx(op->len()->as_register()); )
   if (UseSlowPath ||
       (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
       (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
--- a/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -170,11 +170,13 @@
   Register t2,                         // temp register
   Label&   slow_case                   // continuation point if fast allocation fails
 ) {
+  RegisterOrConstant size_in_bytes = var_size_in_bytes->is_valid()
+    ? RegisterOrConstant(var_size_in_bytes) : RegisterOrConstant(con_size_in_bytes);
   if (UseTLAB) {
     tlab_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, slow_case);
   } else {
     eden_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
-    incr_allocated_bytes(var_size_in_bytes, con_size_in_bytes, t1);
+    incr_allocated_bytes(size_in_bytes, t1, t2);
   }
 }
 
--- a/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -461,7 +461,7 @@
           // get the instance size
           __ ld(G5_klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), G1_obj_size);
           __ eden_allocate(O0_obj, G1_obj_size, 0, G3_t1, G4_t2, slow_path);
-          __ incr_allocated_bytes(G1_obj_size, 0, G3_t1);
+          __ incr_allocated_bytes(G1_obj_size, G3_t1, G4_t2);
 
           __ initialize_object(O0_obj, G5_klass, G1_obj_size, 0, G3_t1, G4_t2);
           __ verify_oop(O0_obj);
@@ -577,7 +577,7 @@
           __ and3(G1_arr_size, ~MinObjAlignmentInBytesMask, G1_arr_size);
 
           __ eden_allocate(O0_obj, G1_arr_size, 0, G3_t1, O1_t2, slow_path);  // preserves G1_arr_size
-          __ incr_allocated_bytes(G1_arr_size, 0, G3_t1);
+          __ incr_allocated_bytes(G1_arr_size, G3_t1, O1_t2);
 
           __ initialize_header(O0_obj, G5_klass, G4_length, G3_t1, O1_t2);
           __ ldub(klass_lh, G3_t1, klass_lh_header_size_offset);
--- a/src/cpu/sparc/vm/interp_masm_sparc.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/cpu/sparc/vm/interp_masm_sparc.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1295,16 +1295,13 @@
 // Get the method data pointer from the methodOop and set the
 // specified register to its value.
 
-void InterpreterMacroAssembler::set_method_data_pointer_offset(Register Roff) {
+void InterpreterMacroAssembler::set_method_data_pointer() {
   assert(ProfileInterpreter, "must be profiling interpreter");
   Label get_continue;
 
   ld_ptr(Lmethod, in_bytes(methodOopDesc::method_data_offset()), ImethodDataPtr);
   test_method_data_pointer(get_continue);
   add(ImethodDataPtr, in_bytes(methodDataOopDesc::data_offset()), ImethodDataPtr);
-  if (Roff != noreg)
-    // Roff contains a method data index ("mdi").  It defaults to zero.
-    add(ImethodDataPtr, Roff, ImethodDataPtr);
   bind(get_continue);
 }
 
@@ -1315,10 +1312,11 @@
   Label zero_continue;
 
   // Test MDO to avoid the call if it is NULL.
-  ld_ptr(Lmethod, methodOopDesc::method_data_offset(), ImethodDataPtr);
+  ld_ptr(Lmethod, in_bytes(methodOopDesc::method_data_offset()), ImethodDataPtr);
   test_method_data_pointer(zero_continue);
   call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), Lmethod, Lbcp);
-  set_method_data_pointer_offset(O0);
+  add(ImethodDataPtr, in_bytes(methodDataOopDesc::data_offset()), ImethodDataPtr);
+  add(ImethodDataPtr, O0, ImethodDataPtr);
   bind(zero_continue);
 }
 
@@ -1369,7 +1367,6 @@
 }
 
 void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count,
-                                                                Register cur_bcp,
                                                                 Register Rtmp,
                                                                 Label &profile_continue) {
   assert(ProfileInterpreter, "must be profiling interpreter");
@@ -1400,8 +1397,8 @@
   delayed()->nop();
 
   // Build it now.
-  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), cur_bcp);
-  set_method_data_pointer_offset(O0);
+  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
+  set_method_data_pointer_for_bcp();
   ba(false, profile_continue);
   delayed()->nop();
   bind(done);
--- a/src/cpu/sparc/vm/interp_masm_sparc.hpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/cpu/sparc/vm/interp_masm_sparc.hpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -269,12 +269,11 @@
 
 #ifndef CC_INTERP
   // Interpreter profiling operations
-  void set_method_data_pointer() { set_method_data_pointer_offset(noreg); }
+  void set_method_data_pointer();
   void set_method_data_pointer_for_bcp();
-  void set_method_data_pointer_offset(Register mdi_reg);
   void test_method_data_pointer(Label& zero_continue);
   void verify_method_data_pointer();
-  void test_invocation_counter_for_mdp(Register invocation_count, Register cur_bcp, Register Rtmp, Label &profile_continue);
+  void test_invocation_counter_for_mdp(Register invocation_count, Register Rtmp, Label &profile_continue);
 
   void set_mdp_data_at(int constant, Register value);
   void increment_mdp_data_at(Address counter, Register bumped_count,
--- a/src/cpu/sparc/vm/methodHandles_sparc.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/cpu/sparc/vm/methodHandles_sparc.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -395,7 +395,7 @@
 //
 // Generate an "entry" field for a method handle.
 // This determines how the method handle will respond to calls.
-void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek, TRAPS) {
+void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
   // Here is the register state during an interpreted call,
   // as set up by generate_method_handle_interpreter_entry():
   // - G5: garbage temp (was MethodHandle.invoke methodOop, unused)
@@ -447,8 +447,9 @@
       // exception.  Since we use a C2I adapter to set up the
       // interpreter state, arguments are expected in compiler
       // argument registers.
-      methodHandle mh(raise_exception_method());
-      address c2i_entry = methodOopDesc::make_adapters(mh, CATCH);
+      assert(raise_exception_method(), "must be set");
+      address c2i_entry = raise_exception_method()->get_c2i_entry();
+      assert(c2i_entry, "method must be linked");
 
       __ mov(O5_savedSP, SP);  // Cut the stack back to where the caller started.
 
--- a/src/cpu/sparc/vm/sparc.ad	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/cpu/sparc/vm/sparc.ad	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -575,7 +575,11 @@
 
 int MachCallRuntimeNode::ret_addr_offset() {
 #ifdef _LP64
-  return NativeFarCall::instruction_size;  // farcall; delay slot
+  if (MacroAssembler::is_far_target(entry_point())) {
+    return NativeFarCall::instruction_size;
+  } else {
+    return NativeCall::instruction_size;
+  }
 #else
   return NativeCall::instruction_size;  // call; delay slot
 #endif
@@ -941,7 +945,7 @@
 #endif
 }
 
-void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, relocInfo::relocType rtype, bool preserve_g2 = false, bool force_far_call = false) {
+void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, relocInfo::relocType rtype, bool preserve_g2 = false) {
   // The method which records debug information at every safepoint
   // expects the call to be the first instruction in the snippet as
   // it creates a PcDesc structure which tracks the offset of a call
@@ -963,20 +967,7 @@
   int startpos = __ offset();
 #endif /* ASSERT */
 
-#ifdef _LP64
-  // Calls to the runtime or native may not be reachable from compiled code,
-  // so we generate the far call sequence on 64 bit sparc.
-  // This code sequence is relocatable to any address, even on LP64.
-  if ( force_far_call ) {
-    __ relocate(rtype);
-    AddressLiteral dest(entry_point);
-    __ jumpl_to(dest, O7, O7);
-  }
-  else
-#endif
-  {
-     __ call((address)entry_point, rtype);
-  }
+  __ call((address)entry_point, rtype);
 
   if (preserve_g2)   __ delayed()->mov(G2, L7);
   else __ delayed()->nop();
@@ -2507,7 +2498,7 @@
     // CALL directly to the runtime
     // The user of this is responsible for ensuring that R_L7 is empty (killed).
     emit_call_reloc(cbuf, $meth$$method, relocInfo::runtime_call_type,
-                    /*preserve_g2=*/true, /*force far call*/true);
+                    /*preserve_g2=*/true);
   %}
 
   enc_class preserve_SP %{
--- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1364,15 +1364,8 @@
       // We have decided to profile this method in the interpreter
       __ bind(profile_method);
 
-      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), Lbcp, true);
-
-#ifdef ASSERT
-      __ tst(O0);
-      __ breakpoint_trap(Assembler::notEqual);
-#endif
-
-      __ set_method_data_pointer();
-
+      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
+      __ set_method_data_pointer_for_bcp();
       __ ba(false, profile_method_continue);
       __ delayed()->nop();
     }
--- a/src/cpu/sparc/vm/templateTable_sparc.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/cpu/sparc/vm/templateTable_sparc.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1689,7 +1689,7 @@
       const Register G4_invoke_ctr = G4;
       __ increment_backedge_counter(G4_invoke_ctr, G1_scratch);
       if (ProfileInterpreter) {
-        __ test_invocation_counter_for_mdp(G4_invoke_ctr, Lbcp, G3_scratch, Lforward);
+        __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_scratch, Lforward);
         if (UseOnStackReplacement) {
           __ test_backedge_count_for_osr(O2_bumped_count, O0_cur_bcp, G3_scratch);
         }
@@ -3447,7 +3447,8 @@
     __ delayed()->nop();
 
     // bump total bytes allocated by this thread
-    __ incr_allocated_bytes(Roffset, 0, G1_scratch);
+    // RoldTopValue and RtopAddr are dead, so we can use G1 and G3
+    __ incr_allocated_bytes(Roffset, G1_scratch, G3_scratch);
   }
 
   if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1641,12 +1641,14 @@
 }
 
 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
+  Register len =  op->len()->as_register();
+  LP64_ONLY( __ movslq(len, len); )
+
   if (UseSlowPath ||
       (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
       (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
     __ jmp(*op->stub()->entry());
   } else {
-    Register len =  op->len()->as_register();
     Register tmp1 = op->tmp1()->as_register();
     Register tmp2 = op->tmp2()->as_register();
     Register tmp3 = op->tmp3()->as_register();
--- a/src/cpu/x86/vm/interp_masm_x86_32.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/cpu/x86/vm/interp_masm_x86_32.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -819,7 +819,7 @@
 // Set the method data pointer for the current bcp.
 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
   assert(ProfileInterpreter, "must be profiling interpreter");
-  Label zero_continue;
+  Label set_mdp;
   push(rax);
   push(rbx);
 
@@ -827,21 +827,17 @@
   // Test MDO to avoid the call if it is NULL.
   movptr(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
   testptr(rax, rax);
-  jcc(Assembler::zero, zero_continue);
-
+  jcc(Assembler::zero, set_mdp);
   // rbx,: method
   // rsi: bcp
   call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, rsi);
   // rax,: mdi
-
+  // mdo is guaranteed to be non-zero here; we checked for it before the call.
   movptr(rbx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
-  testptr(rbx, rbx);
-  jcc(Assembler::zero, zero_continue);
   addptr(rbx, in_bytes(methodDataOopDesc::data_offset()));
-  addptr(rbx, rax);
-  movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rbx);
-
-  bind(zero_continue);
+  addptr(rax, rbx);
+  bind(set_mdp);
+  movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax);
   pop(rbx);
   pop(rax);
 }
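The rewritten set_method_data_pointer_for_bcp() stores either 0 (no MDO yet) or MDO + data_offset + mdi into the frame's mdx slot, where mdi is the index returned by InterpreterRuntime::bcp_to_di(). A pointer-arithmetic sketch of that value, with illustrative raw-pointer types rather than HotSpot's oop types:

  #include <stddef.h>
  #include <stdint.h>

  static uint8_t* method_data_pointer(uint8_t* mdo,          // method's MDO, may be null
                                      size_t   data_offset,  // methodDataOopDesc::data_offset()
                                      size_t   mdi) {        // index from bcp_to_di()
    if (mdo == nullptr) {
      return nullptr;    // frame mdx stays 0, matching the early jump to set_mdp
    }
    return mdo + data_offset + mdi;
  }
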
--- a/src/cpu/x86/vm/interp_masm_x86_64.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/cpu/x86/vm/interp_masm_x86_64.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -855,7 +855,7 @@
 // Set the method data pointer for the current bcp.
 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
   assert(ProfileInterpreter, "must be profiling interpreter");
-  Label zero_continue;
+  Label set_mdp;
   push(rax);
   push(rbx);
 
@@ -863,21 +863,17 @@
   // Test MDO to avoid the call if it is NULL.
   movptr(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
   testptr(rax, rax);
-  jcc(Assembler::zero, zero_continue);
-
+  jcc(Assembler::zero, set_mdp);
   // rbx: method
   // r13: bcp
   call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, r13);
   // rax: mdi
-
+  // mdo is guaranteed to be non-zero here; we checked for it before the call.
   movptr(rbx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
-  testptr(rbx, rbx);
-  jcc(Assembler::zero, zero_continue);
   addptr(rbx, in_bytes(methodDataOopDesc::data_offset()));
-  addptr(rbx, rax);
-  movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rbx);
-
-  bind(zero_continue);
+  addptr(rax, rbx);
+  bind(set_mdp);
+  movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax);
   pop(rbx);
   pop(rax);
 }
--- a/src/cpu/x86/vm/methodHandles_x86.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/cpu/x86/vm/methodHandles_x86.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -390,7 +390,7 @@
 //
 // Generate an "entry" field for a method handle.
 // This determines how the method handle will respond to calls.
-void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek, TRAPS) {
+void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
   // Here is the register state during an interpreted call,
   // as set up by generate_method_handle_interpreter_entry():
   // - rbx: garbage temp (was MethodHandle.invoke methodOop, unused)
@@ -451,8 +451,9 @@
       // exception.  Since we use a C2I adapter to set up the
       // interpreter state, arguments are expected in compiler
       // argument registers.
-      methodHandle mh(raise_exception_method());
-      address c2i_entry = methodOopDesc::make_adapters(mh, CHECK);
+      assert(raise_exception_method(), "must be set");
+      address c2i_entry = raise_exception_method()->get_c2i_entry();
+      assert(c2i_entry, "method must be linked");
 
       const Register rdi_pc = rax;
       __ pop(rdi_pc);  // caller PC
--- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1367,15 +1367,8 @@
     if (ProfileInterpreter) {
       // We have decided to profile this method in the interpreter
       __ bind(profile_method);
-
-      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), rsi, true);
-
-      __ movptr(rbx, Address(rbp, method_offset));   // restore methodOop
-      __ movptr(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
-      __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax);
-      __ test_method_data_pointer(rax, profile_method_continue);
-      __ addptr(rax, in_bytes(methodDataOopDesc::data_offset()));
-      __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax);
+      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
+      __ set_method_data_pointer_for_bcp();
       __ jmp(profile_method_continue);
     }
     // Handle overflow of counter and compile method
--- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1383,20 +1383,8 @@
     if (ProfileInterpreter) {
       // We have decided to profile this method in the interpreter
       __ bind(profile_method);
-
-      __ call_VM(noreg,
-                 CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method),
-                 r13, true);
-
-      __ movptr(rbx, Address(rbp, method_offset)); // restore methodOop
-      __ movptr(rax, Address(rbx,
-                             in_bytes(methodOopDesc::method_data_offset())));
-      __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
-                rax);
-      __ test_method_data_pointer(rax, profile_method_continue);
-      __ addptr(rax, in_bytes(methodDataOopDesc::data_offset()));
-      __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
-              rax);
+      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
+      __ set_method_data_pointer_for_bcp();
       __ jmp(profile_method_continue);
     }
     // Handle overflow of counter and compile method
--- a/src/cpu/x86/vm/templateTable_x86_32.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/cpu/x86/vm/templateTable_x86_32.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1665,16 +1665,9 @@
     if (ProfileInterpreter) {
       // Out-of-line code to allocate method data oop.
       __ bind(profile_method);
-      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), rsi);
+      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
       __ load_unsigned_byte(rbx, Address(rsi, 0));  // restore target bytecode
-      __ movptr(rcx, Address(rbp, method_offset));
-      __ movptr(rcx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
-      __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rcx);
-      __ test_method_data_pointer(rcx, dispatch);
-      // offset non-null mdp by MDO::data_offset() + IR::profile_method()
-      __ addptr(rcx, in_bytes(methodDataOopDesc::data_offset()));
-      __ addptr(rcx, rax);
-      __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rcx);
+      __ set_method_data_pointer_for_bcp();
       __ jmp(dispatch);
     }
 
--- a/src/cpu/x86/vm/templateTable_x86_64.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/cpu/x86/vm/templateTable_x86_64.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1695,21 +1695,9 @@
     if (ProfileInterpreter) {
       // Out-of-line code to allocate method data oop.
       __ bind(profile_method);
-      __ call_VM(noreg,
-                 CAST_FROM_FN_PTR(address,
-                                  InterpreterRuntime::profile_method), r13);
+      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
       __ load_unsigned_byte(rbx, Address(r13, 0));  // restore target bytecode
-      __ movptr(rcx, Address(rbp, method_offset));
-      __ movptr(rcx, Address(rcx,
-                             in_bytes(methodOopDesc::method_data_offset())));
-      __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
-                rcx);
-      __ test_method_data_pointer(rcx, dispatch);
-      // offset non-null mdp by MDO::data_offset() + IR::profile_method()
-      __ addptr(rcx, in_bytes(methodDataOopDesc::data_offset()));
-      __ addptr(rcx, rax);
-      __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
-                rcx);
+      __ set_method_data_pointer_for_bcp();
       __ jmp(dispatch);
     }
 
--- a/src/os/linux/vm/os_linux.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/os/linux/vm/os_linux.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1610,10 +1610,9 @@
 
 const char* os::dll_file_extension() { return ".so"; }
 
-const char* os::get_temp_directory() {
-  const char *prop = Arguments::get_property("java.io.tmpdir");
-  return prop == NULL ? "/tmp" : prop;
-}
+// This must be hard coded because it's the system's temporary
+// directory, not the java application's temp directory, a la java.io.tmpdir.
+const char* os::get_temp_directory() { return "/tmp"; }
 
 static bool file_exists(const char* filename) {
   struct stat statbuf;
--- a/src/os/solaris/vm/os_solaris.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/os/solaris/vm/os_solaris.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1884,10 +1884,9 @@
 
 const char* os::dll_file_extension() { return ".so"; }
 
-const char* os::get_temp_directory() {
-  const char *prop = Arguments::get_property("java.io.tmpdir");
-  return prop == NULL ? "/tmp" : prop;
-}
+// This must be hard coded because it's the system's temporary
+// directory, not the java application's temp directory, a la java.io.tmpdir.
+const char* os::get_temp_directory() { return "/tmp"; }
 
 static bool file_exists(const char* filename) {
   struct stat statbuf;
--- a/src/os/windows/vm/os_windows.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/os/windows/vm/os_windows.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1044,9 +1044,9 @@
     return 0;
 }
 
+// This must be hard coded because it's the system's temporary
+// directory, not the java application's temp directory, a la java.io.tmpdir.
 const char* os::get_temp_directory() {
-  const char *prop = Arguments::get_property("java.io.tmpdir");
-  if (prop != 0) return prop;
   static char path_buf[MAX_PATH];
   if (GetTempPath(MAX_PATH, path_buf)>0)
     return path_buf;
--- a/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.inline.hpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.inline.hpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -54,6 +54,8 @@
 inline void Atomic::dec_ptr(volatile intptr_t* dest) { (void)add_ptr(-1, dest); }
 inline void Atomic::dec_ptr(volatile void*     dest) { (void)add_ptr(-1, dest); }
 
+inline jlong Atomic::load(volatile jlong* src) { return *src; }
+
 inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
   intptr_t rv;
   __asm__ volatile(
--- a/src/os_cpu/linux_x86/vm/atomic_linux_x86.inline.hpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/os_cpu/linux_x86/vm/atomic_linux_x86.inline.hpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -100,11 +100,6 @@
   return exchange_value;
 }
 
-extern "C" {
-  // defined in linux_x86.s
-  jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool);
-}
-
 #ifdef AMD64
 inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
 inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
@@ -164,9 +159,9 @@
   return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
 }
 
-#else
-//inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
-//inline void Atomic::store  (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
+inline jlong Atomic::load(volatile jlong* src) { return *src; }
+
+#else // !AMD64
 
 inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
   return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
@@ -189,6 +184,12 @@
   return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
 }
 
+extern "C" {
+  // defined in linux_x86.s
+  jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool);
+  void _Atomic_move_long(volatile jlong* src, volatile jlong* dst);
+}
+
 inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value) {
   return _Atomic_cmpxchg_long(exchange_value, dest, compare_value, os::is_MP());
 }
@@ -200,6 +201,21 @@
 inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value) {
   return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value);
 }
+
+inline jlong Atomic::load(volatile jlong* src) {
+  volatile jlong dest;
+  _Atomic_move_long(src, &dest);
+  return dest;
+}
+
+inline void Atomic::store(jlong store_value, jlong* dest) {
+  _Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest);
+}
+
+inline void Atomic::store(jlong store_value, volatile jlong* dest) {
+  _Atomic_move_long((volatile jlong*)&store_value, dest);
+}
+
 #endif // AMD64
 
 #endif // OS_CPU_LINUX_X86_VM_ATOMIC_LINUX_X86_INLINE_HPP
--- a/src/os_cpu/linux_x86/vm/linux_x86_32.s	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/os_cpu/linux_x86/vm/linux_x86_32.s	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 # 
-# Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -38,6 +38,7 @@
 	.globl _mmx_Copy_arrayof_conjoint_jshorts
 
         .globl _Atomic_cmpxchg_long
+        .globl _Atomic_move_long
 
 	.text
 
@@ -653,3 +654,15 @@
         popl     %ebx
         ret
 
+
+        # Support for jlong Atomic::load and Atomic::store.
+        # void _Atomic_move_long(volatile jlong* src, volatile jlong* dst)
+        .p2align 4,,15
+	.type    _Atomic_move_long,@function
+_Atomic_move_long:
+        movl     4(%esp), %eax   # src
+        fildll    (%eax)
+        movl     8(%esp), %eax   # dest
+        fistpll   (%eax)
+        ret
+
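_Atomic_move_long above is what backs the new 32-bit jlong Atomic::load/store: the x87 fildll/fistpll pair reads and writes all 8 bytes with a single memory access each. A rough GCC inline-asm rendering of the same idea, offered only as a sketch (HotSpot keeps the real code in the .s/.il files):

  #include <stdint.h>

  static inline void atomic_move_64(volatile int64_t* src, volatile int64_t* dst) {
  #if defined(__i386__)
    __asm__ volatile("fildll %1\n\t"       // load 8 bytes onto the x87 stack in one access
                     "fistpll %0"          // store 8 bytes and pop in one access
                     : "=m" (*dst)
                     : "m" (*src)
                     : "memory");
  #else
    *dst = *src;   // a 64-bit target can move a jlong with one ordinary instruction
  #endif
  }

  static inline int64_t atomic_load_64(volatile int64_t* src) {
    volatile int64_t dest;
    atomic_move_64(src, &dest);
    return dest;
  }

  static inline void atomic_store_64(int64_t value, volatile int64_t* dst) {
    atomic_move_64(&value, dst);
  }
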
--- a/src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #ifndef OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
 #define OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
 
+#include "runtime/atomic.hpp"
 #include "runtime/orderAccess.hpp"
 #include "vm_version_x86.hpp"
 
@@ -64,11 +65,11 @@
 inline jbyte    OrderAccess::load_acquire(volatile jbyte*   p) { return *p; }
 inline jshort   OrderAccess::load_acquire(volatile jshort*  p) { return *p; }
 inline jint     OrderAccess::load_acquire(volatile jint*    p) { return *p; }
-inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { return *p; }
+inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { return Atomic::load(p); }
 inline jubyte   OrderAccess::load_acquire(volatile jubyte*  p) { return *p; }
 inline jushort  OrderAccess::load_acquire(volatile jushort* p) { return *p; }
 inline juint    OrderAccess::load_acquire(volatile juint*   p) { return *p; }
-inline julong   OrderAccess::load_acquire(volatile julong*  p) { return *p; }
+inline julong   OrderAccess::load_acquire(volatile julong*  p) { return Atomic::load((volatile jlong*)p); }
 inline jfloat   OrderAccess::load_acquire(volatile jfloat*  p) { return *p; }
 inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) { return *p; }
 
@@ -79,11 +80,11 @@
 inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { *p = v; }
 inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { *p = v; }
 inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { *p = v; }
-inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { *p = v; }
+inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { Atomic::store(v, p); }
 inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { *p = v; }
 inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
 inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { *p = v; }
-inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { *p = v; }
+inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { Atomic::store((jlong)v, (volatile jlong*)p); }
 inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { *p = v; }
 inline void     OrderAccess::release_store(volatile jdouble* p, jdouble v) { *p = v; }
 
@@ -178,7 +179,7 @@
                           : "0" (v), "r" (p)
                           : "memory");
 #else
-  *p = v; fence();
+  release_store(p, v); fence();
 #endif // AMD64
 }
 
--- a/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.inline.hpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.inline.hpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -151,14 +151,22 @@
   return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value);
 }
 
-extern "C" void _Atomic_load_long(volatile jlong* src, volatile jlong* dst);
+extern "C" void _Atomic_move_long(volatile jlong* src, volatile jlong* dst);
 
 inline jlong Atomic::load(volatile jlong* src) {
   volatile jlong dest;
-  _Atomic_load_long(src, &dest);
+  _Atomic_move_long(src, &dest);
   return dest;
 }
 
+inline void Atomic::store(jlong store_value, jlong* dest) {
+  _Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest);
+}
+
+inline void Atomic::store(jlong store_value, volatile jlong* dest) {
+  _Atomic_move_long((volatile jlong*)&store_value, dest);
+}
+
 #endif // AMD64
 
 #ifdef _GNU_SOURCE
--- a/src/os_cpu/solaris_x86/vm/orderAccess_solaris_x86.inline.hpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/os_cpu/solaris_x86/vm/orderAccess_solaris_x86.inline.hpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #ifndef OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_INLINE_HPP
 #define OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_INLINE_HPP
 
+#include "runtime/atomic.hpp"
 #include "runtime/orderAccess.hpp"
 #include "vm_version_x86.hpp"
 
@@ -80,11 +81,11 @@
 inline jbyte    OrderAccess::load_acquire(volatile jbyte*   p) { return *p; }
 inline jshort   OrderAccess::load_acquire(volatile jshort*  p) { return *p; }
 inline jint     OrderAccess::load_acquire(volatile jint*    p) { return *p; }
-inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { return *p; }
+inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { return Atomic::load(p); }
 inline jubyte   OrderAccess::load_acquire(volatile jubyte*  p) { return *p; }
 inline jushort  OrderAccess::load_acquire(volatile jushort* p) { return *p; }
 inline juint    OrderAccess::load_acquire(volatile juint*   p) { return *p; }
-inline julong   OrderAccess::load_acquire(volatile julong*  p) { return *p; }
+inline julong   OrderAccess::load_acquire(volatile julong*  p) { return Atomic::load((volatile jlong*)p); }
 inline jfloat   OrderAccess::load_acquire(volatile jfloat*  p) { return *p; }
 inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) { return *p; }
 
@@ -95,11 +96,11 @@
 inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { *p = v; }
 inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { *p = v; }
 inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { *p = v; }
-inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { *p = v; }
+inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { Atomic::store(v, p); }
 inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { *p = v; }
 inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
 inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { *p = v; }
-inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { *p = v; }
+inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { Atomic::store((jlong)v, (volatile jlong*)p); }
 inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { *p = v; }
 inline void     OrderAccess::release_store(volatile jdouble* p, jdouble v) { *p = v; }
 
@@ -123,11 +124,11 @@
 inline void     OrderAccess::release_store_fence(volatile jbyte*   p, jbyte   v) { *p = v; fence(); }
 inline void     OrderAccess::release_store_fence(volatile jshort*  p, jshort  v) { *p = v; fence(); }
 inline void     OrderAccess::release_store_fence(volatile jint*    p, jint    v) { *p = v; fence(); }
-inline void     OrderAccess::release_store_fence(volatile jlong*   p, jlong   v) { *p = v; fence(); }
+inline void     OrderAccess::release_store_fence(volatile jlong*   p, jlong   v) { release_store(p, v); fence(); }
 inline void     OrderAccess::release_store_fence(volatile jubyte*  p, jubyte  v) { *p = v; fence(); }
 inline void     OrderAccess::release_store_fence(volatile jushort* p, jushort v) { *p = v; fence(); }
 inline void     OrderAccess::release_store_fence(volatile juint*   p, juint   v) { *p = v; fence(); }
-inline void     OrderAccess::release_store_fence(volatile julong*  p, julong  v) { *p = v; fence(); }
+inline void     OrderAccess::release_store_fence(volatile julong*  p, julong  v) { release_store(p, v); fence(); }
 inline void     OrderAccess::release_store_fence(volatile jfloat*  p, jfloat  v) { *p = v; fence(); }
 inline void     OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { *p = v; fence(); }
 
--- a/src/os_cpu/solaris_x86/vm/solaris_x86_32.il	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/os_cpu/solaris_x86/vm/solaris_x86_32.il	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -104,8 +104,9 @@
       popl     %ebx
       .end
 
-  // Support for void Atomic::load(volatile jlong* src, volatile jlong* dest).
-      .inline _Atomic_load_long,2
+  // Support for jlong Atomic::load and Atomic::store.
+  // void _Atomic_move_long(volatile jlong* src, volatile jlong* dst)
+      .inline _Atomic_move_long,2
       movl     0(%esp), %eax   // src
       fildll    (%eax)
       movl     4(%esp), %eax   // dest
--- a/src/os_cpu/windows_x86/vm/atomic_windows_x86.inline.hpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/os_cpu/windows_x86/vm/atomic_windows_x86.inline.hpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -137,10 +137,10 @@
   return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
 }
 
+inline jlong Atomic::load(volatile jlong* src) { return *src; }
+
 #else // !AMD64
 
-//inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
-//inline void Atomic::store  (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
 inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
   int mp = os::is_MP();
   __asm {
@@ -254,6 +254,33 @@
 inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value) {
   return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value);
 }
+
+inline jlong Atomic::load(volatile jlong* src) {
+  volatile jlong dest;
+  volatile jlong* pdest = &dest;
+  __asm {
+    mov eax, src
+    fild     qword ptr [eax]
+    mov eax, pdest
+    fistp    qword ptr [eax]
+  }
+  return dest;
+}
+
+inline void Atomic::store(jlong store_value, volatile jlong* dest) {
+  volatile jlong* src = &store_value;
+  __asm {
+    mov eax, src
+    fild     qword ptr [eax]
+    mov eax, dest
+    fistp    qword ptr [eax]
+  }
+}
+
+inline void Atomic::store(jlong store_value, jlong* dest) {
+  Atomic::store(store_value, (volatile jlong*)dest);
+}
+
 #endif // AMD64
 
 #pragma warning(default: 4035) // Enables warnings reporting missing return statement
--- a/src/os_cpu/windows_x86/vm/orderAccess_windows_x86.inline.hpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/os_cpu/windows_x86/vm/orderAccess_windows_x86.inline.hpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #ifndef OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP
 #define OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP
 
+#include "runtime/atomic.hpp"
 #include "runtime/orderAccess.hpp"
 #include "vm_version_x86.hpp"
 
@@ -65,11 +66,11 @@
 inline jbyte    OrderAccess::load_acquire(volatile jbyte*   p) { return *p; }
 inline jshort   OrderAccess::load_acquire(volatile jshort*  p) { return *p; }
 inline jint     OrderAccess::load_acquire(volatile jint*    p) { return *p; }
-inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { return *p; }
+inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { return Atomic::load(p); }
 inline jubyte   OrderAccess::load_acquire(volatile jubyte*  p) { return *p; }
 inline jushort  OrderAccess::load_acquire(volatile jushort* p) { return *p; }
 inline juint    OrderAccess::load_acquire(volatile juint*   p) { return *p; }
-inline julong   OrderAccess::load_acquire(volatile julong*  p) { return *p; }
+inline julong   OrderAccess::load_acquire(volatile julong*  p) { return Atomic::load((volatile jlong*)p); }
 inline jfloat   OrderAccess::load_acquire(volatile jfloat*  p) { return *p; }
 inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) { return *p; }
 
@@ -80,11 +81,11 @@
 inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { *p = v; }
 inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { *p = v; }
 inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { *p = v; }
-inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { *p = v; }
+inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { Atomic::store(v, p); }
 inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { *p = v; }
 inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
 inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { *p = v; }
-inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { *p = v; }
+inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { Atomic::store((jlong)v, (volatile jlong*)p); }
 inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { *p = v; }
 inline void     OrderAccess::release_store(volatile jdouble* p, jdouble v) { *p = v; }
 
@@ -188,7 +189,7 @@
 #endif // AMD64
 }
 
-inline void     OrderAccess::release_store_fence(volatile jlong*   p, jlong   v) { *p = v; fence(); }
+inline void     OrderAccess::release_store_fence(volatile jlong*   p, jlong   v) { release_store(p, v); fence(); }
 
 inline void     OrderAccess::release_store_fence(volatile jubyte*  p, jubyte  v) { release_store_fence((volatile jbyte*)p,  (jbyte)v);  }
 inline void     OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store_fence((volatile jshort*)p, (jshort)v); }
--- a/src/share/vm/c1/c1_LIRGenerator.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/c1/c1_LIRGenerator.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1990,9 +1990,8 @@
 
   LIR_Opr reg = reg = rlock_result(x, x->basic_type());
 
+  get_Object_unsafe(reg, src.result(), off.result(), type, x->is_volatile());
   if (x->is_volatile() && os::is_MP()) __ membar_acquire();
-  get_Object_unsafe(reg, src.result(), off.result(), type, x->is_volatile());
-  if (x->is_volatile() && os::is_MP()) __ membar();
 }
 
 
@@ -2014,6 +2013,7 @@
 
   if (x->is_volatile() && os::is_MP()) __ membar_release();
   put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
+  if (x->is_volatile() && os::is_MP()) __ membar();
 }
 
 
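The reordering above puts the membar_acquire after the volatile load and adds a trailing full membar after the volatile store, giving the usual load-then-acquire / release-then-store-then-fence shape for volatile accesses on MP systems. A rough analogue of that barrier placement using C++11 fences (illustrative only, not LIR code nor a recommended C++ idiom):

  #include <atomic>

  // 'addr' stands for the Unsafe target address; 'long' is just an illustrative type.
  long volatile_get(volatile long* addr) {
    long v = *addr;                                       // the load itself
    std::atomic_thread_fence(std::memory_order_acquire);  // membar_acquire after the load
    return v;
  }

  void volatile_put(volatile long* addr, long v) {
    std::atomic_thread_fence(std::memory_order_release);  // membar_release before the store
    *addr = v;                                            // the store itself
    std::atomic_thread_fence(std::memory_order_seq_cst);  // trailing full membar
  }
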
--- a/src/share/vm/ci/ciMethodHandle.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/ci/ciMethodHandle.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,11 +38,12 @@
 // Return an adapter for this MethodHandle.
 ciMethod* ciMethodHandle::get_adapter(bool is_invokedynamic) const {
   VM_ENTRY_MARK;
-
   Handle h(get_oop());
   methodHandle callee(_callee->get_methodOop());
-  MethodHandleCompiler mhc(h, callee, is_invokedynamic, THREAD);
-  methodHandle m = mhc.compile(CHECK_NULL);
+  // We catch all exceptions here that could happen in the method
+  // handle compiler and stop the VM.
+  MethodHandleCompiler mhc(h, callee, is_invokedynamic, CATCH);
+  methodHandle m = mhc.compile(CATCH);
   return CURRENT_ENV->get_object(m())->as_method();
 }
 
--- a/src/share/vm/classfile/systemDictionary.hpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/classfile/systemDictionary.hpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -172,6 +172,8 @@
                                                                               \
   template(sun_jkernel_DownloadManager_klass, sun_jkernel_DownloadManager, Opt_Kernel) \
                                                                               \
+  template(sun_misc_PostVMInitHook_klass, sun_misc_PostVMInitHook, Opt)       \
+                                                                              \
   /* Preload boxing klasses */                                                \
   template(Boolean_klass,                java_lang_Boolean,              Pre) \
   template(Character_klass,              java_lang_Character,            Pre) \
--- a/src/share/vm/classfile/vmSymbols.hpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/classfile/vmSymbols.hpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -111,6 +111,7 @@
   template(sun_jkernel_DownloadManager,               "sun/jkernel/DownloadManager")              \
   template(getBootClassPathEntryForClass_name,        "getBootClassPathEntryForClass")            \
   template(setBootClassLoaderHook_name,               "setBootClassLoaderHook")                   \
+  template(sun_misc_PostVMInitHook,                   "sun/misc/PostVMInitHook")                  \
                                                                                                   \
   /* class file format tags */                                                                    \
   template(tag_source_file,                           "SourceFile")                               \
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -3478,6 +3478,7 @@
   assert(_collectorState == InitialMarking, "Wrong collector state");
   check_correct_thread_executing();
   TraceCMSMemoryManagerStats tms(_collectorState);
+
   ReferenceProcessor* rp = ref_processor();
   SpecializationStats::clear();
   assert(_restart_addr == NULL, "Control point invariant");
@@ -5940,11 +5941,6 @@
   }
   rp->verify_no_references_recorded();
   assert(!rp->discovery_enabled(), "should have been disabled");
-
-  // JVMTI object tagging is based on JNI weak refs. If any of these
-  // refs were cleared then JVMTI needs to update its maps and
-  // maybe post ObjectFrees to agents.
-  JvmtiExport::cms_ref_processing_epilogue();
 }
 
 #ifndef PRODUCT
@@ -6305,6 +6301,7 @@
 
   switch (op) {
     case CMS_op_checkpointRootsInitial: {
+      SvcGCMarker sgcm(SvcGCMarker::OTHER);
       checkpointRootsInitial(true);       // asynch
       if (PrintGC) {
         _cmsGen->printOccupancy("initial-mark");
@@ -6312,6 +6309,7 @@
       break;
     }
     case CMS_op_checkpointRootsFinal: {
+      SvcGCMarker sgcm(SvcGCMarker::OTHER);
       checkpointRootsFinal(true,    // asynch
                            false,   // !clear_all_soft_refs
                            false);  // !init_mark_was_synchronous
@@ -7881,25 +7879,23 @@
 }
 
 // We need this destructor to reclaim any space at the end
-// of the space, which do_blk below may not have added back to
-// the free lists. [basically dealing with the "fringe effect"]
+// of the space, which do_blk below may not yet have added back to
+// the free lists.
 SweepClosure::~SweepClosure() {
   assert_lock_strong(_freelistLock);
-  // this should be treated as the end of a free run if any
-  // The current free range should be returned to the free lists
-  // as one coalesced chunk.
+  assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
+         "sweep _limit out of bounds");
+  // Flush any remaining coterminal free run as a single
+  // coalesced chunk to the appropriate free list.
   if (inFreeRange()) {
-    flushCurFreeChunk(freeFinger(),
-      pointer_delta(_limit, freeFinger()));
-    assert(freeFinger() < _limit, "the finger pointeth off base");
+    assert(freeFinger() < _limit, "freeFinger points too high");
+    flush_cur_free_chunk(freeFinger(), pointer_delta(_limit, freeFinger()));
     if (CMSTraceSweeper) {
-      gclog_or_tty->print("destructor:");
-      gclog_or_tty->print("Sweep:put_free_blk 0x%x ("SIZE_FORMAT") "
-                 "[coalesced:"SIZE_FORMAT"]\n",
-                 freeFinger(), pointer_delta(_limit, freeFinger()),
-                 lastFreeRangeCoalesced());
-    }
-  }
+      gclog_or_tty->print("Sweep: last chunk: ");
+      gclog_or_tty->print("put_free_blk 0x%x ("SIZE_FORMAT") [coalesced:"SIZE_FORMAT"]\n",
+                          freeFinger(), pointer_delta(_limit, freeFinger()), lastFreeRangeCoalesced());
+    }
+  } // else nothing to flush
   NOT_PRODUCT(
     if (Verbose && PrintGC) {
       gclog_or_tty->print("Collected "SIZE_FORMAT" objects, "
@@ -7936,9 +7932,8 @@
 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
     bool freeRangeInFreeLists) {
   if (CMSTraceSweeper) {
-    gclog_or_tty->print("---- Start free range 0x%x with free block [%d] (%d)\n",
-               freeFinger, _sp->block_size(freeFinger),
-               freeRangeInFreeLists);
+    gclog_or_tty->print("---- Start free range at 0x%x with free block (%d)\n",
+               freeFinger, freeRangeInFreeLists);
   }
   assert(!inFreeRange(), "Trampling existing free range");
   set_inFreeRange(true);
@@ -7993,21 +7988,36 @@
   // may have caused us to coalesce the block ending at the address _limit
   // with a newly expanded chunk (this happens when _limit was set to the
   // previous _end of the space), so we may have stepped past _limit; see CR 6977970.
-  if (addr >= _limit) { // we have swept up to or past the limit, do nothing more
+  if (addr >= _limit) { // we have swept up to or past the limit: finish up
     assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
            "sweep _limit out of bounds");
     assert(addr < _sp->end(), "addr out of bounds");
-    // help the closure application finish
+    // Flush any remaining coterminal free run as a single
+    // coalesced chunk to the appropriate free list.
+    if (inFreeRange()) {
+      assert(freeFinger() < _limit, "finger points too high");
+      flush_cur_free_chunk(freeFinger(),
+                           pointer_delta(addr, freeFinger()));
+      if (CMSTraceSweeper) {
+        gclog_or_tty->print("Sweep: last chunk: ");
+        gclog_or_tty->print("put_free_blk 0x%x ("SIZE_FORMAT") "
+                   "[coalesced:"SIZE_FORMAT"]\n",
+                   freeFinger(), pointer_delta(addr, freeFinger()),
+                   lastFreeRangeCoalesced());
+      }
+    }
+
+    // help the iterator loop finish
     return pointer_delta(_sp->end(), addr);
   }
+
   assert(addr < _limit, "sweep invariant");
-
   // check if we should yield
   do_yield_check(addr);
   if (fc->isFree()) {
     // Chunk that is already free
     res = fc->size();
-    doAlreadyFreeChunk(fc);
+    do_already_free_chunk(fc);
     debug_only(_sp->verifyFreeLists());
     assert(res == fc->size(), "Don't expect the size to change");
     NOT_PRODUCT(
@@ -8017,7 +8027,7 @@
     NOT_PRODUCT(_last_fc = fc;)
   } else if (!_bitMap->isMarked(addr)) {
     // Chunk is fresh garbage
-    res = doGarbageChunk(fc);
+    res = do_garbage_chunk(fc);
     debug_only(_sp->verifyFreeLists());
     NOT_PRODUCT(
       _numObjectsFreed++;
@@ -8025,7 +8035,7 @@
     )
   } else {
     // Chunk that is alive.
-    res = doLiveChunk(fc);
+    res = do_live_chunk(fc);
     debug_only(_sp->verifyFreeLists());
     NOT_PRODUCT(
         _numObjectsLive++;
@@ -8078,7 +8088,7 @@
 // to a free list which may be overpopulated.
 //
 
-void SweepClosure::doAlreadyFreeChunk(FreeChunk* fc) {
+void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
   size_t size = fc->size();
   // Chunks that cannot be coalesced are not in the
   // free lists.
@@ -8094,23 +8104,23 @@
   // addr and purported end of this block.
   _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
 
-  // Some chunks cannot be coalesced in under any circumstances.
+  // Some chunks cannot be coalesced under any circumstances.
   // See the definition of cantCoalesce().
   if (!fc->cantCoalesce()) {
     // This chunk can potentially be coalesced.
     if (_sp->adaptive_freelists()) {
       // All the work is done in
-      doPostIsFreeOrGarbageChunk(fc, size);
+      do_post_free_or_garbage_chunk(fc, size);
     } else {  // Not adaptive free lists
       // this is a free chunk that can potentially be coalesced by the sweeper;
       if (!inFreeRange()) {
         // if the next chunk is a free block that can't be coalesced
         // it doesn't make sense to remove this chunk from the free lists
         FreeChunk* nextChunk = (FreeChunk*)(addr + size);
-        assert((HeapWord*)nextChunk <= _limit, "sweep invariant");
-        if ((HeapWord*)nextChunk < _limit  &&    // there's a next chunk...
-            nextChunk->isFree()    &&            // which is free...
-            nextChunk->cantCoalesce()) {         // ... but cant be coalesced
+        assert((HeapWord*)nextChunk <= _sp->end(), "Chunk size out of bounds?");
+        if ((HeapWord*)nextChunk < _sp->end() &&     // There is another free chunk to the right ...
+            nextChunk->isFree()               &&     // ... which is free...
+            nextChunk->cantCoalesce()) {             // ... but can't be coalesced
           // nothing to do
         } else {
           // Potentially the start of a new free range:
@@ -8156,14 +8166,14 @@
     // as the end of a free run if any
     if (inFreeRange()) {
       // we kicked some butt; time to pick up the garbage
-      assert(freeFinger() < addr, "the finger pointeth off base");
-      flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger()));
+      assert(freeFinger() < addr, "freeFinger points too high");
+      flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
     }
     // else, nothing to do, just continue
   }
 }
 
-size_t SweepClosure::doGarbageChunk(FreeChunk* fc) {
+size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
   // This is a chunk of garbage.  It is not in any free list.
   // Add it to a free list or let it possibly be coalesced into
   // a larger chunk.
@@ -8175,7 +8185,7 @@
     // addr and purported end of just dead object.
     _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
 
-    doPostIsFreeOrGarbageChunk(fc, size);
+    do_post_free_or_garbage_chunk(fc, size);
   } else {
     if (!inFreeRange()) {
       // start of a new free range
@@ -8214,35 +8224,16 @@
   return size;
 }
 
-size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
+size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
   HeapWord* addr = (HeapWord*) fc;
   // The sweeper has just found a live object. Return any accumulated
   // left hand chunk to the free lists.
   if (inFreeRange()) {
-    if (_sp->adaptive_freelists()) {
-      flushCurFreeChunk(freeFinger(),
-                        pointer_delta(addr, freeFinger()));
-    } else { // not adaptive freelists
-      set_inFreeRange(false);
-      // Add the free range back to the free list if it is not already
-      // there.
-      if (!freeRangeInFreeLists()) {
-        assert(freeFinger() < addr, "the finger pointeth off base");
-        if (CMSTraceSweeper) {
-          gclog_or_tty->print("Sweep:put_free_blk 0x%x (%d) "
-            "[coalesced:%d]\n",
-            freeFinger(), pointer_delta(addr, freeFinger()),
-            lastFreeRangeCoalesced());
-        }
-        _sp->addChunkAndRepairOffsetTable(freeFinger(),
-          pointer_delta(addr, freeFinger()), lastFreeRangeCoalesced());
-      }
-    }
-  }
-
-  // Common code path for original and adaptive free lists.
-
-  // this object is live: we'd normally expect this to be
+    assert(freeFinger() < addr, "freeFinger points too high");
+    flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
+  }
+
+  // This object is live: we'd normally expect this to be
   // an oop, and like to assert the following:
   // assert(oop(addr)->is_oop(), "live block should be an oop");
   // However, as we commented above, this may be an object whose
@@ -8257,7 +8248,7 @@
     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
            "alignment problem");
 
-    #ifdef DEBUG
+#ifdef DEBUG
       if (oop(addr)->klass_or_null() != NULL &&
           (   !_collector->should_unload_classes()
            || (oop(addr)->is_parsable()) &&
@@ -8271,7 +8262,7 @@
                CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
                "P-mark and computed size do not agree");
       }
-    #endif
+#endif
 
   } else {
     // This should be an initialized object that's alive.
@@ -8298,18 +8289,16 @@
   return size;
 }
 
-void SweepClosure::doPostIsFreeOrGarbageChunk(FreeChunk* fc,
-                                            size_t chunkSize) {
-  // doPostIsFreeOrGarbageChunk() should only be called in the smart allocation
-  // scheme.
+void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
+                                                 size_t chunkSize) {
+  // do_post_free_or_garbage_chunk() should only be called in the case
+  // of the adaptive free list allocator.
   bool fcInFreeLists = fc->isFree();
   assert(_sp->adaptive_freelists(), "Should only be used in this case.");
   assert((HeapWord*)fc <= _limit, "sweep invariant");
   if (CMSTestInFreeList && fcInFreeLists) {
-    assert(_sp->verifyChunkInFreeLists(fc),
-      "free chunk is not in free lists");
-  }
-
+    assert(_sp->verifyChunkInFreeLists(fc), "free chunk is not in free lists");
+  }
 
   if (CMSTraceSweeper) {
     gclog_or_tty->print_cr("  -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
@@ -8382,20 +8371,21 @@
     if (inFreeRange()) {
       // In a free range but cannot coalesce with the right hand chunk.
       // Put the current free range into the free lists.
-      flushCurFreeChunk(freeFinger(),
-        pointer_delta(addr, freeFinger()));
+      flush_cur_free_chunk(freeFinger(),
+                           pointer_delta(addr, freeFinger()));
     }
     // Set up for new free range.  Pass along whether the right hand
     // chunk is in the free lists.
     initialize_free_range((HeapWord*)fc, fcInFreeLists);
   }
 }
-void SweepClosure::flushCurFreeChunk(HeapWord* chunk, size_t size) {
+
+void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
   assert(inFreeRange(), "Should only be called if currently in a free range.");
   assert(size > 0,
     "A zero sized chunk cannot be added to the free lists.");
   if (!freeRangeInFreeLists()) {
-    if(CMSTestInFreeList) {
+    if (CMSTestInFreeList) {
       FreeChunk* fc = (FreeChunk*) chunk;
       fc->setSize(size);
       assert(!_sp->verifyChunkInFreeLists(fc),
@@ -8430,7 +8420,7 @@
   // chunk just flushed, they will need to wait for the next
   // sweep to be coalesced.
   if (inFreeRange()) {
-    flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger()));
+    flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
   }
 
   // First give up the locks, then yield, then re-lock.
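
The sweep changes in this file all revolve around one piece of bookkeeping: dead and already-free blocks are folded into a single running free range anchored at freeFinger(), and that run is flushed to the free lists as one coalesced chunk when a live block or the sweep limit is reached (or, for a coterminal run, in the SweepClosure destructor). A minimal standalone sketch of that pattern follows; Block, ToyFreeLists and toy_sweep() are inventions of this sketch, not HotSpot types, and word indices stand in for heap addresses.

#include <cassert>
#include <cstddef>
#include <cstdio>
#include <utility>
#include <vector>

// Toy block descriptor: a size in words and a liveness flag.
struct Block { size_t size; bool live; };

// Collects the coalesced free chunks a real sweeper would hand to the
// free lists via flush_cur_free_chunk().
struct ToyFreeLists {
  std::vector<std::pair<size_t, size_t> > chunks;   // (start, size) pairs
  void add_chunk(size_t start, size_t size) {
    assert(size > 0 && "a zero sized chunk cannot be added to the free lists");
    chunks.push_back(std::make_pair(start, size));
  }
};

// Sweep blocks laid out back to back in [0, limit), folding consecutive
// dead blocks into single coalesced chunks.
void toy_sweep(const std::vector<Block>& blocks, size_t limit, ToyFreeLists* fl) {
  size_t addr = 0;
  bool   in_free_range = false;
  size_t free_finger   = 0;          // start of the current free run
  for (size_t i = 0; i < blocks.size() && addr < limit; ++i) {
    const Block& b = blocks[i];
    if (b.live) {
      // A live block ends the current free run, if any.
      if (in_free_range) {
        fl->add_chunk(free_finger, addr - free_finger);
        in_free_range = false;
      }
    } else if (!in_free_range) {
      // Start a new free run at this dead block.
      in_free_range = true;
      free_finger = addr;
    }
    addr += b.size;
  }
  // Coterminal free run: flush whatever is left, as the destructor does.
  if (in_free_range) {
    fl->add_chunk(free_finger, addr - free_finger);
  }
}

int main() {
  Block layout[] = { {4, false}, {2, false}, {3, true}, {5, false}, {1, false} };
  std::vector<Block> blocks(layout, layout + 5);
  ToyFreeLists fl;
  toy_sweep(blocks, 15, &fl);
  for (size_t i = 0; i < fl.chunks.size(); ++i) {   // expect [0,6) and [9,15)
    std::printf("coalesced chunk at %zu, size %zu\n",
                fl.chunks[i].first, fl.chunks[i].second);
  }
  return 0;
}

Running it prints two coalesced chunks, [0,6) and [9,15), which is what flush_cur_free_chunk() would hand to the free lists for this layout.
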
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1701,7 +1701,9 @@
   CMSCollector*                  _collector;  // collector doing the work
   ConcurrentMarkSweepGeneration* _g;    // Generation being swept
   CompactibleFreeListSpace*      _sp;   // Space being swept
-  HeapWord*                      _limit;
+  HeapWord*                      _limit; // the address at which the sweep should stop because
+                                        // we do not expect blocks eligible for sweeping past
+                                        // that address.
   Mutex*                         _freelistLock; // Free list lock (in space)
   CMSBitMap*                     _bitMap;       // Marking bit map (in
                                                 // generation)
@@ -1745,14 +1747,13 @@
  private:
   // Code that is common to a free chunk or garbage when
   // encountered during sweeping.
-  void doPostIsFreeOrGarbageChunk(FreeChunk *fc,
-                                  size_t chunkSize);
+  void do_post_free_or_garbage_chunk(FreeChunk *fc, size_t chunkSize);
   // Process a free chunk during sweeping.
-  void doAlreadyFreeChunk(FreeChunk *fc);
+  void do_already_free_chunk(FreeChunk *fc);
   // Process a garbage chunk during sweeping.
-  size_t doGarbageChunk(FreeChunk *fc);
+  size_t do_garbage_chunk(FreeChunk *fc);
   // Process a live chunk during sweeping.
-  size_t doLiveChunk(FreeChunk* fc);
+  size_t do_live_chunk(FreeChunk* fc);
 
   // Accessors.
   HeapWord* freeFinger() const          { return _freeFinger; }
@@ -1769,7 +1770,7 @@
   // Initialize a free range.
   void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
   // Return this chunk to the free lists.
-  void flushCurFreeChunk(HeapWord* chunk, size_t size);
+  void flush_cur_free_chunk(HeapWord* chunk, size_t size);
 
   // Check if we should yield and do so when necessary.
   inline void do_yield_check(HeapWord* addr);
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -31,6 +31,7 @@
 #include "gc_implementation/g1/g1RemSet.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/shared/vmGCOperations.hpp"
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/referencePolicy.hpp"
 #include "memory/resourceArea.hpp"
@@ -1142,6 +1143,8 @@
     return;
   }
 
+  SvcGCMarker sgcm(SvcGCMarker::OTHER);
+
   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
     gclog_or_tty->print(" VerifyDuringGC:(before)");
--- a/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -222,7 +222,7 @@
 
 // Action_mark - update the BOT for the block [blk_start, blk_end).
 //               Current typical use is for splitting a block.
-// Action_single - udpate the BOT for an allocation.
+// Action_single - update the BOT for an allocation.
 // Action_verify - BOT verification.
 void G1BlockOffsetArray::do_block_internal(HeapWord* blk_start,
                                            HeapWord* blk_end,
@@ -331,47 +331,6 @@
   do_block_internal(blk_start, blk_end, Action_mark);
 }
 
-void G1BlockOffsetArray::join_blocks(HeapWord* blk1, HeapWord* blk2) {
-  HeapWord* blk1_start = Universe::heap()->block_start(blk1);
-  HeapWord* blk2_start = Universe::heap()->block_start(blk2);
-  assert(blk1 == blk1_start && blk2 == blk2_start,
-         "Must be block starts.");
-  assert(blk1 + _sp->block_size(blk1) == blk2, "Must be contiguous.");
-  size_t blk1_start_index = _array->index_for(blk1);
-  size_t blk2_start_index = _array->index_for(blk2);
-  assert(blk1_start_index <= blk2_start_index, "sanity");
-  HeapWord* blk2_card_start = _array->address_for_index(blk2_start_index);
-  if (blk2 == blk2_card_start) {
-    // blk2 starts a card.  Does blk1 start on the prevous card, or futher
-    // back?
-    assert(blk1_start_index < blk2_start_index, "must be lower card.");
-    if (blk1_start_index + 1 == blk2_start_index) {
-      // previous card; new value for blk2 card is size of blk1.
-      _array->set_offset_array(blk2_start_index, (u_char) _sp->block_size(blk1));
-    } else {
-      // Earlier card; go back a card.
-      _array->set_offset_array(blk2_start_index, N_words);
-    }
-  } else {
-    // blk2 does not start a card.  Does it cross a card?  If not, nothing
-    // to do.
-    size_t blk2_end_index =
-      _array->index_for(blk2 + _sp->block_size(blk2) - 1);
-    assert(blk2_end_index >= blk2_start_index, "sanity");
-    if (blk2_end_index > blk2_start_index) {
-      // Yes, it crosses a card.  The value for the next card must change.
-      if (blk1_start_index + 1 == blk2_start_index) {
-        // previous card; new value for second blk2 card is size of blk1.
-        _array->set_offset_array(blk2_start_index + 1,
-                                 (u_char) _sp->block_size(blk1));
-      } else {
-        // Earlier card; go back a card.
-        _array->set_offset_array(blk2_start_index + 1, N_words);
-      }
-    }
-  }
-}
-
 HeapWord* G1BlockOffsetArray::block_start_unsafe(const void* addr) {
   assert(_bottom <= addr && addr < _end,
          "addr must be covered by this Array");
@@ -580,15 +539,50 @@
 #endif
 }
 
-void
-G1BlockOffsetArray::set_for_starts_humongous(HeapWord* new_end) {
-  assert(_end ==  new_end, "_end should have already been updated");
+bool
+G1BlockOffsetArray::verify_for_object(HeapWord* obj_start,
+                                      size_t word_size) const {
+  size_t first_card = _array->index_for(obj_start);
+  size_t last_card = _array->index_for(obj_start + word_size - 1);
+  if (!_array->is_card_boundary(obj_start)) {
+    // If the object is not on a card boundary the BOT entry of the
+    // first card should point to another object so we should not
+    // check that one.
+    first_card += 1;
+  }
+  for (size_t card = first_card; card <= last_card; card += 1) {
+    HeapWord* card_addr = _array->address_for_index(card);
+    HeapWord* block_start = block_start_const(card_addr);
+    if (block_start != obj_start) {
+      gclog_or_tty->print_cr("block start: "PTR_FORMAT" is incorrect - "
+                             "card index: "SIZE_FORMAT" "
+                             "card addr: "PTR_FORMAT" BOT entry: %u "
+                             "obj: "PTR_FORMAT" word size: "SIZE_FORMAT" "
+                             "cards: ["SIZE_FORMAT","SIZE_FORMAT"]",
+                             block_start, card, card_addr,
+                             _array->offset_array(card),
+                             obj_start, word_size, first_card, last_card);
+      return false;
+    }
+  }
+  return true;
+}
 
-  // The first BOT entry should have offset 0.
-  _array->set_offset_array(_array->index_for(_bottom), 0);
-  // The rest should point to the first one.
-  set_remainder_to_point_to_start(_bottom + N_words, new_end);
+#ifndef PRODUCT
+void
+G1BlockOffsetArray::print_on(outputStream* out) {
+  size_t from_index = _array->index_for(_bottom);
+  size_t to_index = _array->index_for(_end);
+  out->print_cr(">> BOT for area ["PTR_FORMAT","PTR_FORMAT") "
+                "cards ["SIZE_FORMAT","SIZE_FORMAT")",
+                _bottom, _end, from_index, to_index);
+  for (size_t i = from_index; i < to_index; ++i) {
+    out->print_cr("  entry "SIZE_FORMAT_W(8)" | "PTR_FORMAT" : %3u",
+                  i, _array->address_for_index(i),
+                  (uint) _array->offset_array(i));
+  }
 }
+#endif // !PRODUCT
 
 //////////////////////////////////////////////////////////////////////
 // G1BlockOffsetArrayContigSpace
@@ -641,10 +635,20 @@
 }
 
 void
-G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_end) {
-  G1BlockOffsetArray::set_for_starts_humongous(new_end);
+G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_top) {
+  assert(new_top <= _end, "_end should have already been updated");
+
+  // The first BOT entry should have offset 0.
+  zero_bottom_entry();
+  initialize_threshold();
+  alloc_block(_bottom, new_top);
+ }
 
-  // Make sure _next_offset_threshold and _next_offset_index point to new_end.
-  _next_offset_threshold = new_end;
-  _next_offset_index     = _array->index_for(new_end);
+#ifndef PRODUCT
+void
+G1BlockOffsetArrayContigSpace::print_on(outputStream* out) {
+  G1BlockOffsetArray::print_on(out);
+  out->print_cr("  next offset threshold: "PTR_FORMAT, _next_offset_threshold);
+  out->print_cr("  next offset index:     "SIZE_FORMAT, _next_offset_index);
 }
+#endif // !PRODUCT
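
As a rough model of what the new verify_for_object() checks, here is a self-contained toy block-offset table. kCardWords, ToyBOT and the word-index addresses are assumptions of this sketch rather than the G1 data structures; the invariant, though, is the one verified above: every card an object spans, except a first card it does not own, must map back to the object's start.

#include <cstddef>
#include <cstdio>
#include <vector>

// One entry per 64-word "card", holding the start of the block that the
// card's first word belongs to. Sizes and layout are assumptions of this
// toy model.
static const size_t kCardWords = 64;

struct ToyBOT {
  std::vector<size_t> block_start;                       // indexed by card
  size_t index_for(size_t addr) const { return addr / kCardWords; }
  bool   is_card_boundary(size_t addr) const { return addr % kCardWords == 0; }

  // Record that a block occupies the words [start, start + words).
  void alloc_block(size_t start, size_t words) {
    size_t first_card = index_for(start);
    if (!is_card_boundary(start)) {
      first_card += 1;   // the first card's entry belongs to the previous block
    }
    size_t last_card = index_for(start + words - 1);
    if (last_card >= block_start.size()) {
      block_start.resize(last_card + 1, 0);
    }
    for (size_t c = first_card; c <= last_card; ++c) {
      block_start[c] = start;
    }
  }

  size_t block_start_for(size_t addr) const { return block_start[index_for(addr)]; }
};

// Same idea as verify_for_object(): every card the object spans, except a
// first card it does not own, must map back to the object's start.
bool verify_for_object(const ToyBOT& bot, size_t obj_start, size_t words) {
  size_t first_card = bot.index_for(obj_start);
  size_t last_card  = bot.index_for(obj_start + words - 1);
  if (!bot.is_card_boundary(obj_start)) {
    first_card += 1;
  }
  for (size_t c = first_card; c <= last_card; ++c) {
    size_t card_addr = c * kCardWords;
    if (bot.block_start_for(card_addr) != obj_start) {
      std::printf("card %zu maps to %zu, expected %zu\n",
                  c, bot.block_start_for(card_addr), obj_start);
      return false;
    }
  }
  return true;
}

int main() {
  ToyBOT bot;
  bot.alloc_block(10, 200);                // a 200-word object at word 10
  if (!verify_for_object(bot, 10, 200)) {
    return 1;
  }
  std::printf("BOT entries for the object are consistent\n");
  return 0;
}
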
--- a/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -352,11 +352,6 @@
   // The following methods are useful and optimized for a
   // general, non-contiguous space.
 
-  // The given arguments are required to be the starts of adjacent ("blk1"
-  // before "blk2") well-formed blocks covered by "this".  After this call,
-  // they should be considered to form one block.
-  virtual void join_blocks(HeapWord* blk1, HeapWord* blk2);
-
   // Given a block [blk_start, blk_start + full_blk_size), and
   // a left_blk_size < full_blk_size, adjust the BOT to show two
   // blocks [blk_start, blk_start + left_blk_size) and
@@ -429,6 +424,12 @@
     verify_single_block(blk, blk + size);
   }
 
+  // Used by region verification. Checks that the contents of the
+  // BOT reflect that there's a single object that spans the address
+  // range [obj_start, obj_start + word_size); returns true if this is
+  // the case, returns false if it's not.
+  bool verify_for_object(HeapWord* obj_start, size_t word_size) const;
+
   // Verify that the given block is before _unallocated_block
   inline void verify_not_unallocated(HeapWord* blk_start,
                                      HeapWord* blk_end) const {
@@ -444,7 +445,7 @@
 
   void check_all_cards(size_t left_card, size_t right_card) const;
 
-  virtual void set_for_starts_humongous(HeapWord* new_end);
+  virtual void print_on(outputStream* out) PRODUCT_RETURN;
 };
 
 // A subtype of BlockOffsetArray that takes advantage of the fact
@@ -494,7 +495,9 @@
   HeapWord* block_start_unsafe(const void* addr);
   HeapWord* block_start_unsafe_const(const void* addr) const;
 
-  virtual void set_for_starts_humongous(HeapWord* new_end);
+  void set_for_starts_humongous(HeapWord* new_top);
+
+  virtual void print_on(outputStream* out) PRODUCT_RETURN;
 };
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -610,6 +610,39 @@
   // of the free region list is revamped as part of CR 6977804.
   wait_for_cleanup_complete();
 
+  // Other threads might still be trying to allocate using CASes out
+  // of the region we are retiring, as they can do so without holding
+  // the Heap_lock. So we first have to make sure that no one else can
+  // allocate in it by doing a maximal allocation. Even if our CAS
+  // attempt fails a few times, we'll succeed sooner or later given
+  // that a failed CAS attempt means that the region is getting close
+  // to being full (someone else succeeded in allocating into it).
+  size_t free_word_size = cur_alloc_region->free() / HeapWordSize;
+
+  // This is the minimum free chunk we can turn into a dummy
+  // object. If the free space falls below this, then no one can
+  // allocate in this region anyway (all allocation requests will be
+  // of a size larger than this) so we won't have to perform the dummy
+  // allocation.
+  size_t min_word_size_to_fill = CollectedHeap::min_fill_size();
+
+  while (free_word_size >= min_word_size_to_fill) {
+    HeapWord* dummy =
+      cur_alloc_region->par_allocate_no_bot_updates(free_word_size);
+    if (dummy != NULL) {
+      // If the allocation was successful we should fill in the space.
+      CollectedHeap::fill_with_object(dummy, free_word_size);
+      break;
+    }
+
+    free_word_size = cur_alloc_region->free() / HeapWordSize;
+    // It's also possible that someone else beats us to the
+    // allocation and fills up the region. In that case, we can
+    // just get out of the loop.
+  }
+  assert(cur_alloc_region->free() / HeapWordSize < min_word_size_to_fill,
+         "sanity");
+
   retire_cur_alloc_region_common(cur_alloc_region);
   assert(_cur_alloc_region == NULL, "post-condition");
 }
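
The retirement loop added above can be modelled in isolation. ToyRegion, kMinFillWords and the word-index results below are stand-ins of this sketch (not HeapRegion::par_allocate_no_bot_updates() or CollectedHeap::min_fill_size()), but the control flow is the same: keep CAS-allocating the entire remaining free space until either the CAS wins or the leftover is too small for anyone else to use.

#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdio>

// Toy region whose "top" is bumped with a CAS so that concurrent threads
// can allocate without a lock.
struct ToyRegion {
  std::atomic<size_t> top;          // words already allocated
  size_t              end;          // capacity in words
  explicit ToyRegion(size_t capacity) : top(0), end(capacity) {}

  size_t free_words() const { return end - top.load(); }

  // Lock-free bump allocation; returns the old top, or (size_t)-1 on failure.
  size_t par_allocate(size_t words) {
    size_t cur = top.load();
    do {
      if (end - cur < words) return (size_t)-1;   // does not fit
    } while (!top.compare_exchange_weak(cur, cur + words));
    return cur;
  }
};

// kMinFillWords stands in for the smallest chunk that could still be
// handed out; anything smaller is unusable by other allocators.
static const size_t kMinFillWords = 2;

// Retire the region: plug whatever free space is left with one maximal
// allocation so no other thread can allocate into it anymore.
void retire_region(ToyRegion* r) {
  size_t free_words = r->free_words();
  while (free_words >= kMinFillWords) {
    if (r->par_allocate(free_words) != (size_t)-1) {
      // We won the race; a real collector would format the claimed tail
      // as a filler (dummy) object here.
      break;
    }
    // Someone else allocated concurrently; re-read the shrunken free space.
    free_words = r->free_words();
  }
  // Either we filled the tail or it is too small for anyone to use.
  assert(r->free_words() < kMinFillWords);
}

int main() {
  ToyRegion r(1024);
  r.par_allocate(1000);
  retire_region(&r);
  std::printf("free words after retirement: %zu\n", r.free_words());
  return 0;
}
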
@@ -661,27 +694,29 @@
       // young type.
       OrderAccess::storestore();
 
-      // Now allocate out of the new current alloc region. We could
-      // have re-used allocate_from_cur_alloc_region() but its
-      // operation is slightly different to what we need here. First,
-      // allocate_from_cur_alloc_region() is only called outside a
-      // safepoint and will always unlock the Heap_lock if it returns
-      // a non-NULL result. Second, it assumes that the current alloc
-      // region is what's already assigned in _cur_alloc_region. What
-      // we want here is to actually do the allocation first before we
-      // assign the new region to _cur_alloc_region. This ordering is
-      // not currently important, but it will be essential when we
-      // change the code to support CAS allocation in the future (see
-      // CR 6994297).
-      //
-      // This allocate method does BOT updates and we don't need them in
-      // the young generation. This will be fixed in the near future by
-      // CR 6994297.
-      HeapWord* result = new_cur_alloc_region->allocate(word_size);
+      // Now, perform the allocation out of the region we just
+      // allocated. Note that no one else can access that region at
+      // this point (as _cur_alloc_region has not been updated yet),
+      // so we can just go ahead and do the allocation without any
+      // atomics (and we expect this allocation attempt to
+      // succeed). Given that other threads can attempt an allocation
+      // with a CAS and without needing the Heap_lock, if we assigned
+      // the new region to _cur_alloc_region before first allocating
+      // into it other threads might have filled up the new region
+      // before we got a chance to do the allocation ourselves. In
+      // that case, we would have needed to retire the region, grab a
+      // new one, and go through all this again. Allocating out of the
+      // new region before assigning it to _cur_alloc_region avoids
+      // all this.
+      HeapWord* result =
+                     new_cur_alloc_region->allocate_no_bot_updates(word_size);
       assert(result != NULL, "we just allocate out of an empty region "
              "so allocation should have been successful");
       assert(is_in(result), "result should be in the heap");
 
+      // Now make sure that the store to _cur_alloc_region does not
+      // float above the store to top.
+      OrderAccess::storestore();
       _cur_alloc_region = new_cur_alloc_region;
 
       if (!at_safepoint) {
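
The ordering argument in the comments above (allocate into the region while it is still private, then publish it, with a storestore barrier in between) is the publish-after-initialize pattern. Below is a hedged C++11 sketch that uses a release store in place of OrderAccess::storestore() and an acquire load on the reader side; none of the names are HotSpot's.

#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdio>

// A region whose 'top' is deliberately non-atomic: it is only written
// while the region is private to the installing thread.
struct Region {
  size_t top;
  size_t end;
  explicit Region(size_t capacity) : top(0), end(capacity) {}
};

std::atomic<Region*> g_cur_alloc_region(nullptr);

// Slow path: build a fresh region, satisfy the current request out of it,
// and only then publish it. The release store stands in for the
// storestore barrier: the write to top cannot become visible after the
// region pointer does.
size_t replace_region_and_allocate(size_t words) {
  Region* fresh = new Region(1024);
  assert(words <= fresh->end && "an empty region must satisfy the request");
  size_t result = fresh->top;            // allocate while still private
  fresh->top += words;
  g_cur_alloc_region.store(fresh, std::memory_order_release);
  return result;
}

// Fast path run by other threads once the region is visible; the acquire
// load pairs with the release store above. (A real fast path would bump
// top with a CAS; plain arithmetic keeps the sketch short.)
bool try_fast_allocate(size_t words, size_t* result) {
  Region* r = g_cur_alloc_region.load(std::memory_order_acquire);
  if (r == nullptr || r->end - r->top < words) {
    return false;
  }
  *result = r->top;
  r->top += words;
  return true;
}

int main() {
  size_t first = replace_region_and_allocate(16);
  size_t second = 0;
  bool ok = try_fast_allocate(8, &second);
  std::printf("first allocation at %zu, second %s at %zu\n",
              first, ok ? "succeeded" : "failed", second);
  delete g_cur_alloc_region.load();
  return 0;
}
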
@@ -718,6 +753,9 @@
   for (int try_count = 1; /* we'll return or break */; try_count += 1) {
     bool succeeded = true;
 
+    // Every time we go round the loop we should be holding the Heap_lock.
+    assert_heap_locked();
+
     {
       // We may have concurrent cleanup working at the time. Wait for
       // it to complete. In the future we would probably want to make
@@ -734,7 +772,8 @@
       // attempt as it's redundant (we only reach here after an
       // allocation attempt has been unsuccessful).
       wait_for_cleanup_complete();
-      HeapWord* result = attempt_allocation(word_size);
+
+      HeapWord* result = attempt_allocation_locked(word_size);
       if (result != NULL) {
         assert_heap_not_locked();
         return result;
@@ -748,7 +787,6 @@
       if (g1_policy()->can_expand_young_list()) {
         // Yes, we are allowed to expand the young gen. Let's try to
         // allocate a new current alloc region.
-
         HeapWord* result =
           replace_cur_alloc_region_and_allocate(word_size,
                                                 false, /* at_safepoint */
@@ -771,20 +809,23 @@
       // rather than causing more, now probably unnecessary, GC attempts.
       JavaThread* jthr = JavaThread::current();
       assert(jthr != NULL, "sanity");
-      if (!jthr->in_critical()) {
-        MutexUnlocker mul(Heap_lock);
-        GC_locker::stall_until_clear();
-
-        // We'll then fall off the end of the ("if GC locker active")
-        // if-statement and retry the allocation further down in the
-        // loop.
-      } else {
+      if (jthr->in_critical()) {
         if (CheckJNICalls) {
           fatal("Possible deadlock due to allocating while"
                 " in jni critical section");
         }
+        // We are returning NULL so the protocol is that we're still
+        // holding the Heap_lock.
+        assert_heap_locked();
         return NULL;
       }
+
+      Heap_lock->unlock();
+      GC_locker::stall_until_clear();
+
+      // No need to relock the Heap_lock. We'll fall through to the code
+      // below the else-statement, which assumes that we are not
+      // holding the Heap_lock.
     } else {
       // We are not locked out. So, let's try to do a GC. The VM op
       // will retry the allocation before it completes.
@@ -805,11 +846,10 @@
         dirty_young_block(result, word_size);
         return result;
       }
-
-      Heap_lock->lock();
     }
 
-    assert_heap_locked();
+    // Both paths that get us here from above unlock the Heap_lock.
+    assert_heap_not_locked();
 
     // We can reach here when we were unsuccessful in doing a GC,
     // because another thread beat us to it, or because we were locked
@@ -948,10 +988,8 @@
     if (!expect_null_cur_alloc_region) {
       HeapRegion* cur_alloc_region = _cur_alloc_region;
       if (cur_alloc_region != NULL) {
-        // This allocate method does BOT updates and we don't need them in
-        // the young generation. This will be fixed in the near future by
-        // CR 6994297.
-        HeapWord* result = cur_alloc_region->allocate(word_size);
+        // We are at a safepoint so no reason to use the MT-safe version.
+        HeapWord* result = cur_alloc_region->allocate_no_bot_updates(word_size);
         if (result != NULL) {
           assert(is_in(result), "result should be in the heap");
 
@@ -983,20 +1021,17 @@
   assert_heap_not_locked_and_not_at_safepoint();
   assert(!isHumongous(word_size), "we do not allow TLABs of humongous size");
 
-  Heap_lock->lock();
-
-  // First attempt: try allocating out of the current alloc region or
-  // after replacing the current alloc region.
+  // First attempt: Try allocating out of the current alloc region
+  // using a CAS. If that fails, take the Heap_lock and retry the
+  // allocation, potentially replacing the current alloc region.
   HeapWord* result = attempt_allocation(word_size);
   if (result != NULL) {
     assert_heap_not_locked();
     return result;
   }
 
-  assert_heap_locked();
-
-  // Second attempt: go into the even slower path where we might
-  // try to schedule a collection.
+  // Second attempt: Go to the slower path where we might try to
+  // schedule a collection.
   result = attempt_allocation_slow(word_size);
   if (result != NULL) {
     assert_heap_not_locked();
@@ -1004,6 +1039,7 @@
   }
 
   assert_heap_locked();
+  // Need to unlock the Heap_lock before returning.
   Heap_lock->unlock();
   return NULL;
 }
@@ -1022,11 +1058,10 @@
   for (int try_count = 1; /* we'll return */; try_count += 1) {
     unsigned int gc_count_before;
     {
-      Heap_lock->lock();
-
       if (!isHumongous(word_size)) {
-        // First attempt: try allocating out of the current alloc
-        // region or after replacing the current alloc region.
+        // First attempt: Try allocating out of the current alloc region
+        // using a CAS. If that fails, take the Heap_lock and retry the
+        // allocation, potentially replacing the current alloc region.
         HeapWord* result = attempt_allocation(word_size);
         if (result != NULL) {
           assert_heap_not_locked();
@@ -1035,14 +1070,17 @@
 
         assert_heap_locked();
 
-        // Second attempt: go into the even slower path where we might
-        // try to schedule a collection.
+        // Second attempt: Go to the slower path where we might try to
+        // schedule a collection.
         result = attempt_allocation_slow(word_size);
         if (result != NULL) {
           assert_heap_not_locked();
           return result;
         }
       } else {
+        // attempt_allocation_humongous() requires the Heap_lock to be held.
+        Heap_lock->lock();
+
         HeapWord* result = attempt_allocation_humongous(word_size,
                                                      false /* at_safepoint */);
         if (result != NULL) {
@@ -1054,7 +1092,8 @@
       assert_heap_locked();
       // Read the gc count while the heap lock is held.
       gc_count_before = SharedHeap::heap()->total_collections();
-      // We cannot be at a safepoint, so it is safe to unlock the Heap_lock
+
+      // Release the Heap_lock before attempting the collection.
       Heap_lock->unlock();
     }
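
The gc_count_before handshake above is a small protocol of its own: read the collection count while the Heap_lock is held, drop the lock, and tag the collection request with that count so a collection that completed in the meantime turns the request into a no-op. A toy version, assuming nothing about the real VM-operation machinery:

#include <cstdio>
#include <mutex>

// Toy heap: the count of completed collections is read under the heap
// lock, the lock is released, and the request carries that count so a
// collection triggered by another thread in the meantime is detected.
struct ToyHeap {
  std::mutex   heap_lock;
  unsigned int total_collections;
  ToyHeap() : total_collections(0) {}

  // Caller must hold heap_lock.
  void do_collection() { total_collections += 1; }

  // Collect only if no GC has completed since gc_count_before was read.
  bool collect_if_still_needed(unsigned int gc_count_before) {
    std::lock_guard<std::mutex> lg(heap_lock);
    if (total_collections != gc_count_before) {
      return false;                       // somebody else already collected
    }
    do_collection();
    return true;
  }
};

int main() {
  ToyHeap heap;
  unsigned int gc_count_before;
  {
    std::lock_guard<std::mutex> lg(heap.heap_lock);
    gc_count_before = heap.total_collections;  // read the count under the lock ...
  }                                            // ... and release it before collecting
  bool did_gc = heap.collect_if_still_needed(gc_count_before);
  std::printf("collection %s\n", did_gc ? "performed" : "skipped");
  return 0;
}
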
 
@@ -1192,7 +1231,7 @@
     return false;
   }
 
-  DTraceGCProbeMarker gc_probe_marker(true /* full */);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
   ResourceMark rm;
 
   if (PrintHeapAtGC) {
@@ -1868,7 +1907,7 @@
 
   ReservedSpace heap_rs(max_byte_size + pgs->max_size(),
                         HeapRegion::GrainBytes,
-                        false /*ism*/, addr);
+                        UseLargePages, addr);
 
   if (UseCompressedOops) {
     if (addr != NULL && !heap_rs.is_reserved()) {
@@ -1877,13 +1916,13 @@
       // Try again to reserve heap higher.
       addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
       ReservedSpace heap_rs0(total_reserved, HeapRegion::GrainBytes,
-                             false /*ism*/, addr);
+                             UseLargePages, addr);
       if (addr != NULL && !heap_rs0.is_reserved()) {
         // Failed to reserve at specified address again - give up.
         addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
         assert(addr == NULL, "");
         ReservedSpace heap_rs1(total_reserved, HeapRegion::GrainBytes,
-                               false /*ism*/, addr);
+                               UseLargePages, addr);
         heap_rs = heap_rs1;
       } else {
         heap_rs = heap_rs0;
@@ -3214,7 +3253,7 @@
     return false;
   }
 
-  DTraceGCProbeMarker gc_probe_marker(false /* full */);
+  SvcGCMarker sgcm(SvcGCMarker::MINOR);
   ResourceMark rm;
 
   if (PrintHeapAtGC) {
@@ -3856,13 +3895,15 @@
   size_t _next_marked_bytes;
   OopsInHeapRegionClosure *_cl;
 public:
-  RemoveSelfPointerClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* cl) :
-    _g1(g1), _cm(_g1->concurrent_mark()),  _prev_marked_bytes(0),
+  RemoveSelfPointerClosure(G1CollectedHeap* g1, HeapRegion* hr,
+                           OopsInHeapRegionClosure* cl) :
+    _g1(g1), _hr(hr), _cm(_g1->concurrent_mark()),  _prev_marked_bytes(0),
     _next_marked_bytes(0), _cl(cl) {}
 
   size_t prev_marked_bytes() { return _prev_marked_bytes; }
   size_t next_marked_bytes() { return _next_marked_bytes; }
 
+  // <original comment>
   // The original idea here was to coalesce evacuated and dead objects.
   // However that caused complications with the block offset table (BOT).
   // In particular if there were two TLABs, one of them partially refined.
@@ -3871,15 +3912,24 @@
   // of TLAB_2. If the last object of the TLAB_1 and the first object
   // of TLAB_2 are coalesced, then the cards of the unrefined part
   // would point into middle of the filler object.
+  // The current approach is to not coalesce and leave the BOT contents intact.
+  // </original comment>
   //
-  // The current approach is to not coalesce and leave the BOT contents intact.
+  // We now reset the BOT when we start the object iteration over the
+  // region and refine its entries for every object we come across. So
+  // the above comment is not really relevant and we should be able
+  // to coalesce dead objects if we want to.
   void do_object(oop obj) {
+    HeapWord* obj_addr = (HeapWord*) obj;
+    assert(_hr->is_in(obj_addr), "sanity");
+    size_t obj_size = obj->size();
+    _hr->update_bot_for_object(obj_addr, obj_size);
     if (obj->is_forwarded() && obj->forwardee() == obj) {
       // The object failed to move.
       assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs.");
       _cm->markPrev(obj);
       assert(_cm->isPrevMarked(obj), "Should be marked!");
-      _prev_marked_bytes += (obj->size() * HeapWordSize);
+      _prev_marked_bytes += (obj_size * HeapWordSize);
       if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) {
         _cm->markAndGrayObjectIfNecessary(obj);
       }
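
The closure now leans on the reset_bot()/update_bot_for_object() pair introduced in heapRegion.hpp: the region's BOT is wiped before the walk and re-populated for every object encountered, which is what makes the old "do not coalesce" restriction obsolete. A simplified, self-contained model of that walk (ToyObject, RebuiltIndex and the filler flag are inventions of this sketch):

#include <cstddef>
#include <cstdio>
#include <vector>

// Toy objects laid out back to back; 'failed_to_move' marks the ones that
// self-forwarded during the evacuation failure.
struct ToyObject { size_t size; bool failed_to_move; bool is_filler; };

// Stand-in for the region's block-offset information.
struct RebuiltIndex {
  std::vector<size_t> block_starts;
  void reset() { block_starts.clear(); }                 // ~ reset_bot()
  void record_block(size_t start) { block_starts.push_back(start); }
};

// Walk the region once: rebuild the index entry by entry and turn
// everything that did manage to move (or died) into a filler block.
void remove_self_pointers(std::vector<ToyObject>* region, RebuiltIndex* idx) {
  idx->reset();
  size_t addr = 0;
  for (size_t i = 0; i < region->size(); ++i) {
    ToyObject& obj = (*region)[i];
    idx->record_block(addr);                             // ~ update_bot_for_object()
    if (!obj.failed_to_move) {
      obj.is_filler = true;                              // ~ fill_with_object()
    }
    addr += obj.size;
  }
}

int main() {
  ToyObject objs[] = { {8, true, false}, {16, false, false}, {4, true, false} };
  std::vector<ToyObject> region(objs, objs + 3);
  RebuiltIndex idx;
  remove_self_pointers(&region, &idx);
  for (size_t i = 0; i < region.size(); ++i) {
    std::printf("block %zu starts at %zu (%s)\n", i, idx.block_starts[i],
                region[i].is_filler ? "filler" : "kept");
  }
  return 0;
}
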
@@ -3901,7 +3951,7 @@
     } else {
       // The object has been either evacuated or is dead. Fill it with a
       // dummy object.
-      MemRegion mr((HeapWord*)obj, obj->size());
+      MemRegion mr((HeapWord*)obj, obj_size);
       CollectedHeap::fill_with_object(mr);
       _cm->clearRangeBothMaps(mr);
     }
@@ -3921,10 +3971,13 @@
   HeapRegion* cur = g1_policy()->collection_set();
   while (cur != NULL) {
     assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
-
-    RemoveSelfPointerClosure rspc(_g1h, cl);
+    assert(!cur->isHumongous(), "sanity");
+
     if (cur->evacuation_failed()) {
       assert(cur->in_collection_set(), "bad CS");
+      RemoveSelfPointerClosure rspc(_g1h, cur, cl);
+
+      cur->reset_bot();
       cl->set_region(cur);
       cur->object_iterate(&rspc);
 
@@ -3989,15 +4042,6 @@
   }
 }
 
-void G1CollectedHeap::handle_evacuation_failure(oop old) {
-  markOop m = old->mark();
-  // forward to self
-  assert(!old->is_forwarded(), "precondition");
-
-  old->forward_to(old);
-  handle_evacuation_failure_common(old, m);
-}
-
 oop
 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
                                                oop old) {
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -430,7 +430,8 @@
                                  bool*  gc_overhead_limit_was_exceeded);
 
   // The following methods, allocate_from_cur_allocation_region(),
-  // attempt_allocation(), replace_cur_alloc_region_and_allocate(),
+  // attempt_allocation(), attempt_allocation_locked(),
+  // replace_cur_alloc_region_and_allocate(),
   // attempt_allocation_slow(), and attempt_allocation_humongous()
   // have very awkward pre- and post-conditions with respect to
   // locking:
@@ -481,20 +482,30 @@
   // successfully manage to allocate it, or NULL.
 
   // It tries to satisfy an allocation request out of the current
-  // allocating region, which is passed as a parameter. It assumes
-  // that the caller has checked that the current allocating region is
-  // not NULL. Given that the caller has to check the current
-  // allocating region for at least NULL, it might as well pass it as
-  // the first parameter so that the method doesn't have to read it
-  // from the _cur_alloc_region field again.
+  // alloc region, which is passed as a parameter. It assumes that the
+  // caller has checked that the current alloc region is not NULL.
+  // Given that the caller has to check the current alloc region for
+  // at least NULL, it might as well pass it as the first parameter so
+  // that the method doesn't have to read it from the
+  // _cur_alloc_region field again. It is called from both
+  // attempt_allocation() and attempt_allocation_locked(), and the
+  // with_heap_lock parameter indicates whether the caller was holding
+  // the heap lock when it called this method.
   inline HeapWord* allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
-                                                  size_t word_size);
+                                                  size_t word_size,
+                                                  bool with_heap_lock);
 
-  // It attempts to allocate out of the current alloc region. If that
-  // fails, it retires the current alloc region (if there is one),
-  // tries to get a new one and retries the allocation.
+  // First-level of allocation slow path: it attempts to allocate out
+  // of the current alloc region in a lock-free manner using a CAS. If
+  // that fails it takes the Heap_lock and calls
+  // attempt_allocation_locked() for the second-level slow path.
   inline HeapWord* attempt_allocation(size_t word_size);
 
+  // Second-level of allocation slow path: while holding the Heap_lock
+  // it tries to allocate out of the current alloc region and, if that
+  // fails, tries to allocate out of a new current alloc region.
+  inline HeapWord* attempt_allocation_locked(size_t word_size);
+
   // It assumes that the current alloc region has been retired and
   // tries to allocate a new one. If it's successful, it performs the
   // allocation out of the new current alloc region and updates
@@ -506,11 +517,11 @@
                                                   bool do_dirtying,
                                                   bool can_expand);
 
-  // The slow path when we are unable to allocate a new current alloc
-  // region to satisfy an allocation request (i.e., when
-  // attempt_allocation() fails). It will try to do an evacuation
-  // pause, which might stall due to the GC locker, and retry the
-  // allocation attempt when appropriate.
+  // Third-level of allocation slow path: when we are unable to
+  // allocate a new current alloc region to satisfy an allocation
+  // request (i.e., when attempt_allocation_locked() fails). It will
+  // try to do an evacuation pause, which might stall due to the GC
+  // locker, and retry the allocation attempt when appropriate.
   HeapWord* attempt_allocation_slow(size_t word_size);
 
   // The method that tries to satisfy a humongous allocation
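
The three levels documented above compose as: a lock-free CAS fast path, a locked retry that may install a fresh alloc region, and finally a path that collects and retries. The skeleton below keeps only that control flow; the "heap" is a single counter and none of it is the real G1CollectedHeap API.

#include <cstddef>
#include <cstdio>
#include <mutex>

// Control-flow skeleton of the three allocation levels; region
// bookkeeping is reduced to a word counter so the shape of the fast,
// locked and collecting paths stands out.
class ToyAllocator {
 public:
  ToyAllocator() : used_(0), capacity_(64) {}

  long mem_allocate(size_t words) {
    long result = attempt_allocation(words);    // levels 1 and 2
    if (result >= 0) return result;
    return attempt_allocation_slow(words);      // level 3
  }

 private:
  // Level 1: lock-free attempt; falls back to the locked level on failure.
  long attempt_allocation(size_t words) {
    long result = try_bump(words);              // stands in for the CAS bump
    if (result >= 0) return result;
    std::lock_guard<std::mutex> lg(heap_lock_);
    return attempt_allocation_locked(words);
  }

  // Level 2 (heap lock held): retry, then try to install a fresh region.
  long attempt_allocation_locked(size_t words) {
    long result = try_bump(words);              // another thread may have made room
    if (result >= 0) return result;
    if (can_expand()) {
      capacity_ += 64;                          // "replace" the alloc region
      return try_bump(words);
    }
    return -1;
  }

  // Level 3: collect, then retry the cheaper levels once.
  long attempt_allocation_slow(size_t words) {
    collect();
    return attempt_allocation(words);
  }

  long try_bump(size_t words) {
    if (capacity_ - used_ < words) return -1;
    long result = (long) used_;
    used_ += words;
    return result;
  }
  bool can_expand() const { return capacity_ < 128; }
  void collect()          { used_ = 0; }        // everything is garbage here

  std::mutex heap_lock_;
  size_t used_;
  size_t capacity_;
};

int main() {
  ToyAllocator heap;
  for (int i = 0; i < 5; ++i) {
    long at = heap.mem_allocate(50);
    std::printf("allocation %d at offset %ld\n", i, at);
  }
  return 0;
}
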
@@ -826,7 +837,6 @@
   void finalize_for_evac_failure();
 
   // An attempt to evacuate "obj" has failed; take necessary steps.
-  void handle_evacuation_failure(oop obj);
   oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
   void handle_evacuation_failure_common(oop obj, markOop m);
 
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -63,10 +63,12 @@
 // assumptions of this method (and other related ones).
 inline HeapWord*
 G1CollectedHeap::allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
-                                                size_t word_size) {
-  assert_heap_locked_and_not_at_safepoint();
+                                                size_t word_size,
+                                                bool with_heap_lock) {
+  assert_not_at_safepoint();
+  assert(with_heap_lock == Heap_lock->owned_by_self(),
+         "with_heap_lock and Heap_lock->owned_by_self() should be a tautology");
   assert(cur_alloc_region != NULL, "pre-condition of the method");
-  assert(cur_alloc_region == _cur_alloc_region, "pre-condition of the method");
   assert(cur_alloc_region->is_young(),
          "we only support young current alloc regions");
   assert(!isHumongous(word_size), "allocate_from_cur_alloc_region() "
@@ -76,20 +78,24 @@
   assert(!cur_alloc_region->is_empty(),
          err_msg("region ["PTR_FORMAT","PTR_FORMAT"] should not be empty",
                  cur_alloc_region->bottom(), cur_alloc_region->end()));
-  // This allocate method does BOT updates and we don't need them in
-  // the young generation. This will be fixed in the near future by
-  // CR 6994297.
-  HeapWord* result = cur_alloc_region->allocate(word_size);
+  HeapWord* result = cur_alloc_region->par_allocate_no_bot_updates(word_size);
   if (result != NULL) {
     assert(is_in(result), "result should be in the heap");
-    Heap_lock->unlock();
 
+    if (with_heap_lock) {
+      Heap_lock->unlock();
+    }
+    assert_heap_not_locked();
     // Do the dirtying after we release the Heap_lock.
     dirty_young_block(result, word_size);
     return result;
   }
 
-  assert_heap_locked();
+  if (with_heap_lock) {
+    assert_heap_locked();
+  } else {
+    assert_heap_not_locked();
+  }
   return NULL;
 }
 
@@ -97,31 +103,27 @@
 // assumptions of this method (and other related ones).
 inline HeapWord*
 G1CollectedHeap::attempt_allocation(size_t word_size) {
-  assert_heap_locked_and_not_at_safepoint();
+  assert_heap_not_locked_and_not_at_safepoint();
   assert(!isHumongous(word_size), "attempt_allocation() should not be called "
          "for humongous allocation requests");
 
   HeapRegion* cur_alloc_region = _cur_alloc_region;
   if (cur_alloc_region != NULL) {
     HeapWord* result = allocate_from_cur_alloc_region(cur_alloc_region,
-                                                      word_size);
+                                                   word_size,
+                                                   false /* with_heap_lock */);
+    assert_heap_not_locked();
     if (result != NULL) {
-      assert_heap_not_locked();
       return result;
     }
-
-    assert_heap_locked();
-
-    // Since we couldn't successfully allocate into it, retire the
-    // current alloc region.
-    retire_cur_alloc_region(cur_alloc_region);
   }
 
-  // Try to get a new region and allocate out of it
-  HeapWord* result = replace_cur_alloc_region_and_allocate(word_size,
-                                                     false, /* at_safepoint */
-                                                     true,  /* do_dirtying */
-                                                     false  /* can_expand */);
+  // Our attempt to allocate lock-free failed as the current
+  // allocation region is either NULL or full. So, we'll now take the
+  // Heap_lock and retry.
+  Heap_lock->lock();
+
+  HeapWord* result = attempt_allocation_locked(word_size);
   if (result != NULL) {
     assert_heap_not_locked();
     return result;
@@ -145,6 +147,45 @@
   _cur_alloc_region = NULL;
 }
 
+inline HeapWord*
+G1CollectedHeap::attempt_allocation_locked(size_t word_size) {
+  assert_heap_locked_and_not_at_safepoint();
+  assert(!isHumongous(word_size), "attempt_allocation_locked() "
+         "should not be called for humongous allocation requests");
+
+  // First, reread the current alloc region and retry the allocation
+  // in case somebody replaced it while we were waiting to get the
+  // Heap_lock.
+  HeapRegion* cur_alloc_region = _cur_alloc_region;
+  if (cur_alloc_region != NULL) {
+    HeapWord* result = allocate_from_cur_alloc_region(
+                                                  cur_alloc_region, word_size,
+                                                  true /* with_heap_lock */);
+    if (result != NULL) {
+      assert_heap_not_locked();
+      return result;
+    }
+
+    // We failed to allocate out of the current alloc region, so let's
+    // retire it before getting a new one.
+    retire_cur_alloc_region(cur_alloc_region);
+  }
+
+  assert_heap_locked();
+  // Try to get a new region and allocate out of it
+  HeapWord* result = replace_cur_alloc_region_and_allocate(word_size,
+                                                     false, /* at_safepoint */
+                                                     true,  /* do_dirtying */
+                                                     false  /* can_expand */);
+  if (result != NULL) {
+    assert_heap_not_locked();
+    return result;
+  }
+
+  assert_heap_locked();
+  return NULL;
+}
+
 // It dirties the cards that cover the block so that the post
 // write barrier never queues anything when updating objects on this
 // block. It is assumed (and in fact we assert) that the block
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -386,26 +386,27 @@
 }
 // </PREDICTION>
 
-void HeapRegion::set_startsHumongous(HeapWord* new_end) {
+void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
   assert(end() == _orig_end,
          "Should be normal before the humongous object allocation");
   assert(top() == bottom(), "should be empty");
+  assert(bottom() <= new_top && new_top <= new_end, "pre-condition");
 
   _humongous_type = StartsHumongous;
   _humongous_start_region = this;
 
   set_end(new_end);
-  _offsets.set_for_starts_humongous(new_end);
+  _offsets.set_for_starts_humongous(new_top);
 }
 
-void HeapRegion::set_continuesHumongous(HeapRegion* start) {
+void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
   assert(end() == _orig_end,
          "Should be normal before the humongous object allocation");
   assert(top() == bottom(), "should be empty");
-  assert(start->startsHumongous(), "pre-condition");
+  assert(first_hr->startsHumongous(), "pre-condition");
 
   _humongous_type = ContinuesHumongous;
-  _humongous_start_region = start;
+  _humongous_start_region = first_hr;
 }
 
 bool HeapRegion::claimHeapRegion(jint claimValue) {
@@ -782,9 +783,6 @@
   verify(allow_dirty, /* use_prev_marking */ true, /* failures */ &dummy);
 }
 
-#define OBJ_SAMPLE_INTERVAL 0
-#define BLOCK_SAMPLE_INTERVAL 100
-
 // This really ought to be commoned up into OffsetTableContigSpace somehow.
 // We would need a mechanism to make that code skip dead objects.
 
@@ -795,83 +793,125 @@
   *failures = false;
   HeapWord* p = bottom();
   HeapWord* prev_p = NULL;
-  int objs = 0;
-  int blocks = 0;
   VerifyLiveClosure vl_cl(g1, use_prev_marking);
   bool is_humongous = isHumongous();
+  bool do_bot_verify = !is_young();
   size_t object_num = 0;
   while (p < top()) {
-    size_t size = oop(p)->size();
-    if (is_humongous != g1->isHumongous(size)) {
+    oop obj = oop(p);
+    size_t obj_size = obj->size();
+    object_num += 1;
+
+    if (is_humongous != g1->isHumongous(obj_size)) {
       gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
                              SIZE_FORMAT" words) in a %shumongous region",
-                             p, g1->isHumongous(size) ? "" : "non-",
-                             size, is_humongous ? "" : "non-");
+                             p, g1->isHumongous(obj_size) ? "" : "non-",
+                             obj_size, is_humongous ? "" : "non-");
        *failures = true;
+       return;
+    }
+
+    // If it returns false, verify_for_object() will output the
+    // appropriate message.
+    if (do_bot_verify && !_offsets.verify_for_object(p, obj_size)) {
+      *failures = true;
+      return;
     }
-    object_num += 1;
-    if (blocks == BLOCK_SAMPLE_INTERVAL) {
-      HeapWord* res = block_start_const(p + (size/2));
-      if (p != res) {
-        gclog_or_tty->print_cr("offset computation 1 for "PTR_FORMAT" and "
-                               SIZE_FORMAT" returned "PTR_FORMAT,
-                               p, size, res);
+
+    if (!g1->is_obj_dead_cond(obj, this, use_prev_marking)) {
+      if (obj->is_oop()) {
+        klassOop klass = obj->klass();
+        if (!klass->is_perm()) {
+          gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
+                                 "not in perm", klass, obj);
+          *failures = true;
+          return;
+        } else if (!klass->is_klass()) {
+          gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
+                                 "not a klass", klass, obj);
+          *failures = true;
+          return;
+        } else {
+          vl_cl.set_containing_obj(obj);
+          obj->oop_iterate(&vl_cl);
+          if (vl_cl.failures()) {
+            *failures = true;
+          }
+          if (G1MaxVerifyFailures >= 0 &&
+              vl_cl.n_failures() >= G1MaxVerifyFailures) {
+            return;
+          }
+        }
+      } else {
+        gclog_or_tty->print_cr(PTR_FORMAT" not an oop", obj);
         *failures = true;
         return;
       }
-      blocks = 0;
-    } else {
-      blocks++;
-    }
-    if (objs == OBJ_SAMPLE_INTERVAL) {
-      oop obj = oop(p);
-      if (!g1->is_obj_dead_cond(obj, this, use_prev_marking)) {
-        if (obj->is_oop()) {
-          klassOop klass = obj->klass();
-          if (!klass->is_perm()) {
-            gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
-                                   "not in perm", klass, obj);
-            *failures = true;
-            return;
-          } else if (!klass->is_klass()) {
-            gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
-                                   "not a klass", klass, obj);
-            *failures = true;
-            return;
-          } else {
-            vl_cl.set_containing_obj(obj);
-            obj->oop_iterate(&vl_cl);
-            if (vl_cl.failures()) {
-              *failures = true;
-            }
-            if (G1MaxVerifyFailures >= 0 &&
-                vl_cl.n_failures() >= G1MaxVerifyFailures) {
-              return;
-            }
-          }
-        } else {
-          gclog_or_tty->print_cr(PTR_FORMAT" no an oop", obj);
-          *failures = true;
-          return;
-        }
-      }
-      objs = 0;
-    } else {
-      objs++;
     }
     prev_p = p;
-    p += size;
+    p += obj_size;
+  }
+
+  if (p != top()) {
+    gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
+                           "does not match top "PTR_FORMAT, p, top());
+    *failures = true;
+    return;
   }
-  HeapWord* rend = end();
-  HeapWord* rtop = top();
-  if (rtop < rend) {
-    HeapWord* res = block_start_const(rtop + (rend - rtop) / 2);
-    if (res != rtop) {
-        gclog_or_tty->print_cr("offset computation 2 for "PTR_FORMAT" and "
-                               PTR_FORMAT" returned "PTR_FORMAT,
-                               rtop, rend, res);
+
+  HeapWord* the_end = end();
+  assert(p == top(), "it should still hold");
+  // Do some extra BOT consistency checking for addresses in the
+  // range [top, end). BOT look-ups in this range should yield
+  // top. No point in doing that if top == end (there's nothing there).
+  if (p < the_end) {
+    // Look up top
+    HeapWord* addr_1 = p;
+    HeapWord* b_start_1 = _offsets.block_start_const(addr_1);
+    if (b_start_1 != p) {
+      gclog_or_tty->print_cr("BOT look up for top: "PTR_FORMAT" "
+                             " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
+                             addr_1, b_start_1, p);
+      *failures = true;
+      return;
+    }
+
+    // Look up top + 1
+    HeapWord* addr_2 = p + 1;
+    if (addr_2 < the_end) {
+      HeapWord* b_start_2 = _offsets.block_start_const(addr_2);
+      if (b_start_2 != p) {
+        gclog_or_tty->print_cr("BOT look up for top + 1: "PTR_FORMAT" "
+                               " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
+                               addr_2, b_start_2, p);
         *failures = true;
         return;
+      }
+    }
+
+    // Look up an address between top and end
+    size_t diff = pointer_delta(the_end, p) / 2;
+    HeapWord* addr_3 = p + diff;
+    if (addr_3 < the_end) {
+      HeapWord* b_start_3 = _offsets.block_start_const(addr_3);
+      if (b_start_3 != p) {
+        gclog_or_tty->print_cr("BOT look up for top + diff: "PTR_FORMAT" "
+                               " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
+                               addr_3, b_start_3, p);
+        *failures = true;
+        return;
+      }
+    }
+
+    // Look up end - 1
+    HeapWord* addr_4 = the_end - 1;
+    HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
+    if (b_start_4 != p) {
+      gclog_or_tty->print_cr("BOT look up for end - 1: "PTR_FORMAT" "
+                             " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
+                             addr_4, b_start_4, p);
+      *failures = true;
+      return;
     }
   }
 
@@ -880,12 +920,6 @@
                            "but has "SIZE_FORMAT" objects",
                            bottom(), end(), object_num);
     *failures = true;
-  }
-
-  if (p != top()) {
-    gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
-                           "does not match top "PTR_FORMAT, p, top());
-    *failures = true;
     return;
   }
 }
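The checks above all exercise one Block Offset Table invariant: an address below top() must map back to the start of the object that covers it, and an address in [top(), end()) must map back to top() itself. A condensed sketch of that invariant as a predicate, assuming only the block_start_const() lookup used in the hunk above (the helper name is illustrative):

    // true if the BOT answer for addr is what verify() expects
    static bool bot_answer_consistent(G1OffsetTableContigSpace* sp, HeapWord* addr) {
      HeapWord* b = sp->block_start_const(addr);
      if (addr >= sp->top()) {
        return b == sp->top();                          // unused tail maps back to top
      }
      return b <= addr && addr < b + oop(b)->size();    // addr lies inside its object
    }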
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -173,6 +173,19 @@
   virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
 
   virtual void print() const;
+
+  void reset_bot() {
+    _offsets.zero_bottom_entry();
+    _offsets.initialize_threshold();
+  }
+
+  void update_bot_for_object(HeapWord* start, size_t word_size) {
+    _offsets.alloc_block(start, word_size);
+  }
+
+  void print_bot_on(outputStream* out) {
+    _offsets.print_on(out);
+  }
 };
 
 class HeapRegion: public G1OffsetTableContigSpace {
@@ -359,6 +372,15 @@
     Allocated
   };
 
+  inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
+    assert(is_young(), "we can only skip BOT updates on young regions");
+    return ContiguousSpace::par_allocate(word_size);
+  }
+  inline HeapWord* allocate_no_bot_updates(size_t word_size) {
+    assert(is_young(), "we can only skip BOT updates on young regions");
+    return ContiguousSpace::allocate(word_size);
+  }
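  // Sketch of the intended call pattern (caller shape is illustrative): G1 only
  // needs BOT look-ups to find object starts in non-young regions, so young
  // regions may take the cheaper variant, roughly:
  //   HeapWord* res = hr->is_young() ? hr->par_allocate_no_bot_updates(word_size)
  //                                  : hr->par_allocate(word_size);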
+
   // If this region is a member of a HeapRegionSeq, the index in that
   // sequence, otherwise -1.
   int hrs_index() const { return _hrs_index; }
@@ -404,13 +426,35 @@
     return _humongous_start_region;
   }
 
-  // Causes the current region to represent a humongous object spanning "n"
-  // regions.
-  void set_startsHumongous(HeapWord* new_end);
+  // Makes the current region be a "starts humongous" region, i.e.,
+  // the first region in a series of one or more contiguous regions
+  // that will contain a single "humongous" object. The two parameters
+  // are as follows:
+  //
+  // new_top : The new value of the top field of this region which
+  // points to the end of the humongous object that's being
+  // allocated. If there is more than one region in the series, top
+  // will lie beyond this region's original end field and on the last
+  // region in the series.
+  //
+  // new_end : The new value of the end field of this region which
+  // points to the end of the last region in the series. If there is
+  // one region in the series (namely: this one) end will be the same
+  // as the original end of this region.
+  //
+  // Updating top and end as described above makes this region look as
+  // if it spans the entire space taken up by all the regions in the
+  // series and a single allocation moved its top to new_top. This
+  // ensures that the space (capacity / allocated) taken up by all
+  // humongous regions can be calculated by just looking at the
+  // "starts humongous" regions and by ignoring the "continues
+  // humongous" regions.
+  void set_startsHumongous(HeapWord* new_top, HeapWord* new_end);
 
-  // The regions that continue a humongous sequence should be added using
-  // this method, in increasing address order.
-  void set_continuesHumongous(HeapRegion* start);
+  // Makes the current region be a "continues humongous"
+  // region. first_hr is the "starts humongous" region of the series
+  // which this region will be part of.
+  void set_continuesHumongous(HeapRegion* first_hr);
 
   // If the region has a remembered set, return a pointer to it.
   HeapRegionRemSet* rem_set() const {
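A hedged sketch of the bookkeeping the set_startsHumongous() comment above describes, matching the call added in heapRegionSeq.cpp below; first_hr, last_hr, obj_size_in_words and the index loop are illustrative names rather than the exact HotSpot code:

    HeapWord* new_top = first_hr->bottom() + obj_size_in_words; // end of the humongous object
    HeapWord* new_end = last_hr->end();                         // end of the last region in the series
    first_hr->set_startsHumongous(new_top, new_end);
    for (int i = first_index + 1; i <= last_index; i += 1) {
      // every region after the first just records which region starts the series
      region_at(i)->set_continuesHumongous(first_hr);
    }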
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -144,7 +144,7 @@
     // will also update the BOT covering all the regions to reflect
     // that there is a single object that starts at the bottom of the
     // first region.
-    first_hr->set_startsHumongous(new_end);
+    first_hr->set_startsHumongous(new_top, new_end);
 
     // Then, if there are any, we will set up the "continues
     // humongous" regions.
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -38,7 +38,6 @@
 }
 
 void VM_G1CollectForAllocation::doit() {
-  JvmtiGCForAllocationMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   _result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);
   assert(_result == NULL || _pause_succeeded,
@@ -46,7 +45,6 @@
 }
 
 void VM_G1CollectFull::doit() {
-  JvmtiGCFullMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   GCCauseSetter x(g1h, _gc_cause);
   g1h->do_full_collection(false /* clear_all_soft_refs */);
@@ -72,7 +70,6 @@
 }
 
 void VM_G1IncCollectionPause::doit() {
-  JvmtiGCForAllocationMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   assert(!_should_initiate_conc_mark ||
   ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
--- a/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -42,8 +42,7 @@
 }
 
 void VM_ParallelGCFailedAllocation::doit() {
-  JvmtiGCForAllocationMarker jgcm;
-  notify_gc_begin(false);
+  SvcGCMarker sgcm(SvcGCMarker::MINOR);
 
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "must be a ParallelScavengeHeap");
@@ -54,8 +53,6 @@
   if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
-
-  notify_gc_end();
 }
 
 VM_ParallelGCFailedPermanentAllocation::VM_ParallelGCFailedPermanentAllocation(size_t size,
@@ -67,8 +64,7 @@
 }
 
 void VM_ParallelGCFailedPermanentAllocation::doit() {
-  JvmtiGCFullMarker jgcm;
-  notify_gc_begin(true);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
 
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "must be a ParallelScavengeHeap");
@@ -78,7 +74,6 @@
   if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
-  notify_gc_end();
 }
 
 // Only used for System.gc() calls
@@ -91,8 +86,7 @@
 }
 
 void VM_ParallelGCSystemGC::doit() {
-  JvmtiGCFullMarker jgcm;
-  notify_gc_begin(true);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
 
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap,
@@ -106,5 +100,4 @@
   } else {
     heap->invoke_full_gc(false);
   }
-  notify_gc_end();
 }
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -31,7 +31,6 @@
 #include "memory/oopFactory.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/instanceRefKlass.hpp"
-#include "prims/jvmtiExport.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/init.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -40,6 +39,7 @@
 #ifndef SERIALGC
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #endif
+
 HS_DTRACE_PROBE_DECL1(hotspot, gc__begin, bool);
 HS_DTRACE_PROBE_DECL(hotspot, gc__end);
 
@@ -158,8 +158,7 @@
 
 
 void VM_GenCollectForAllocation::doit() {
-  JvmtiGCForAllocationMarker jgcm;
-  notify_gc_begin(false);
+  SvcGCMarker sgcm(SvcGCMarker::MINOR);
 
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   GCCauseSetter gccs(gch, _gc_cause);
@@ -169,22 +168,19 @@
   if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
-  notify_gc_end();
 }
 
 void VM_GenCollectFull::doit() {
-  JvmtiGCFullMarker jgcm;
-  notify_gc_begin(true);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
 
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   GCCauseSetter gccs(gch, _gc_cause);
   gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
-  notify_gc_end();
 }
 
 void VM_GenCollectForPermanentAllocation::doit() {
-  JvmtiGCForAllocationMarker jgcm;
-  notify_gc_begin(true);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
+
   SharedHeap* heap = (SharedHeap*)Universe::heap();
   GCCauseSetter gccs(heap, _gc_cause);
   switch (heap->kind()) {
@@ -209,5 +205,4 @@
   if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
-  notify_gc_end();
 }
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Thu Jan 13 22:54:23 2011 -0800
@@ -30,6 +30,7 @@
 #include "runtime/jniHandles.hpp"
 #include "runtime/synchronizer.hpp"
 #include "runtime/vm_operations.hpp"
+#include "prims/jvmtiExport.hpp"
 
 // The following class hierarchy represents
 // a set of operations (VM_Operation) related to GC.
@@ -209,13 +210,17 @@
   HeapWord* result() const       { return _res; }
 };
 
-class DTraceGCProbeMarker : public StackObj {
-public:
-  DTraceGCProbeMarker(bool full) {
-    VM_GC_Operation::notify_gc_begin(full);
+class SvcGCMarker : public StackObj {
+ private:
+  JvmtiGCMarker _jgcm;
+ public:
+  typedef enum { MINOR, FULL, OTHER } reason_type;
+
+  SvcGCMarker(reason_type reason) {
+    VM_GC_Operation::notify_gc_begin(reason == FULL);
   }
 
-  ~DTraceGCProbeMarker() {
+  ~SvcGCMarker() {
     VM_GC_Operation::notify_gc_end();
   }
 };
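The marker is used exactly where the removed JvmtiGC*Marker objects were: stack-allocated at the top of a GC VM operation's doit(), as the vm_operations_g1.cpp, vmPSOperations.cpp and vmGCOperations.cpp hunks above show. A minimal sketch (VM_SomeCollection is a placeholder name):

    void VM_SomeCollection::doit() {
      // posts notify_gc_begin()/notify_gc_end() and, through the embedded
      // JvmtiGCMarker, the JVMTI GarbageCollectionStart/Finish events
      SvcGCMarker sgcm(SvcGCMarker::FULL);
      // ... perform the stop-the-world collection ...
    }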
--- a/src/share/vm/interpreter/interpreterRuntime.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/interpreter/interpreterRuntime.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -884,7 +884,7 @@
   return mdo->bci_to_di(bci);
 IRT_END
 
-IRT_ENTRY(jint, InterpreterRuntime::profile_method(JavaThread* thread, address cur_bcp))
+IRT_ENTRY(void, InterpreterRuntime::profile_method(JavaThread* thread))
   // use UnlockFlagSaver to clear and restore the _do_not_unlock_if_synchronized
   // flag, in case this method triggers classloading which will call into Java.
   UnlockFlagSaver fs(thread);
@@ -893,16 +893,12 @@
   frame fr = thread->last_frame();
   assert(fr.is_interpreted_frame(), "must come from interpreter");
   methodHandle method(thread, fr.interpreter_frame_method());
-  int bci = method->bci_from(cur_bcp);
   methodOopDesc::build_interpreter_method_data(method, THREAD);
   if (HAS_PENDING_EXCEPTION) {
     assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
     CLEAR_PENDING_EXCEPTION;
     // and fall through...
   }
-  methodDataOop mdo = method->method_data();
-  if (mdo == NULL)  return 0;
-  return mdo->bci_to_di(bci);
 IRT_END
 
 
--- a/src/share/vm/interpreter/interpreterRuntime.hpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/interpreter/interpreterRuntime.hpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -164,7 +164,7 @@
 
   // Interpreter profiling support
   static jint    bcp_to_di(methodOopDesc* method, address cur_bcp);
-  static jint    profile_method(JavaThread* thread, address cur_bcp);
+  static void    profile_method(JavaThread* thread);
   static void    update_mdp_for_ret(JavaThread* thread, int bci);
 #ifdef ASSERT
   static void    verify_mdp(methodOopDesc* method, address bcp, address mdp);
--- a/src/share/vm/oops/arrayKlass.hpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/oops/arrayKlass.hpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,8 +36,8 @@
   friend class VMStructs;
  private:
   int      _dimension;         // This is n'th-dimensional array.
-  klassOop _higher_dimension;  // Refers the (n+1)'th-dimensional array (if present).
-  klassOop _lower_dimension;   // Refers the (n-1)'th-dimensional array (if present).
+  volatile klassOop _higher_dimension;  // Refers the (n+1)'th-dimensional array (if present).
+  volatile klassOop _lower_dimension;   // Refers the (n-1)'th-dimensional array (if present).
   int      _vtable_len;        // size of vtable for this klass
   juint    _alloc_size;        // allocation profiling support
   oop      _component_mirror;  // component type, as a java/lang/Class
--- a/src/share/vm/oops/objArrayKlass.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/oops/objArrayKlass.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -235,8 +235,9 @@
           objArrayKlassKlass::cast(Universe::objArrayKlassKlassObj())->
           allocate_objArray_klass(dimension + 1, this_oop, CHECK_NULL);
         ak = objArrayKlassHandle(THREAD, new_klass);
+        ak->set_lower_dimension(this_oop());
+        OrderAccess::storestore();
         this_oop->set_higher_dimension(ak());
-        ak->set_lower_dimension(this_oop());
         assert(ak->oop_is_objArray(), "incorrect initialization of objArrayKlass");
       }
     }
--- a/src/share/vm/oops/typeArrayKlass.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/oops/typeArrayKlass.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -179,6 +179,7 @@
           dimension + 1, h_this, CHECK_NULL);
         h_ak = objArrayKlassHandle(THREAD, oak);
         h_ak->set_lower_dimension(h_this());
+        OrderAccess::storestore();
         h_this->set_higher_dimension(h_ak());
         assert(h_ak->oop_is_objArray(), "incorrect initialization of objArrayKlass");
       }
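The ordering above, set_lower_dimension() first, then OrderAccess::storestore(), then set_higher_dimension(), together with the volatile qualifiers added in arrayKlass.hpp, is a lock-free publication idiom: a racing reader that loads _higher_dimension without the lock must never see the new klass before its back-link is initialized. A generic sketch of the idiom with illustrative names:

    obj->_back_link = self;       // 1. finish initializing the object being published
    OrderAccess::storestore();    // 2. keep the init stores ahead of the publishing store
    _published_slot = obj;        // 3. publish via a field other threads read lock-free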
--- a/src/share/vm/opto/type.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/opto/type.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2187,8 +2187,11 @@
   case TypePtr::NotNull:
     return this;
   case TypePtr::Null:
-  case TypePtr::Constant:
-    return make( _bits+offset );
+  case TypePtr::Constant: {
+    address bits = _bits+offset;
+    if ( bits == 0 ) return TypePtr::NULL_PTR;
+    return make( bits );
+  }
   default:  ShouldNotReachHere();
   }
   return NULL;                  // Lint noise
--- a/src/share/vm/prims/jvm.h	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/prims/jvm.h	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1649,7 +1649,8 @@
      * the new bit is also added in the main/baseline.
      */
     unsigned int thread_park_blocker : 1;
-    unsigned int : 31;
+    unsigned int post_vm_init_hook_enabled : 1;
+    unsigned int : 30;
     unsigned int : 32;
     unsigned int : 32;
 } jdk_version_info;
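A hedged sketch of how a consumer might test the new flag, assuming the JDK_GetVersionInfo0() entry point this header declares for filling in the struct; the surrounding code is illustrative only:

    jdk_version_info info;
    memset(&info, 0, sizeof(info));
    JDK_GetVersionInfo0(&info, sizeof(info));
    if (info.post_vm_init_hook_enabled) {
      // this VM will run the post-VM-initialization hook
    }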
--- a/src/share/vm/prims/jvmti.xml	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/prims/jvmti.xml	Thu Jan 13 22:54:23 2011 -0800
@@ -13048,8 +13048,8 @@
   <event label="Garbage Collection Start"
 	 id="GarbageCollectionStart" const="JVMTI_EVENT_GARBAGE_COLLECTION_START" num="81">
     <description>
-      A Garbage Collection Start event is sent when a full cycle
-      garbage collection begins.
+      A Garbage Collection Start event is sent when a 
+      garbage collection pause begins.
       Only stop-the-world collections are reported--that is, collections during
       which all threads cease to modify the state of the Java virtual machine.
       This means that some collectors will never generate these events.
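These are the events that the JvmtiGCMarker changes later in this changeset post around every pause. A minimal agent-side sketch that subscribes to them, using only standard JVMTI calls; the function names are illustrative:

    #include <string.h>
    #include <jvmti.h>

    static void JNICALL on_gc_start(jvmtiEnv* jvmti)  { /* a pause began */ }
    static void JNICALL on_gc_finish(jvmtiEnv* jvmti) { /* pause ended; no JNI calls here */ }

    static jvmtiError enable_gc_events(jvmtiEnv* jvmti) {
      jvmtiCapabilities caps;
      memset(&caps, 0, sizeof(caps));
      caps.can_generate_garbage_collection_events = 1;
      jvmtiError err = jvmti->AddCapabilities(&caps);
      if (err != JVMTI_ERROR_NONE) return err;

      jvmtiEventCallbacks cb;
      memset(&cb, 0, sizeof(cb));
      cb.GarbageCollectionStart  = &on_gc_start;
      cb.GarbageCollectionFinish = &on_gc_finish;
      err = jvmti->SetEventCallbacks(&cb, (jint)sizeof(cb));
      if (err != JVMTI_ERROR_NONE) return err;

      err = jvmti->SetEventNotificationMode(JVMTI_ENABLE,
                                            JVMTI_EVENT_GARBAGE_COLLECTION_START, NULL);
      if (err != JVMTI_ERROR_NONE) return err;
      return jvmti->SetEventNotificationMode(JVMTI_ENABLE,
                                             JVMTI_EVENT_GARBAGE_COLLECTION_FINISH, NULL);
    }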
@@ -13075,8 +13075,8 @@
   <event label="Garbage Collection Finish"
 	 id="GarbageCollectionFinish" const="JVMTI_EVENT_GARBAGE_COLLECTION_FINISH" num="82">
     <description>
-      A Garbage Collection Finish event is sent when a full 
-      garbage collection cycle ends.
+      A Garbage Collection Finish event is sent when a
+      garbage collection pause ends.
       This event is sent while the VM is still stopped, thus
       the event handler must not use JNI functions and
       must not use <jvmti/> functions except those which
--- a/src/share/vm/prims/jvmtiEventController.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/prims/jvmtiEventController.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -667,14 +667,13 @@
 JvmtiEventControllerPrivate::thread_ended(JavaThread *thread) {
   // Removes the JvmtiThreadState associated with the specified thread.
   // May be called after all environments have been disposed.
+  assert(JvmtiThreadState_lock->is_locked(), "sanity check");
 
   EC_TRACE(("JVMTI [%s] # thread ended", JvmtiTrace::safe_get_thread_name(thread)));
 
   JvmtiThreadState *state = thread->jvmti_thread_state();
-  if (state != NULL) {
-    MutexLocker mu(JvmtiThreadState_lock);
-    delete state;
-  }
+  assert(state != NULL, "else why are we here?");
+  delete state;
 }
 
 void JvmtiEventControllerPrivate::set_event_callbacks(JvmtiEnvBase *env,
--- a/src/share/vm/prims/jvmtiExport.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/prims/jvmtiExport.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -2253,12 +2253,14 @@
 
 void JvmtiExport::cleanup_thread(JavaThread* thread) {
   assert(JavaThread::current() == thread, "thread is not current");
-
+  MutexLocker mu(JvmtiThreadState_lock);
 
-  // This has to happen after the thread state is removed, which is
-  // why it is not in post_thread_end_event like its complement
-  // Maybe both these functions should be rolled into the posts?
-  JvmtiEventController::thread_ended(thread);
+  if (thread->jvmti_thread_state() != NULL) {
+    // This has to happen after the thread state is removed, which is
+    // why it is not in post_thread_end_event like its complement
+    // Maybe both these functions should be rolled into the posts?
+    JvmtiEventController::thread_ended(thread);
+  }
 }
 
 void JvmtiExport::oops_do(OopClosure* f) {
@@ -2358,15 +2360,6 @@
 }
 #endif // SERVICES_KERNEL
 
-// CMS has completed referencing processing so may need to update
-// tag maps.
-void JvmtiExport::cms_ref_processing_epilogue() {
-  if (JvmtiEnv::environments_might_exist()) {
-    JvmtiTagMap::cms_ref_processing_epilogue();
-  }
-}
-
-
 ////////////////////////////////////////////////////////////////////////////////////////////////
 
 // Setup current current thread for event collection.
@@ -2536,36 +2529,20 @@
   }
 };
 
-JvmtiGCMarker::JvmtiGCMarker(bool full) : _full(full), _invocation_count(0) {
-  assert(Thread::current()->is_VM_thread(), "wrong thread");
-
+JvmtiGCMarker::JvmtiGCMarker() {
   // if there aren't any JVMTI environments then nothing to do
   if (!JvmtiEnv::environments_might_exist()) {
     return;
   }
 
-  if (ForceFullGCJVMTIEpilogues) {
-    // force 'Full GC' was done semantics for JVMTI GC epilogues
-    _full = true;
-  }
-
-  // GarbageCollectionStart event posted from VM thread - okay because
-  // JVMTI is clear that the "world is stopped" and callback shouldn't
-  // try to call into the VM.
   if (JvmtiExport::should_post_garbage_collection_start()) {
     JvmtiExport::post_garbage_collection_start();
   }
 
-  // if "full" is false it probably means this is a scavenge of the young
-  // generation. However it could turn out that a "full" GC is required
-  // so we record the number of collections so that it can be checked in
-  // the destructor.
-  if (!_full) {
-    _invocation_count = Universe::heap()->total_full_collections();
+  if (SafepointSynchronize::is_at_safepoint()) {
+    // Do clean up tasks that need to be done at a safepoint
+    JvmtiEnvBase::check_for_periodic_clean_up();
   }
-
-  // Do clean up tasks that need to be done at a safepoint
-  JvmtiEnvBase::check_for_periodic_clean_up();
 }
 
 JvmtiGCMarker::~JvmtiGCMarker() {
@@ -2578,21 +2555,5 @@
   if (JvmtiExport::should_post_garbage_collection_finish()) {
     JvmtiExport::post_garbage_collection_finish();
   }
-
-  // we might have initially started out doing a scavenge of the young
-  // generation but could have ended up doing a "full" GC - check the
-  // GC count to see.
-  if (!_full) {
-    _full = (_invocation_count != Universe::heap()->total_full_collections());
-  }
-
-  // Full collection probably means the perm generation has been GC'ed
-  // so we clear the breakpoint cache.
-  if (_full) {
-    JvmtiCurrentBreakpoints::gc_epilogue();
-  }
-
-  // Notify heap/object tagging support
-  JvmtiTagMap::gc_epilogue(_full);
 }
 #endif // JVMTI_KERNEL
--- a/src/share/vm/prims/jvmtiExport.hpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/prims/jvmtiExport.hpp	Thu Jan 13 22:54:23 2011 -0800
@@ -356,9 +356,6 @@
 
   // SetNativeMethodPrefix support
   static char** get_all_native_method_prefixes(int* count_ptr);
-
-  // call after CMS has completed referencing processing
-  static void cms_ref_processing_epilogue() KERNEL_RETURN;
 };
 
 // Support class used by JvmtiDynamicCodeEventCollector and others. It
@@ -492,55 +489,11 @@
 
 // Base class for reporting GC events to JVMTI.
 class JvmtiGCMarker : public StackObj {
- private:
-  bool _full;                           // marks a "full" GC
-  unsigned int _invocation_count;       // GC invocation count
- protected:
-  JvmtiGCMarker(bool full) KERNEL_RETURN;       // protected
-  ~JvmtiGCMarker() KERNEL_RETURN;               // protected
+ public:
+  JvmtiGCMarker() KERNEL_RETURN;
+  ~JvmtiGCMarker() KERNEL_RETURN;
 };
 
-
-// Support class used to report GC events to JVMTI. The class is stack
-// allocated and should be placed in the doit() implementation of all
-// vm operations that do a stop-the-world GC for failed allocation.
-//
-// Usage :-
-//
-// void VM_GenCollectForAllocation::doit() {
-//   JvmtiGCForAllocationMarker jgcm;
-//   :
-// }
-//
-// If jvmti is not enabled the constructor and destructor is essentially
-// a no-op (no overhead).
-//
-class JvmtiGCForAllocationMarker : public JvmtiGCMarker {
- public:
-  JvmtiGCForAllocationMarker() : JvmtiGCMarker(false) {
-  }
-};
-
-// Support class used to report GC events to JVMTI. The class is stack
-// allocated and should be placed in the doit() implementation of all
-// vm operations that do a "full" stop-the-world GC. This class differs
-// from JvmtiGCForAllocationMarker in that this class assumes that a
-// "full" GC will happen.
-//
-// Usage :-
-//
-// void VM_GenCollectFull::doit() {
-//   JvmtiGCFullMarker jgcm;
-//   :
-// }
-//
-class JvmtiGCFullMarker : public JvmtiGCMarker {
- public:
-  JvmtiGCFullMarker() : JvmtiGCMarker(true) {
-  }
-};
-
-
 // JvmtiHideSingleStepping is a helper class for hiding
 // internal single step events.
 class JvmtiHideSingleStepping : public StackObj {
--- a/src/share/vm/prims/jvmtiImpl.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/prims/jvmtiImpl.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -212,14 +212,7 @@
   for (int i=0; i<len; i++) {
     GrowableElement *e = _elements->at(i);
     e->oops_do(f);
-  }
-}
-
-void GrowableCache::gc_epilogue() {
-  int len = _elements->length();
-  // recompute the new cache value after GC
-  for (int i=0; i<len; i++) {
-    _cache[i] = _elements->at(i)->getCacheValue();
+    _cache[i] = e->getCacheValue();
   }
 }
 
@@ -401,10 +394,6 @@
   _bps.oops_do(f);
 }
 
-void  JvmtiBreakpoints::gc_epilogue() {
-  _bps.gc_epilogue();
-}
-
 void  JvmtiBreakpoints::print() {
 #ifndef PRODUCT
   ResourceMark rm;
@@ -534,13 +523,6 @@
   }
 }
 
-void JvmtiCurrentBreakpoints::gc_epilogue() {
-  if (_jvmti_breakpoints != NULL) {
-    _jvmti_breakpoints->gc_epilogue();
-  }
-}
-
-
 ///////////////////////////////////////////////////////////////
 //
 // class VM_GetOrSetLocal
--- a/src/share/vm/prims/jvmtiImpl.hpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/prims/jvmtiImpl.hpp	Thu Jan 13 22:54:23 2011 -0800
@@ -117,7 +117,6 @@
   void clear();
   // apply f to every element and update the cache
   void oops_do(OopClosure* f);
-  void gc_epilogue();
 };
 
 
@@ -149,7 +148,6 @@
   void remove (int index)               { _cache.remove(index); }
   void clear()                          { _cache.clear(); }
   void oops_do(OopClosure* f)           { _cache.oops_do(f); }
-  void gc_epilogue()                    { _cache.gc_epilogue(); }
 };
 
 
@@ -278,7 +276,6 @@
 
   int length();
   void oops_do(OopClosure* f);
-  void gc_epilogue();
   void print();
 
   int  set(JvmtiBreakpoint& bp);
@@ -328,7 +325,6 @@
   static inline bool is_breakpoint(address bcp);
 
   static void oops_do(OopClosure* f);
-  static void gc_epilogue();
 };
 
 // quickly test whether the bcp matches a cached breakpoint in the list
--- a/src/share/vm/prims/jvmtiTagMap.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/prims/jvmtiTagMap.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -50,7 +50,7 @@
 
 // JvmtiTagHashmapEntry
 //
-// Each entry encapsulates a JNI weak reference to the tagged object
+// Each entry encapsulates a reference to the tagged object
 // and the tag value. In addition an entry includes a next pointer which
 // is used to chain entries together.
 
@@ -58,24 +58,25 @@
  private:
   friend class JvmtiTagMap;
 
-  jweak _object;                        // JNI weak ref to tagged object
+  oop _object;                          // tagged object
   jlong _tag;                           // the tag
   JvmtiTagHashmapEntry* _next;          // next on the list
 
-  inline void init(jweak object, jlong tag) {
+  inline void init(oop object, jlong tag) {
     _object = object;
     _tag = tag;
     _next = NULL;
   }
 
   // constructor
-  JvmtiTagHashmapEntry(jweak object, jlong tag)         { init(object, tag); }
+  JvmtiTagHashmapEntry(oop object, jlong tag)         { init(object, tag); }
 
  public:
 
   // accessor methods
-  inline jweak object() const                           { return _object; }
-  inline jlong tag() const                              { return _tag; }
+  inline oop object() const                           { return _object; }
+  inline oop* object_addr()                           { return &_object; }
+  inline jlong tag() const                            { return _tag; }
 
   inline void set_tag(jlong tag) {
     assert(tag != 0, "can't be zero");
@@ -92,9 +93,7 @@
 // A hashmap is essentially a table of pointers to entries. Entries
 // are hashed to a location, or position in the table, and then
 // chained from that location. The "key" for hashing is address of
-// the object, or oop. The "value" is the JNI weak reference to the
-// object and the tag value. Keys are not stored with the entry.
-// Instead the weak reference is resolved to obtain the key.
+// the object, or oop. The "value" is the tag value.
 //
 // A hashmap maintains a count of the number entries in the hashmap
 // and resizes if the number of entries exceeds a given threshold.
@@ -206,7 +205,7 @@
       JvmtiTagHashmapEntry* entry = _table[i];
       while (entry != NULL) {
         JvmtiTagHashmapEntry* next = entry->next();
-        oop key = JNIHandles::resolve(entry->object());
+        oop key = entry->object();
         assert(key != NULL, "jni weak reference cleared!!");
         unsigned int h = hash(key, new_size);
         JvmtiTagHashmapEntry* anchor = new_table[h];
@@ -299,14 +298,12 @@
     unsigned int h = hash(key);
     JvmtiTagHashmapEntry* entry = _table[h];
     while (entry != NULL) {
-      oop orig_key = JNIHandles::resolve(entry->object());
-      assert(orig_key != NULL, "jni weak reference cleared!!");
-      if (key == orig_key) {
-        break;
+      if (entry->object() == key) {
+         return entry;
       }
       entry = entry->next();
     }
-    return entry;
+    return NULL;
   }
 
 
@@ -343,9 +340,7 @@
     JvmtiTagHashmapEntry* entry = _table[h];
     JvmtiTagHashmapEntry* prev = NULL;
     while (entry != NULL) {
-      oop orig_key = JNIHandles::resolve(entry->object());
-      assert(orig_key != NULL, "jni weak reference cleared!!");
-      if (key == orig_key) {
+      if (key == entry->object()) {
         break;
       }
       prev = entry;
@@ -418,54 +413,6 @@
   }
 }
 
-// memory region for young generation
-MemRegion JvmtiTagMap::_young_gen;
-
-// get the memory region used for the young generation
-void JvmtiTagMap::get_young_generation() {
-  CollectedHeap* ch = Universe::heap();
-  switch (ch->kind()) {
-    case (CollectedHeap::GenCollectedHeap): {
-      _young_gen = ((GenCollectedHeap*)ch)->get_gen(0)->reserved();
-      break;
-    }
-#ifndef SERIALGC
-    case (CollectedHeap::ParallelScavengeHeap): {
-      _young_gen = ((ParallelScavengeHeap*)ch)->young_gen()->reserved();
-      break;
-    }
-    case (CollectedHeap::G1CollectedHeap): {
-      // Until a more satisfactory solution is implemented, all
-      // oops in the tag map will require rehash at each gc.
-      // This is a correct, if extremely inefficient solution.
-      // See RFE 6621729 for related commentary.
-      _young_gen = ch->reserved_region();
-      break;
-    }
-#endif  // !SERIALGC
-    default:
-      ShouldNotReachHere();
-  }
-}
-
-// returns true if oop is in the young generation
-inline bool JvmtiTagMap::is_in_young(oop o) {
-  assert(_young_gen.start() != NULL, "checking");
-  void* p = (void*)o;
-  bool in_young = _young_gen.contains(p);
-  return in_young;
-}
-
-// returns the appropriate hashmap for a given object
-inline JvmtiTagHashmap* JvmtiTagMap::hashmap_for(oop o) {
-  if (is_in_young(o)) {
-    return _hashmap[0];
-  } else {
-    return _hashmap[1];
-  }
-}
-
-
 // create a JvmtiTagMap
 JvmtiTagMap::JvmtiTagMap(JvmtiEnv* env) :
   _env(env),
@@ -476,13 +423,7 @@
   assert(JvmtiThreadState_lock->is_locked(), "sanity check");
   assert(((JvmtiEnvBase *)env)->tag_map() == NULL, "tag map already exists for environment");
 
-  // create the hashmaps
-  for (int i=0; i<n_hashmaps; i++) {
-    _hashmap[i] = new JvmtiTagHashmap();
-  }
-
-  // get the memory region used by the young generation
-  get_young_generation();
+  _hashmap = new JvmtiTagHashmap();
 
   // finally add us to the environment
   ((JvmtiEnvBase *)env)->set_tag_map(this);
@@ -496,25 +437,20 @@
   // also being destroyed.
   ((JvmtiEnvBase *)_env)->set_tag_map(NULL);
 
-  // iterate over the hashmaps and destroy each of the entries
-  for (int i=0; i<n_hashmaps; i++) {
-    JvmtiTagHashmap* hashmap = _hashmap[i];
-    JvmtiTagHashmapEntry** table = hashmap->table();
-    for (int j=0; j<hashmap->size(); j++) {
-      JvmtiTagHashmapEntry *entry = table[j];
-      while (entry != NULL) {
-        JvmtiTagHashmapEntry* next = entry->next();
-        jweak ref = entry->object();
-        JNIHandles::destroy_weak_global(ref);
-        delete entry;
-        entry = next;
-      }
+  JvmtiTagHashmapEntry** table = _hashmap->table();
+  for (int j = 0; j < _hashmap->size(); j++) {
+    JvmtiTagHashmapEntry* entry = table[j];
+    while (entry != NULL) {
+      JvmtiTagHashmapEntry* next = entry->next();
+      delete entry;
+      entry = next;
     }
-
-    // finally destroy the hashmap
-    delete hashmap;
   }
 
+  // finally destroy the hashmap
+  delete _hashmap;
+  _hashmap = NULL;
+
   // remove any entries on the free list
   JvmtiTagHashmapEntry* entry = _free_entries;
   while (entry != NULL) {
@@ -522,12 +458,13 @@
     delete entry;
     entry = next;
   }
+  _free_entries = NULL;
 }
 
 // create a hashmap entry
 // - if there's an entry on the (per-environment) free list then this
 // is returned. Otherwise an new entry is allocated.
-JvmtiTagHashmapEntry* JvmtiTagMap::create_entry(jweak ref, jlong tag) {
+JvmtiTagHashmapEntry* JvmtiTagMap::create_entry(oop ref, jlong tag) {
   assert(Thread::current()->is_VM_thread() || is_locked(), "checking");
   JvmtiTagHashmapEntry* entry;
   if (_free_entries == NULL) {
@@ -558,10 +495,10 @@
 // returns the tag map for the given environments. If the tag map
 // doesn't exist then it is created.
 JvmtiTagMap* JvmtiTagMap::tag_map_for(JvmtiEnv* env) {
-  JvmtiTagMap* tag_map = ((JvmtiEnvBase *)env)->tag_map();
+  JvmtiTagMap* tag_map = ((JvmtiEnvBase*)env)->tag_map();
   if (tag_map == NULL) {
     MutexLocker mu(JvmtiThreadState_lock);
-    tag_map = ((JvmtiEnvBase *)env)->tag_map();
+    tag_map = ((JvmtiEnvBase*)env)->tag_map();
     if (tag_map == NULL) {
       tag_map = new JvmtiTagMap(env);
     }
@@ -573,17 +510,13 @@
 
 // iterate over all entries in the tag map.
 void JvmtiTagMap::entry_iterate(JvmtiTagHashmapEntryClosure* closure) {
-  for (int i=0; i<n_hashmaps; i++) {
-    JvmtiTagHashmap* hashmap = _hashmap[i];
-    hashmap->entry_iterate(closure);
-  }
+  hashmap()->entry_iterate(closure);
 }
 
 // returns true if the hashmaps are empty
 bool JvmtiTagMap::is_empty() {
   assert(SafepointSynchronize::is_at_safepoint() || is_locked(), "checking");
-  assert(n_hashmaps == 2, "not implemented");
-  return ((_hashmap[0]->entry_count() == 0) && (_hashmap[1]->entry_count() == 0));
+  return hashmap()->entry_count() == 0;
 }
 
 
@@ -591,7 +524,7 @@
 // not tagged
 //
 static inline jlong tag_for(JvmtiTagMap* tag_map, oop o) {
-  JvmtiTagHashmapEntry* entry = tag_map->hashmap_for(o)->find(o);
+  JvmtiTagHashmapEntry* entry = tag_map->hashmap()->find(o);
   if (entry == NULL) {
     return 0;
   } else {
@@ -655,7 +588,7 @@
 
     // record the context
     _tag_map = tag_map;
-    _hashmap = tag_map->hashmap_for(_o);
+    _hashmap = tag_map->hashmap();
     _entry = _hashmap->find(_o);
 
     // get object tag
@@ -694,23 +627,18 @@
     if (obj_tag != 0) {
       // callback has tagged the object
       assert(Thread::current()->is_VM_thread(), "must be VMThread");
-      HandleMark hm;
-      Handle h(o);
-      jweak ref = JNIHandles::make_weak_global(h);
-      entry = tag_map()->create_entry(ref, obj_tag);
+      entry = tag_map()->create_entry(o, obj_tag);
       hashmap->add(o, entry);
     }
   } else {
     // object was previously tagged - the callback may have untagged
     // the object or changed the tag value
     if (obj_tag == 0) {
-      jweak ref = entry->object();
 
       JvmtiTagHashmapEntry* entry_removed = hashmap->remove(o);
       assert(entry_removed == entry, "checking");
       tag_map()->destroy_entry(entry);
 
-      JNIHandles::destroy_weak_global(ref);
     } else {
       if (obj_tag != entry->tag()) {
          entry->set_tag(obj_tag);
@@ -760,7 +688,7 @@
       // for Classes the klassOop is tagged
       _referrer = klassOop_if_java_lang_Class(referrer);
       // record the context
-      _referrer_hashmap = tag_map->hashmap_for(_referrer);
+      _referrer_hashmap = tag_map->hashmap();
       _referrer_entry = _referrer_hashmap->find(_referrer);
 
       // get object tag
@@ -796,8 +724,7 @@
 //
 // This function is performance critical. If many threads attempt to tag objects
 // around the same time then it's possible that the Mutex associated with the
-// tag map will be a hot lock. Eliminating this lock will not eliminate the issue
-// because creating a JNI weak reference requires acquiring a global lock also.
+// tag map will be a hot lock.
 void JvmtiTagMap::set_tag(jobject object, jlong tag) {
   MutexLocker ml(lock());
 
@@ -808,22 +735,14 @@
   o = klassOop_if_java_lang_Class(o);
 
   // see if the object is already tagged
-  JvmtiTagHashmap* hashmap = hashmap_for(o);
+  JvmtiTagHashmap* hashmap = _hashmap;
   JvmtiTagHashmapEntry* entry = hashmap->find(o);
 
   // if the object is not already tagged then we tag it
   if (entry == NULL) {
     if (tag != 0) {
-      HandleMark hm;
-      Handle h(o);
-      jweak ref = JNIHandles::make_weak_global(h);
-
-      // the object may have moved because make_weak_global may
-      // have blocked - thus it is necessary resolve the handle
-      // and re-hash the object.
-      o = h();
-      entry = create_entry(ref, tag);
-      hashmap_for(o)->add(o, entry);
+      entry = create_entry(o, tag);
+      hashmap->add(o, entry);
     } else {
       // no-op
     }
@@ -831,13 +750,9 @@
     // if the object is already tagged then we either update
     // the tag (if a new tag value has been provided)
     // or remove the object if the new tag value is 0.
-    // Removing the object requires that we also delete the JNI
-    // weak ref to the object.
     if (tag == 0) {
-      jweak ref = entry->object();
       hashmap->remove(o);
       destroy_entry(entry);
-      JNIHandles::destroy_weak_global(ref);
     } else {
       entry->set_tag(tag);
     }
@@ -1626,8 +1541,8 @@
   void do_entry(JvmtiTagHashmapEntry* entry) {
     for (int i=0; i<_tag_count; i++) {
       if (_tags[i] == entry->tag()) {
-        oop o = JNIHandles::resolve(entry->object());
-        assert(o != NULL && o != JNIHandles::deleted_handle(), "sanity check");
+        oop o = entry->object();
+        assert(o != NULL, "sanity check");
 
         // the mirror is tagged
         if (o->is_klass()) {
@@ -3374,62 +3289,21 @@
 }
 
 
-// called post-GC
-// - for each JVMTI environment with an object tag map, call its rehash
-// function to re-sync with the new object locations.
-void JvmtiTagMap::gc_epilogue(bool full) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
+void JvmtiTagMap::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
+  assert(SafepointSynchronize::is_at_safepoint(),
+         "must be executed at a safepoint");
   if (JvmtiEnv::environments_might_exist()) {
-    // re-obtain the memory region for the young generation (might
-    // changed due to adaptive resizing policy)
-    get_young_generation();
-
     JvmtiEnvIterator it;
     for (JvmtiEnvBase* env = it.first(); env != NULL; env = it.next(env)) {
       JvmtiTagMap* tag_map = env->tag_map();
       if (tag_map != NULL && !tag_map->is_empty()) {
-        TraceTime t(full ? "JVMTI Full Rehash " : "JVMTI Rehash ", TraceJVMTIObjectTagging);
-        if (full) {
-          tag_map->rehash(0, n_hashmaps);
-        } else {
-          tag_map->rehash(0, 0);        // tag map for young gen only
-        }
+        tag_map->do_weak_oops(is_alive, f);
       }
     }
   }
 }
 
-// CMS has completed referencing processing so we may have JNI weak refs
-// to objects in the CMS generation that have been GC'ed.
-void JvmtiTagMap::cms_ref_processing_epilogue() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
-  assert(UseConcMarkSweepGC, "should only be used with CMS");
-  if (JvmtiEnv::environments_might_exist()) {
-    JvmtiEnvIterator it;
-    for (JvmtiEnvBase* env = it.first(); env != NULL; env = it.next(env)) {
-      JvmtiTagMap* tag_map = ((JvmtiEnvBase *)env)->tag_map();
-      if (tag_map != NULL && !tag_map->is_empty()) {
-        TraceTime t("JVMTI Rehash (CMS) ", TraceJVMTIObjectTagging);
-        tag_map->rehash(1, n_hashmaps);    // assume CMS not used in young gen
-      }
-    }
-  }
-}
-
-
-// For each entry in the hashmaps 'start' to 'end' :
-//
-// 1. resolve the JNI weak reference
-//
-// 2. If it resolves to NULL it means the object has been freed so the entry
-//    is removed, the weak reference destroyed, and the object free event is
-//    posted (if enabled).
-//
-// 3. If the weak reference resolves to an object then we re-hash the object
-//    to see if it has moved or has been promoted (from the young to the old
-//    generation for example).
-//
-void JvmtiTagMap::rehash(int start, int end) {
+void JvmtiTagMap::do_weak_oops(BoolObjectClosure* is_alive, OopClosure* f) {
 
   // does this environment have the OBJECT_FREE event enabled
   bool post_object_free = env()->is_enabled(JVMTI_EVENT_OBJECT_FREE);
@@ -3437,143 +3311,98 @@
   // counters used for trace message
   int freed = 0;
   int moved = 0;
-  int promoted = 0;
-
-  // we assume there are two hashmaps - one for the young generation
-  // and the other for all other spaces.
-  assert(n_hashmaps == 2, "not implemented");
-  JvmtiTagHashmap* young_hashmap = _hashmap[0];
-  JvmtiTagHashmap* other_hashmap = _hashmap[1];
+
+  JvmtiTagHashmap* hashmap = this->hashmap();
 
   // reenable sizing (if disabled)
-  young_hashmap->set_resizing_enabled(true);
-  other_hashmap->set_resizing_enabled(true);
-
-  // when re-hashing the hashmap corresponding to the young generation we
-  // collect the entries corresponding to objects that have been promoted.
-  JvmtiTagHashmapEntry* promoted_entries = NULL;
-
-  if (end >= n_hashmaps) {
-    end = n_hashmaps - 1;
+  hashmap->set_resizing_enabled(true);
+
+  // if the hashmap is empty then we can skip it
+  if (hashmap->_entry_count == 0) {
+    return;
   }
 
-  for (int i=start; i <= end; i++) {
-    JvmtiTagHashmap* hashmap = _hashmap[i];
-
-    // if the hashmap is empty then we can skip it
-    if (hashmap->_entry_count == 0) {
-      continue;
-    }
-
-    // now iterate through each entry in the table
-
-    JvmtiTagHashmapEntry** table = hashmap->table();
-    int size = hashmap->size();
-
-    for (int pos=0; pos<size; pos++) {
-      JvmtiTagHashmapEntry* entry = table[pos];
-      JvmtiTagHashmapEntry* prev = NULL;
-
-      while (entry != NULL) {
-        JvmtiTagHashmapEntry* next = entry->next();
-
-        jweak ref = entry->object();
-        oop oop = JNIHandles::resolve(ref);
-
-        // has object been GC'ed
-        if (oop == NULL) {
-          // grab the tag
-          jlong tag = entry->tag();
-          guarantee(tag != 0, "checking");
-
-          // remove GC'ed entry from hashmap and return the
-          // entry to the free list
-          hashmap->remove(prev, pos, entry);
-          destroy_entry(entry);
-
-          // destroy the weak ref
-          JNIHandles::destroy_weak_global(ref);
-
-          // post the event to the profiler
-          if (post_object_free) {
-            JvmtiExport::post_object_free(env(), tag);
+  // now iterate through each entry in the table
+
+  JvmtiTagHashmapEntry** table = hashmap->table();
+  int size = hashmap->size();
+
+  JvmtiTagHashmapEntry* delayed_add = NULL;
+
+  for (int pos = 0; pos < size; ++pos) {
+    JvmtiTagHashmapEntry* entry = table[pos];
+    JvmtiTagHashmapEntry* prev = NULL;
+
+    while (entry != NULL) {
+      JvmtiTagHashmapEntry* next = entry->next();
+
+      oop* obj = entry->object_addr();
+
+      // has object been GC'ed
+      if (!is_alive->do_object_b(entry->object())) {
+        // grab the tag
+        jlong tag = entry->tag();
+        guarantee(tag != 0, "checking");
+
+        // remove GC'ed entry from hashmap and return the
+        // entry to the free list
+        hashmap->remove(prev, pos, entry);
+        destroy_entry(entry);
+
+        // post the event to the profiler
+        if (post_object_free) {
+          JvmtiExport::post_object_free(env(), tag);
+        }
+
+        ++freed;
+      } else {
+        f->do_oop(entry->object_addr());
+        oop new_oop = entry->object();
+
+        // if the object has moved then re-hash it and move its
+        // entry to its new location.
+        unsigned int new_pos = JvmtiTagHashmap::hash(new_oop, size);
+        if (new_pos != (unsigned int)pos) {
+          if (prev == NULL) {
+            table[pos] = next;
+          } else {
+            prev->set_next(next);
           }
-
-          freed++;
-          entry = next;
-          continue;
-        }
-
-        // if this is the young hashmap then the object is either promoted
-        // or moved.
-        // if this is the other hashmap then the object is moved.
-
-        bool same_gen;
-        if (i == 0) {
-          assert(hashmap == young_hashmap, "checking");
-          same_gen = is_in_young(oop);
-        } else {
-          same_gen = true;
-        }
-
-
-        if (same_gen) {
-          // if the object has moved then re-hash it and move its
-          // entry to its new location.
-          unsigned int new_pos = JvmtiTagHashmap::hash(oop, size);
-          if (new_pos != (unsigned int)pos) {
-            if (prev == NULL) {
-              table[pos] = next;
-            } else {
-              prev->set_next(next);
-            }
+          if (new_pos < (unsigned int)pos) {
             entry->set_next(table[new_pos]);
             table[new_pos] = entry;
-            moved++;
           } else {
-            // object didn't move
-            prev = entry;
+            // Delay adding this entry to its new position as we'd end up
+            // hitting it again during this iteration.
+            entry->set_next(delayed_add);
+            delayed_add = entry;
           }
+          moved++;
         } else {
-          // object has been promoted so remove the entry from the
-          // young hashmap
-          assert(hashmap == young_hashmap, "checking");
-          hashmap->remove(prev, pos, entry);
-
-          // move the entry to the promoted list
-          entry->set_next(promoted_entries);
-          promoted_entries = entry;
+          // object didn't move
+          prev = entry;
         }
-
-        entry = next;
       }
+
+      entry = next;
     }
   }
 
-
-  // add the entries, corresponding to the promoted objects, to the
-  // other hashmap.
-  JvmtiTagHashmapEntry* entry = promoted_entries;
-  while (entry != NULL) {
-    oop o = JNIHandles::resolve(entry->object());
-    assert(hashmap_for(o) == other_hashmap, "checking");
-    JvmtiTagHashmapEntry* next = entry->next();
-    other_hashmap->add(o, entry);
-    entry = next;
-    promoted++;
+  // Re-add all the entries which were kept aside
+  while (delayed_add != NULL) {
+    JvmtiTagHashmapEntry* next = delayed_add->next();
+    unsigned int pos = JvmtiTagHashmap::hash(delayed_add->object(), size);
+    delayed_add->set_next(table[pos]);
+    table[pos] = delayed_add;
+    delayed_add = next;
   }
 
   // stats
   if (TraceJVMTIObjectTagging) {
-    int total_moves = promoted + moved;
-
-    int post_total = 0;
-    for (int i=0; i<n_hashmaps; i++) {
-      post_total += _hashmap[i]->_entry_count;
-    }
+    int post_total = hashmap->_entry_count;
     int pre_total = post_total + freed;
 
-    tty->print("(%d->%d, %d freed, %d promoted, %d total moves)",
-        pre_total, post_total, freed, promoted, total_moves);
+    tty->print_cr("(%d->%d, %d freed, %d total moves)",
+        pre_total, post_total, freed, moved);
   }
 }
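The delayed_add list above exists because entries are re-hashed in place while the table is being scanned: an entry whose new bucket lies behind the scan cursor can be relinked immediately, but one whose new bucket lies ahead would be visited twice, so it is parked and re-added once the scan finishes. A stripped-down sketch of the pattern with illustrative types:

    Entry* delayed = NULL;
    for (int pos = 0; pos < size; pos++) {
      Entry* prev = NULL;
      for (Entry* e = table[pos]; e != NULL; ) {
        Entry* next = e->next;
        int new_pos = hash(e->key) % size;
        if (new_pos == pos) {
          prev = e;                                                      // stays put
        } else {
          if (prev == NULL) table[pos] = next; else prev->next = next;   // unlink
          if (new_pos < pos) { e->next = table[new_pos]; table[new_pos] = e; }
          else               { e->next = delayed;        delayed = e;    } // park it
        }
        e = next;
      }
    }
    while (delayed != NULL) {                                            // re-add parked entries
      Entry* next = delayed->next;
      int pos = hash(delayed->key) % size;
      delayed->next = table[pos];
      table[pos] = delayed;
      delayed = next;
    }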
--- a/src/share/vm/prims/jvmtiTagMap.hpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/prims/jvmtiTagMap.hpp	Thu Jan 13 22:54:23 2011 -0800
@@ -45,17 +45,12 @@
  private:
 
   enum{
-    n_hashmaps = 2,                                 // encapsulates 2 hashmaps
-    max_free_entries = 4096                         // maximum number of free entries per env
+    max_free_entries = 4096         // maximum number of free entries per env
   };
 
-  // memory region for young generation
-  static MemRegion _young_gen;
-  static void get_young_generation();
-
   JvmtiEnv*             _env;                       // the jvmti environment
   Mutex                 _lock;                      // lock for this tag map
-  JvmtiTagHashmap*      _hashmap[n_hashmaps];       // the hashmaps
+  JvmtiTagHashmap*      _hashmap;                   // the hashmap
 
   JvmtiTagHashmapEntry* _free_entries;              // free list for this environment
   int _free_entries_count;                          // number of entries on the free list
@@ -67,11 +62,7 @@
   inline Mutex* lock()                      { return &_lock; }
   inline JvmtiEnv* env() const              { return _env; }
 
-  // rehash tags maps for generation start to end
-  void rehash(int start, int end);
-
-  // indicates if the object is in the young generation
-  static bool is_in_young(oop o);
+  void do_weak_oops(BoolObjectClosure* is_alive, OopClosure* f);
 
   // iterate over all entries in this tag map
   void entry_iterate(JvmtiTagHashmapEntryClosure* closure);
@@ -81,11 +72,10 @@
   // indicates if this tag map is locked
   bool is_locked()                          { return lock()->is_locked(); }
 
-  // return the appropriate hashmap for a given object
-  JvmtiTagHashmap* hashmap_for(oop o);
+  JvmtiTagHashmap* hashmap() { return _hashmap; }
 
   // create/destroy entries
-  JvmtiTagHashmapEntry* create_entry(jweak ref, jlong tag);
+  JvmtiTagHashmapEntry* create_entry(oop ref, jlong tag);
   void destroy_entry(JvmtiTagHashmapEntry* entry);
 
   // returns true if the hashmaps are empty
@@ -134,11 +124,8 @@
                                    jint* count_ptr, jobject** object_result_ptr,
                                    jlong** tag_result_ptr);
 
-  // call post-GC to rehash the tag maps.
-  static void gc_epilogue(bool full);
-
-  // call after referencing processing has completed (CMS)
-  static void cms_ref_processing_epilogue();
+  static void weak_oops_do(
+      BoolObjectClosure* is_alive, OopClosure* f) KERNEL_RETURN;
 };
 
 #endif // SHARE_VM_PRIMS_JVMTITAGMAP_HPP
--- a/src/share/vm/prims/methodHandleWalk.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/prims/methodHandleWalk.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -137,7 +137,6 @@
 
 
 void MethodHandleChain::lose(const char* msg, TRAPS) {
-  assert(false, "lose");
   _lose_message = msg;
   if (!THREAD->is_Java_thread() || ((JavaThread*)THREAD)->thread_state() != _thread_in_vm) {
     // throw a preallocated exception
--- a/src/share/vm/prims/methodHandles.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/prims/methodHandles.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -111,7 +111,7 @@
 //------------------------------------------------------------------------------
 // MethodHandles::generate_adapters
 //
-void MethodHandles::generate_adapters(TRAPS) {
+void MethodHandles::generate_adapters() {
   if (!EnableMethodHandles || SystemDictionary::MethodHandle_klass() == NULL)  return;
 
   assert(_adapter_code == NULL, "generate only once");
@@ -123,20 +123,20 @@
     vm_exit_out_of_memory(_adapter_code_size, "CodeCache: no room for MethodHandles adapters");
   CodeBuffer code(_adapter_code);
   MethodHandlesAdapterGenerator g(&code);
-  g.generate(CHECK);
+  g.generate();
 }
 
 
 //------------------------------------------------------------------------------
 // MethodHandlesAdapterGenerator::generate
 //
-void MethodHandlesAdapterGenerator::generate(TRAPS) {
+void MethodHandlesAdapterGenerator::generate() {
   // Generate generic method handle adapters.
   for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST;
        ek < MethodHandles::_EK_LIMIT;
        ek = MethodHandles::EntryKind(1 + (int)ek)) {
     StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek));
-    MethodHandles::generate_method_handle_stub(_masm, ek, CHECK);
+    MethodHandles::generate_method_handle_stub(_masm, ek);
   }
 }
 
@@ -2621,10 +2621,20 @@
         warning("JSR 292 method handle code is mismatched to this JVM.  Disabling support.");
         enable_MH = false;
       }
+    } else {
+      enable_MH = false;
     }
   }
 
   if (enable_MH) {
+    // We need to link the MethodHandleImpl klass before we generate
+    // the method handle adapters as the _raise_exception adapter uses
+    // one of its methods (and its c2i-adapter).
+    KlassHandle    k  = SystemDictionaryHandles::MethodHandleImpl_klass();
+    instanceKlass* ik = instanceKlass::cast(k());
+    ik->link_class(CHECK);
+
+    MethodHandles::generate_adapters();
     MethodHandles::set_enabled(true);
   }
 
@@ -2645,10 +2655,5 @@
       MethodHandles::set_enabled(true);
     }
   }
-
-  // Generate method handles adapters if enabled.
-  if (MethodHandles::enabled()) {
-    MethodHandles::generate_adapters(CHECK);
-  }
 }
 JVM_END
--- a/src/share/vm/prims/methodHandles.hpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/prims/methodHandles.hpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -294,11 +294,11 @@
   enum { _suppress_defc = 1, _suppress_name = 2, _suppress_type = 4 };
 
   // Generate MethodHandles adapters.
-  static void generate_adapters(TRAPS);
+  static void generate_adapters();
 
   // Called from InterpreterGenerator and MethodHandlesAdapterGenerator.
   static address generate_method_handle_interpreter_entry(MacroAssembler* _masm);
-  static void generate_method_handle_stub(MacroAssembler* _masm, EntryKind ek, TRAPS);
+  static void generate_method_handle_stub(MacroAssembler* _masm, EntryKind ek);
 
   // argument list parsing
   static int argument_slot(oop method_type, int arg);
@@ -530,7 +530,7 @@
 public:
   MethodHandlesAdapterGenerator(CodeBuffer* code) : StubCodeGenerator(code) {}
 
-  void generate(TRAPS);
+  void generate();
 };
 
 #endif // SHARE_VM_PRIMS_METHODHANDLES_HPP
--- a/src/share/vm/prims/unsafe.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/prims/unsafe.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -154,12 +154,11 @@
 
 #define GET_FIELD_VOLATILE(obj, offset, type_name, v) \
   oop p = JNIHandles::resolve(obj); \
-  volatile type_name v = *(volatile type_name*)index_oop_from_field_offset_long(p, offset)
+  volatile type_name v = OrderAccess::load_acquire((volatile type_name*)index_oop_from_field_offset_long(p, offset));
 
 #define SET_FIELD_VOLATILE(obj, offset, type_name, x) \
   oop p = JNIHandles::resolve(obj); \
-  *(volatile type_name*)index_oop_from_field_offset_long(p, offset) = x; \
-  OrderAccess::fence();
+  OrderAccess::release_store_fence((volatile type_name*)index_oop_from_field_offset_long(p, offset), x);
 
 // Macros for oops that check UseCompressedOops
 
@@ -181,7 +180,8 @@
     v = oopDesc::decode_heap_oop(n);                               \
   } else {                            \
     v = *(volatile oop*)index_oop_from_field_offset_long(p, offset);       \
-  }
+  } \
+  OrderAccess::acquire();
 
 
 // Get/SetObject must be special-cased, since it works with handles.
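The same discipline recurs below in Unsafe_SetObjectVolatile (OrderAccess::release before the oop store, OrderAccess::fence after it). As a rough analogue in portable C++11 (illustrative only: HotSpot uses its own OrderAccess layer, not <atomic>, and the names here are placeholders), the two macros above amount to:

  #include <atomic>

  std::atomic<int> field;   // stands in for the Java field being accessed

  // GET_FIELD_VOLATILE: a load-acquire, so no later memory access in this
  // thread can be reordered before the volatile read.
  int get_volatile() {
    return field.load(std::memory_order_acquire);
  }

  // SET_FIELD_VOLATILE: a release store followed by a full fence, so earlier
  // writes are visible before the store and the store itself is visible
  // before any subsequent volatile read by this thread.
  void set_volatile(int x) {
    field.store(x, std::memory_order_release);
    std::atomic_thread_fence(std::memory_order_seq_cst);
  }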
@@ -248,14 +248,22 @@
   UnsafeWrapper("Unsafe_SetObjectVolatile");
   oop x = JNIHandles::resolve(x_h);
   oop p = JNIHandles::resolve(obj);
+  void* addr = index_oop_from_field_offset_long(p, offset);
+  OrderAccess::release();
   if (UseCompressedOops) {
-    oop_store((narrowOop*)index_oop_from_field_offset_long(p, offset), x);
+    oop_store((narrowOop*)addr, x);
   } else {
-    oop_store((oop*)index_oop_from_field_offset_long(p, offset), x);
+    oop_store((oop*)addr, x);
   }
   OrderAccess::fence();
 UNSAFE_END
 
+#if defined(SPARC) || defined(X86)
+// SPARC and x86 have atomic jlong (8-byte) load/store instructions
+
+#else
+// Keep the old code for platforms that may not have atomic jlong (8-byte) loads/stores
+
 // Volatile long versions must use locks if !VM_Version::supports_cx8().
 // support_cx8 is a surrogate for 'supports atomic long memory ops'.
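A portable sketch of that lock-based fallback, kept for such platforms. This is illustrative only: the function names and the std::mutex are placeholders for the VM-internal equivalents, not the actual unsafe.cpp code.

  #include <cstdint>
  #include <mutex>

  static std::mutex jlong_access_lock;   // placeholder for the VM-internal lock

  // Without atomic 8-byte loads/stores, every volatile jlong access is
  // serialized through one lock so no thread can observe a torn value.
  int64_t get_long_volatile(volatile const int64_t* addr) {
    std::lock_guard<std::mutex> guard(jlong_access_lock);
    return *addr;
  }

  void put_long_volatile(volatile int64_t* addr, int64_t x) {
    std::lock_guard<std::mutex> guard(jlong_access_lock);
    *addr = x;
  }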
 
@@ -291,6 +299,7 @@
   }
 UNSAFE_END
 
+#endif // not SPARC and not X86
 
 #define DEFINE_GETSETOOP(jboolean, Boolean) \
  \
@@ -320,6 +329,16 @@
  \
 // END DEFINE_GETSETOOP.
 
+DEFINE_GETSETOOP(jboolean, Boolean)
+DEFINE_GETSETOOP(jbyte, Byte)
+DEFINE_GETSETOOP(jshort, Short);
+DEFINE_GETSETOOP(jchar, Char);
+DEFINE_GETSETOOP(jint, Int);
+DEFINE_GETSETOOP(jlong, Long);
+DEFINE_GETSETOOP(jfloat, Float);
+DEFINE_GETSETOOP(jdouble, Double);
+
+#undef DEFINE_GETSETOOP
 
 #define DEFINE_GETSETOOP_VOLATILE(jboolean, Boolean) \
  \
@@ -336,47 +355,49 @@
  \
 // END DEFINE_GETSETOOP_VOLATILE.
 
-DEFINE_GETSETOOP(jboolean, Boolean)
-DEFINE_GETSETOOP(jbyte, Byte)
-DEFINE_GETSETOOP(jshort, Short);
-DEFINE_GETSETOOP(jchar, Char);
-DEFINE_GETSETOOP(jint, Int);
-DEFINE_GETSETOOP(jlong, Long);
-DEFINE_GETSETOOP(jfloat, Float);
-DEFINE_GETSETOOP(jdouble, Double);
-
 DEFINE_GETSETOOP_VOLATILE(jboolean, Boolean)
 DEFINE_GETSETOOP_VOLATILE(jbyte, Byte)
 DEFINE_GETSETOOP_VOLATILE(jshort, Short);
 DEFINE_GETSETOOP_VOLATILE(jchar, Char);
 DEFINE_GETSETOOP_VOLATILE(jint, Int);
-// no long -- handled specially
 DEFINE_GETSETOOP_VOLATILE(jfloat, Float);
 DEFINE_GETSETOOP_VOLATILE(jdouble, Double);
 
-#undef DEFINE_GETSETOOP
+#if defined(SPARC) || defined(X86)
+// SPARC and x86 have atomic jlong (8-byte) load/store instructions
+DEFINE_GETSETOOP_VOLATILE(jlong, Long);
+#endif
+
+#undef DEFINE_GETSETOOP_VOLATILE
 
 // The non-intrinsified versions of setOrdered just use setVolatile
 
-UNSAFE_ENTRY(void, Unsafe_SetOrderedInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint x)) \
-  UnsafeWrapper("Unsafe_SetOrderedInt"); \
-  SET_FIELD_VOLATILE(obj, offset, jint, x); \
+UNSAFE_ENTRY(void, Unsafe_SetOrderedInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint x))
+  UnsafeWrapper("Unsafe_SetOrderedInt");
+  SET_FIELD_VOLATILE(obj, offset, jint, x);
 UNSAFE_END
 
 UNSAFE_ENTRY(void, Unsafe_SetOrderedObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h))
   UnsafeWrapper("Unsafe_SetOrderedObject");
   oop x = JNIHandles::resolve(x_h);
   oop p = JNIHandles::resolve(obj);
+  void* addr = index_oop_from_field_offset_long(p, offset);
+  OrderAccess::release();
   if (UseCompressedOops) {
-    oop_store((narrowOop*)index_oop_from_field_offset_long(p, offset), x);
+    oop_store((narrowOop*)addr, x);
   } else {
-    oop_store((oop*)index_oop_from_field_offset_long(p, offset), x);
+    oop_store((oop*)addr, x);
   }
   OrderAccess::fence();
 UNSAFE_END
 
 UNSAFE_ENTRY(void, Unsafe_SetOrderedLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong x))
   UnsafeWrapper("Unsafe_SetOrderedLong");
+#if defined(SPARC) || defined(X86)
+  // SPARC and x86 have atomic jlong (8-byte) load/store instructions
+  SET_FIELD_VOLATILE(obj, offset, jlong, x);
+#else
+  // Keep the old code for platforms that may not have atomic jlong (8-byte) loads/stores
   {
     if (VM_Version::supports_cx8()) {
       SET_FIELD_VOLATILE(obj, offset, jlong, x);
@@ -388,6 +409,7 @@
       *addr = x;
     }
   }
+#endif
 UNSAFE_END
 
 ////// Data in the C heap.
--- a/src/share/vm/runtime/arguments.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/runtime/arguments.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -918,9 +918,7 @@
   } else if (strcmp(key, "sun.java.command") == 0) {
     _java_command = value;
 
-    // don't add this property to the properties exposed to the java application
-    FreeHeap(key);
-    return true;
+    // Record the value in Arguments, but also let it be passed on to Java.
   } else if (strcmp(key, "sun.java.launcher.pid") == 0) {
     // launcher.pid property is private and is processed
     // in process_sun_java_launcher_properties();
--- a/src/share/vm/runtime/globals.hpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/runtime/globals.hpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1198,9 +1198,6 @@
   product(ccstr, TraceJVMTI, NULL,                                          \
           "Trace flags for JVMTI functions and events")                     \
                                                                             \
-  product(bool, ForceFullGCJVMTIEpilogues, false,                           \
-          "Force 'Full GC' was done semantics for JVMTI GC epilogues")      \
-                                                                            \
   /* This option can change an EMCP method into an obsolete method. */      \
  /* This can affect tests that expect specific methods to be EMCP. */      \
   /* This option should be used with caution. */                            \
--- a/src/share/vm/runtime/java.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/runtime/java.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -663,7 +663,8 @@
     }
     _current = JDK_Version(major, minor, micro, info.update_version,
                            info.special_update_version, build,
-                           info.thread_park_blocker == 1);
+                           info.thread_park_blocker == 1,
+                           info.post_vm_init_hook_enabled == 1);
   }
 }
 
--- a/src/share/vm/runtime/java.hpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/runtime/java.hpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -92,6 +92,7 @@
   bool _partially_initialized;
 
   bool _thread_park_blocker;
+  bool _post_vm_init_hook_enabled;
 
   bool is_valid() const {
     return (_major != 0 || _partially_initialized);
@@ -113,14 +114,15 @@
 
   JDK_Version() : _major(0), _minor(0), _micro(0), _update(0),
                   _special(0), _build(0), _partially_initialized(false),
-                  _thread_park_blocker(false) {}
+                  _thread_park_blocker(false), _post_vm_init_hook_enabled(false) {}
 
   JDK_Version(uint8_t major, uint8_t minor = 0, uint8_t micro = 0,
               uint8_t update = 0, uint8_t special = 0, uint8_t build = 0,
-              bool thread_park_blocker = false) :
+              bool thread_park_blocker = false, bool post_vm_init_hook_enabled = false) :
       _major(major), _minor(minor), _micro(micro), _update(update),
       _special(special), _build(build), _partially_initialized(false),
-      _thread_park_blocker(thread_park_blocker) {}
+      _thread_park_blocker(thread_park_blocker),
+      _post_vm_init_hook_enabled(post_vm_init_hook_enabled) {}
 
   // Returns the current running JDK version
   static JDK_Version current() { return _current; }
@@ -144,6 +146,9 @@
   bool supports_thread_park_blocker() const {
     return _thread_park_blocker;
   }
+  bool post_vm_init_hook_enabled() const {
+    return _post_vm_init_hook_enabled;
+  }
 
   // Performs a full ordering comparison using all fields (update, build, etc.)
   int compare(const JDK_Version& other) const;
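The new flag is consumed later in this changeset (see the thread.cpp hunk below); condensed, the consuming side reads:

  // Only attempt the post-init upcall when the JDK build has advertised
  // support for it through the version info queried in java.cpp above.
  if (JDK_Version::current().post_vm_init_hook_enabled()) {
    call_postVMInitHook(THREAD);
    if (HAS_PENDING_EXCEPTION) {
      CLEAR_PENDING_EXCEPTION;   // the hook must not abort VM startup
    }
  }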
--- a/src/share/vm/runtime/jniHandles.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/runtime/jniHandles.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "oops/oop.inline.hpp"
+#include "prims/jvmtiTagMap.hpp"
 #include "runtime/jniHandles.hpp"
 #include "runtime/mutexLocker.hpp"
 #ifdef TARGET_OS_FAMILY_linux
@@ -428,6 +429,12 @@
       break;
     }
   }
+
+  /*
+   * JvmtiTagMap may also contain weak oops.  Its iteration is placed here
+   * so that we don't need to add it to each of the collectors.
+   */
+  JvmtiTagMap::weak_oops_do(is_alive, f);
 }
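The general shape of such a weak_oops_do walk, for readers unfamiliar with the closure types, is roughly the following. The entry type and iteration helpers are hypothetical; only BoolObjectClosure and OopClosure are existing HotSpot interfaces, and this is not the JvmtiTagMap implementation itself.

  // Hypothetical sketch: visit every weakly held oop, either forwarding it
  // to 'f' (which may update the oop in place) or dropping the entry because
  // its referent is no longer reachable.
  void Example_weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
    for (ExampleEntry* e = first_entry(); e != NULL; e = e->next()) {  // hypothetical iteration
      if (is_alive->do_object_b(e->object())) {
        f->do_oop(e->object_addr());   // referent survives; GC may relocate it
      } else {
        remove_entry(e);               // referent died; discard this entry
      }
    }
  }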
 
 
--- a/src/share/vm/runtime/thread.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/runtime/thread.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -31,6 +31,7 @@
 #include "compiler/compileBroker.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/linkResolver.hpp"
+#include "jvmtifiles/jvmtiEnv.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/universe.inline.hpp"
 #include "oops/instanceKlass.hpp"
@@ -977,6 +978,19 @@
 }
 #endif // KERNEL
 
+// General-purpose hook into Java code, run once when the VM is initialized.
+// The Java library method itself may be changed independently of the VM.
+static void call_postVMInitHook(TRAPS) {
+  klassOop k = SystemDictionary::sun_misc_PostVMInitHook_klass();
+  instanceKlassHandle klass (THREAD, k);
+  if (klass.not_null()) {
+    JavaValue result(T_VOID);
+    JavaCalls::call_static(&result, klass, vmSymbolHandles::run_method_name(),
+                                           vmSymbolHandles::void_method_signature(),
+                                           CHECK);
+  }
+}
+
 static void reset_vm_info_property(TRAPS) {
   // the vm info string
   ResourceMark rm(THREAD);
@@ -1699,7 +1713,7 @@
     tlab().make_parsable(true);  // retire TLAB
   }
 
-  if (jvmti_thread_state() != NULL) {
+  if (JvmtiEnv::environments_might_exist()) {
     JvmtiExport::cleanup_thread(this);
   }
 
@@ -3345,6 +3359,14 @@
 
   BiasedLocking::init();
 
+  if (JDK_Version::current().post_vm_init_hook_enabled()) {
+    call_postVMInitHook(THREAD);
+    // The Java side of PostVMInitHook.run must deal with all
+    // exceptions and provide means of diagnosis.
+    if (HAS_PENDING_EXCEPTION) {
+      CLEAR_PENDING_EXCEPTION;
+    }
+  }
 
   // Start up the WatcherThread if there are any periodic tasks
   // NOTE:  All PeriodicTasks should be registered by now. If they
--- a/src/share/vm/runtime/thread.hpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/runtime/thread.hpp	Thu Jan 13 22:54:23 2011 -0800
@@ -809,7 +809,7 @@
   //
   // _vm_exited is a special value to cover the case of a JavaThread
   // executing native code after the VM itself is terminated.
-  TerminatedTypes       _terminated;
+  volatile TerminatedTypes _terminated;
   // suspend/resume support
   volatile bool         _suspend_equivalent;     // Suspend equivalent condition
   jint                  _in_deopt_handler;       // count of deoptimization
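A brief note on the volatile qualifier added above, with hypothetical names rather than the real declarations: a state field that other threads poll without holding a lock is marked volatile so the compiler must re-read it from memory on every access; volatile by itself is not a memory barrier, so any ordering guarantees still come from explicit OrderAccess operations.

  // Hypothetical illustration only (not the HotSpot declarations).
  enum ExampleTerminatedState { example_not_terminated, example_exiting, example_terminated };

  volatile ExampleTerminatedState _example_terminated = example_not_terminated;

  // Polled from other threads without a lock; volatile forces a fresh load
  // each time instead of letting the compiler cache the value in a register.
  bool example_is_terminated() {
    return _example_terminated != example_not_terminated;
  }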
--- a/src/share/vm/runtime/vmStructs.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/runtime/vmStructs.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -219,8 +219,8 @@
   volatile_nonstatic_field(oopDesc,            _metadata._compressed_klass,                   narrowOop)                             \
      static_field(oopDesc,                     _bs,                                           BarrierSet*)                           \
   nonstatic_field(arrayKlass,                  _dimension,                                    int)                                   \
-  nonstatic_field(arrayKlass,                  _higher_dimension,                             klassOop)                              \
-  nonstatic_field(arrayKlass,                  _lower_dimension,                              klassOop)                              \
+  volatile_nonstatic_field(arrayKlass,         _higher_dimension,                             klassOop)                              \
+  volatile_nonstatic_field(arrayKlass,         _lower_dimension,                              klassOop)                              \
   nonstatic_field(arrayKlass,                  _vtable_len,                                   int)                                   \
   nonstatic_field(arrayKlass,                  _alloc_size,                                   juint)                                 \
   nonstatic_field(arrayKlass,                  _component_mirror,                             oop)                                   \
--- a/src/share/vm/utilities/vmError.cpp	Thu Jan 13 22:53:34 2011 -0800
+++ b/src/share/vm/utilities/vmError.cpp	Thu Jan 13 22:54:23 2011 -0800
@@ -874,11 +874,13 @@
       }
 
       if (fd == -1) {
-        // try temp directory
         const char * tmpdir = os::get_temp_directory();
-        jio_snprintf(buffer, sizeof(buffer), "%s%shs_err_pid%u.log",
-                     tmpdir, os::file_separator(), os::current_process_id());
-        fd = open(buffer, O_WRONLY | O_CREAT | O_TRUNC, 0666);
+        // try the temp directory, if one is set (non-NULL and non-empty).
+        if (tmpdir != NULL && tmpdir[0] != '\0') {
+          jio_snprintf(buffer, sizeof(buffer), "%s%shs_err_pid%u.log",
+                       tmpdir, os::file_separator(), os::current_process_id());
+          fd = open(buffer, O_WRONLY | O_CREAT | O_TRUNC, 0666);
+        }
       }
 
       if (fd != -1) {