changeset 641:660978a2a31a

6791178: Specialize for zero as the compressed oop vm heap base
Summary: Use zero based compressed oops if java heap is below 32gb and unscaled compressed oops if java heap is below 4gb.
Reviewed-by: never, twisti, jcoomes, coleenp
author kvn
date Thu, 12 Mar 2009 10:37:46 -0700
parents 6af0a709d52b
children c771b7f43bbf
files agent/src/share/classes/sun/jvm/hotspot/debugger/Debugger.java agent/src/share/classes/sun/jvm/hotspot/debugger/DebuggerBase.java agent/src/share/classes/sun/jvm/hotspot/debugger/JVMDebugger.java agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebugger.java agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebuggerClient.java agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebuggerServer.java agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java src/cpu/sparc/vm/assembler_sparc.cpp src/cpu/sparc/vm/sparc.ad src/cpu/sparc/vm/stubGenerator_sparc.cpp src/cpu/sparc/vm/vm_version_sparc.cpp src/cpu/sparc/vm/vtableStubs_sparc.cpp src/cpu/x86/vm/assembler_x86.cpp src/cpu/x86/vm/assembler_x86.hpp src/cpu/x86/vm/x86_64.ad src/os/linux/vm/os_linux.cpp src/os/solaris/dtrace/generateJvmOffsets.cpp src/os/solaris/dtrace/jhelper.d src/os/solaris/dtrace/libjvm_db.c src/os/solaris/vm/os_solaris.cpp src/os/windows/vm/os_windows.cpp src/os_cpu/linux_sparc/vm/globals_linux_sparc.hpp src/os_cpu/linux_x86/vm/globals_linux_x86.hpp src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp src/os_cpu/windows_x86/vm/globals_windows_x86.hpp src/share/vm/asm/assembler.cpp src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp src/share/vm/memory/genCollectedHeap.cpp src/share/vm/memory/universe.cpp src/share/vm/memory/universe.hpp src/share/vm/oops/oop.inline.hpp src/share/vm/opto/addnode.cpp src/share/vm/opto/compile.cpp src/share/vm/opto/connode.cpp src/share/vm/opto/lcm.cpp src/share/vm/opto/matcher.cpp src/share/vm/runtime/arguments.cpp src/share/vm/runtime/globals.hpp src/share/vm/runtime/os.hpp src/share/vm/runtime/virtualspace.cpp src/share/vm/runtime/virtualspace.hpp src/share/vm/runtime/vmStructs.cpp
diffstat 45 files changed, 1075 insertions(+), 223 deletions(-)
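
The whole change hangs on one identity: a narrow oop decodes as base + (narrow << shift), where _narrow_oop._base may be NULL (zero based, heap below 32gb) and _narrow_oop._shift may be 0 (unscaled, heap below 4gb). Below is a minimal standalone C++ sketch of the mode selection and the encode/decode round trip, with simplified types and hypothetical helper names; it is not the HotSpot code itself.

    #include <cstdint>
    #include <cstdio>

    // Simplified stand-in for Universe::_narrow_oop (see the Universe/vmStructs hunks).
    struct NarrowOopStruct {
      uint64_t base;   // 0 when zero based compressed oops are usable
      int      shift;  // 0 when the heap also ends below 4gb (unscaled)
    };

    static const int      kLogMinObjAlign = 3;                               // 8-byte objects
    static const uint64_t kUnscaledMax    = 1ull << 32;                      // 4gb
    static const uint64_t kZeroBasedMax   = kUnscaledMax << kLogMinObjAlign; // 32gb

    // Hypothetical mode selection matching the Summary line above.
    NarrowOopStruct select_mode(uint64_t heap_base, uint64_t heap_end) {
      if (heap_end <= kUnscaledMax)  return { 0, 0 };
      if (heap_end <= kZeroBasedMax) return { 0, kLogMinObjAlign };
      return { heap_base, kLogMinObjAlign };
    }

    // Same shape as oop.inline.hpp decode_heap_oop / encode_heap_oop.
    uint64_t decode(NarrowOopStruct n, uint32_t narrow) {
      return narrow == 0 ? 0 : n.base + ((uint64_t)narrow << n.shift);
    }
    uint32_t encode(NarrowOopStruct n, uint64_t oop) {
      return oop == 0 ? 0 : (uint32_t)((oop - n.base) >> n.shift);
    }

    int main() {
      NarrowOopStruct n = select_mode(0x100000000ull, 0x900000000ull); // 36gb end
      uint64_t p = 0x123456780ull;                                     // 8-byte aligned
      printf("shift=%d base=%#llx roundtrip=%d\n", n.shift,
             (unsigned long long)n.base, decode(n, encode(n, p)) == p);
      return 0;
    }
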
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/Debugger.java	Wed Mar 11 14:16:13 2009 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/Debugger.java	Thu Mar 12 10:37:46 2009 -0700
@@ -118,9 +118,9 @@
   public long getJIntSize();
   public long getJLongSize();
   public long getJShortSize();
-  public long getHeapBase();
   public long getHeapOopSize();
-  public long getLogMinObjAlignmentInBytes();
+  public long getNarrowOopBase();
+  public int  getNarrowOopShift();
 
   public ReadResult readBytesFromProcess(long address, long numBytes)
     throws DebuggerException;
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/DebuggerBase.java	Wed Mar 11 14:16:13 2009 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/DebuggerBase.java	Thu Mar 12 10:37:46 2009 -0700
@@ -56,8 +56,8 @@
   // heap data.
   protected long oopSize;
   protected long heapOopSize;
-  protected long heapBase;                 // heap base for compressed oops.
-  protected long logMinObjAlignmentInBytes; // Used to decode compressed oops.
+  protected long narrowOopBase;  // heap base for compressed oops.
+  protected int  narrowOopShift; // shift to decode compressed oops.
   // Should be initialized if desired by calling initCache()
   private PageCache cache;
 
@@ -159,10 +159,10 @@
     javaPrimitiveTypesConfigured = true;
   }
 
-  public void putHeapConst(long heapBase, long heapOopSize, long logMinObjAlignmentInBytes) {
-    this.heapBase = heapBase;
+  public void putHeapConst(long heapOopSize, long narrowOopBase, int narrowOopShift) {
     this.heapOopSize = heapOopSize;
-    this.logMinObjAlignmentInBytes = logMinObjAlignmentInBytes;
+    this.narrowOopBase = narrowOopBase;
+    this.narrowOopShift = narrowOopShift;
   }
 
   /** May be called by subclasses if desired to initialize the page
@@ -459,7 +459,7 @@
     long value = readCInteger(address, getHeapOopSize(), true);
     if (value != 0) {
       // See oop.inline.hpp decode_heap_oop
-      value = (long)(heapBase + (long)(value << logMinObjAlignmentInBytes));
+      value = (long)(narrowOopBase + (long)(value << narrowOopShift));
     }
     return value;
   }
@@ -545,10 +545,10 @@
     return heapOopSize;
   }
 
-  public long getHeapBase() {
-    return heapBase;
+  public long getNarrowOopBase() {
+    return narrowOopBase;
   }
-  public long getLogMinObjAlignmentInBytes() {
-    return logMinObjAlignmentInBytes;
+  public int getNarrowOopShift() {
+    return narrowOopShift;
   }
 }
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/JVMDebugger.java	Wed Mar 11 14:16:13 2009 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/JVMDebugger.java	Thu Mar 12 10:37:46 2009 -0700
@@ -42,5 +42,5 @@
                                               long jintSize,
                                               long jlongSize,
                                               long jshortSize);
-  public void putHeapConst(long heapBase, long heapOopSize, long logMinObjAlignment);
+  public void putHeapConst(long heapOopSize, long narrowOopBase, int narrowOopShift);
 }
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebugger.java	Wed Mar 11 14:16:13 2009 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebugger.java	Thu Mar 12 10:37:46 2009 -0700
@@ -65,9 +65,10 @@
   public long      getJIntSize() throws RemoteException;
   public long      getJLongSize() throws RemoteException;
   public long      getJShortSize() throws RemoteException;
-  public long      getHeapBase() throws RemoteException;
   public long      getHeapOopSize() throws RemoteException;
-  public long      getLogMinObjAlignmentInBytes() throws RemoteException;
+  public long      getNarrowOopBase() throws RemoteException;
+  public int       getNarrowOopShift() throws RemoteException;
+
   public boolean   areThreadsEqual(long addrOrId1, boolean isAddress1,
                                    long addrOrId2, boolean isAddress2) throws RemoteException;
   public int       getThreadHashCode(long addrOrId, boolean isAddress) throws RemoteException;
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebuggerClient.java	Wed Mar 11 14:16:13 2009 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebuggerClient.java	Thu Mar 12 10:37:46 2009 -0700
@@ -85,9 +85,9 @@
       jlongSize    = remoteDebugger.getJLongSize();
       jshortSize   = remoteDebugger.getJShortSize();
       javaPrimitiveTypesConfigured = true;
-      heapBase     = remoteDebugger.getHeapBase();
+      narrowOopBase  = remoteDebugger.getNarrowOopBase();
+      narrowOopShift = remoteDebugger.getNarrowOopShift();
       heapOopSize  = remoteDebugger.getHeapOopSize();
-      logMinObjAlignmentInBytes  = remoteDebugger.getLogMinObjAlignmentInBytes();
     }
     catch (RemoteException e) {
       throw new DebuggerException(e);
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebuggerServer.java	Wed Mar 11 14:16:13 2009 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebuggerServer.java	Thu Mar 12 10:37:46 2009 -0700
@@ -114,17 +114,18 @@
     return debugger.getJShortSize();
   }
 
-  public long getHeapBase() throws RemoteException {
-    return debugger.getHeapBase();
-  }
-
   public long getHeapOopSize() throws RemoteException {
     return debugger.getHeapOopSize();
   }
 
-  public long getLogMinObjAlignmentInBytes() throws RemoteException {
-    return debugger.getLogMinObjAlignmentInBytes();
+  public long getNarrowOopBase() throws RemoteException {
+    return debugger.getNarrowOopBase();
   }
+
+  public int  getNarrowOopShift() throws RemoteException {
+    return debugger.getNarrowOopShift();
+  }
+
   public boolean   areThreadsEqual(long addrOrId1, boolean isAddress1,
                                    long addrOrId2, boolean isAddress2) throws RemoteException {
     ThreadProxy t1 = getThreadProxy(addrOrId1, isAddress1);
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java	Wed Mar 11 14:16:13 2009 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java	Thu Mar 12 10:37:46 2009 -0700
@@ -53,7 +53,8 @@
   // system obj array klass object
   private static sun.jvm.hotspot.types.OopField systemObjArrayKlassObjField;
 
-  private static AddressField heapBaseField;
+  private static AddressField narrowOopBaseField;
+  private static CIntegerField narrowOopShiftField;
 
   static {
     VM.registerVMInitializedObserver(new Observer() {
@@ -86,7 +87,8 @@
 
     systemObjArrayKlassObjField = type.getOopField("_systemObjArrayKlassObj");
 
-    heapBaseField = type.getAddressField("_heap_base");
+    narrowOopBaseField = type.getAddressField("_narrow_oop._base");
+    narrowOopShiftField = type.getCIntegerField("_narrow_oop._shift");
   }
 
   public Universe() {
@@ -100,14 +102,18 @@
     }
   }
 
-  public static long getHeapBase() {
-    if (heapBaseField.getValue() == null) {
+  public static long getNarrowOopBase() {
+    if (narrowOopBaseField.getValue() == null) {
       return 0;
     } else {
-      return heapBaseField.getValue().minus(null);
+      return narrowOopBaseField.getValue().minus(null);
     }
   }
 
+  public static int getNarrowOopShift() {
+    return (int)narrowOopShiftField.getValue();
+  }
+
   /** Returns "TRUE" iff "p" points into the allocated area of the heap. */
   public boolean isIn(Address p) {
     return heap().isIn(p);
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Wed Mar 11 14:16:13 2009 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Thu Mar 12 10:37:46 2009 -0700
@@ -342,8 +342,8 @@
       throw new RuntimeException("Attempt to initialize VM twice");
     }
     soleInstance = new VM(db, debugger, debugger.getMachineDescription().isBigEndian());
-    debugger.putHeapConst(Universe.getHeapBase(), soleInstance.getHeapOopSize(),
-                          soleInstance.logMinObjAlignmentInBytes);
+    debugger.putHeapConst(soleInstance.getHeapOopSize(), Universe.getNarrowOopBase(),
+                          Universe.getNarrowOopShift());
     for (Iterator iter = vmInitializedObservers.iterator(); iter.hasNext(); ) {
       ((Observer) iter.next()).update(null, null);
     }
--- a/src/cpu/sparc/vm/assembler_sparc.cpp	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/cpu/sparc/vm/assembler_sparc.cpp	Thu Mar 12 10:37:46 2009 -0700
@@ -4316,7 +4316,13 @@
 
 void MacroAssembler::encode_heap_oop(Register src, Register dst) {
   assert (UseCompressedOops, "must be compressed");
+  assert (Universe::heap() != NULL, "java heap should be initialized");
+  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
   verify_oop(src);
+  if (Universe::narrow_oop_base() == NULL) {
+    srlx(src, LogMinObjAlignmentInBytes, dst);
+    return;
+  }
   Label done;
   if (src == dst) {
     // optimize for frequent case src == dst
@@ -4338,26 +4344,39 @@
 
 void MacroAssembler::encode_heap_oop_not_null(Register r) {
   assert (UseCompressedOops, "must be compressed");
+  assert (Universe::heap() != NULL, "java heap should be initialized");
+  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
   verify_oop(r);
-  sub(r, G6_heapbase, r);
+  if (Universe::narrow_oop_base() != NULL)
+    sub(r, G6_heapbase, r);
   srlx(r, LogMinObjAlignmentInBytes, r);
 }
 
 void MacroAssembler::encode_heap_oop_not_null(Register src, Register dst) {
   assert (UseCompressedOops, "must be compressed");
+  assert (Universe::heap() != NULL, "java heap should be initialized");
+  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
   verify_oop(src);
-  sub(src, G6_heapbase, dst);
-  srlx(dst, LogMinObjAlignmentInBytes, dst);
+  if (Universe::narrow_oop_base() == NULL) {
+    srlx(src, LogMinObjAlignmentInBytes, dst);
+  } else {
+    sub(src, G6_heapbase, dst);
+    srlx(dst, LogMinObjAlignmentInBytes, dst);
+  }
 }
 
 // Same algorithm as oop.inline.hpp decode_heap_oop.
 void  MacroAssembler::decode_heap_oop(Register src, Register dst) {
   assert (UseCompressedOops, "must be compressed");
-  Label done;
+  assert (Universe::heap() != NULL, "java heap should be initialized");
+  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
   sllx(src, LogMinObjAlignmentInBytes, dst);
-  bpr(rc_nz, true, Assembler::pt, dst, done);
-  delayed() -> add(dst, G6_heapbase, dst); // annuled if not taken
-  bind(done);
+  if (Universe::narrow_oop_base() != NULL) {
+    Label done;
+    bpr(rc_nz, true, Assembler::pt, dst, done);
+    delayed() -> add(dst, G6_heapbase, dst); // annulled if not taken
+    bind(done);
+  }
   verify_oop(dst);
 }
 
@@ -4366,8 +4385,11 @@
   // pd_code_size_limit.
   // Also do not verify_oop as this is called by verify_oop.
   assert (UseCompressedOops, "must be compressed");
+  assert (Universe::heap() != NULL, "java heap should be initialized");
+  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
   sllx(r, LogMinObjAlignmentInBytes, r);
-  add(r, G6_heapbase, r);
+  if (Universe::narrow_oop_base() != NULL)
+    add(r, G6_heapbase, r);
 }
 
 void  MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) {
@@ -4375,14 +4397,17 @@
   // pd_code_size_limit.
   // Also do not verify_oop as this is called by verify_oop.
   assert (UseCompressedOops, "must be compressed");
+  assert (Universe::heap() != NULL, "java heap should be initialized");
+  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
   sllx(src, LogMinObjAlignmentInBytes, dst);
-  add(dst, G6_heapbase, dst);
+  if (Universe::narrow_oop_base() != NULL)
+    add(dst, G6_heapbase, dst);
 }
 
 void MacroAssembler::reinit_heapbase() {
   if (UseCompressedOops) {
     // call indirectly to solve generation ordering problem
-    Address base(G6_heapbase, (address)Universe::heap_base_addr());
+    Address base(G6_heapbase, (address)Universe::narrow_oop_base_addr());
     load_ptr_contents(base, G6_heapbase);
   }
 }
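
With a NULL base, decode on sparc collapses to the single sllx: 0 << shift is still 0, so NULL survives without the bpr/delayed-add sequence shown above. A scalar sketch of the two decode paths (a hedged illustration, not HotSpot code):

    #include <cstdint>

    // Heap-based decode must keep NULL at NULL, hence the branch the
    // assembler emits around the G6_heapbase add.
    uint64_t decode_heap_based(uint64_t base, int shift, uint32_t narrow) {
      uint64_t v = (uint64_t)narrow << shift;  // sllx
      if (v != 0) v += base;                   // bpr rc_nz + delayed add
      return v;
    }

    // Zero based decode: the add and the null check both disappear.
    uint64_t decode_zero_based(int shift, uint32_t narrow) {
      return (uint64_t)narrow << shift;        // single sllx
    }
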
--- a/src/cpu/sparc/vm/sparc.ad	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/cpu/sparc/vm/sparc.ad	Thu Mar 12 10:37:46 2009 -0700
@@ -547,7 +547,11 @@
     int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
     int klass_load_size;
     if (UseCompressedOops) {
-      klass_load_size = 3*BytesPerInstWord; // see MacroAssembler::load_klass()
+      assert(Universe::heap() != NULL, "java heap should be initialized");
+      if (Universe::narrow_oop_base() == NULL)
+        klass_load_size = 2*BytesPerInstWord; // see MacroAssembler::load_klass()
+      else
+        klass_load_size = 3*BytesPerInstWord;
     } else {
       klass_load_size = 1*BytesPerInstWord;
     }
@@ -1601,9 +1605,11 @@
   st->print_cr("\nUEP:");
 #ifdef    _LP64
   if (UseCompressedOops) {
+    assert(Universe::heap() != NULL, "java heap should be initialized");
     st->print_cr("\tLDUW   [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
     st->print_cr("\tSLL    R_G5,3,R_G5");
-    st->print_cr("\tADD    R_G5,R_G6_heap_base,R_G5");
+    if (Universe::narrow_oop_base() != NULL)
+      st->print_cr("\tADD    R_G5,R_G6_heap_base,R_G5");
   } else {
     st->print_cr("\tLDX    [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
   }
@@ -2502,7 +2508,11 @@
       __ load_klass(O0, G3_scratch);
       int klass_load_size;
       if (UseCompressedOops) {
-        klass_load_size = 3*BytesPerInstWord;
+        assert(Universe::heap() != NULL, "java heap should be initialized");
+        if (Universe::narrow_oop_base() == NULL)
+          klass_load_size = 2*BytesPerInstWord;
+        else
+          klass_load_size = 3*BytesPerInstWord;
       } else {
         klass_load_size = 1*BytesPerInstWord;
       }
--- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Thu Mar 12 10:37:46 2009 -0700
@@ -2942,14 +2942,15 @@
     StubRoutines::_atomic_add_ptr_entry      = StubRoutines::_atomic_add_entry;
     StubRoutines::_fence_entry               = generate_fence();
 #endif  // COMPILER2 !=> _LP64
-
-    StubRoutines::Sparc::_partial_subtype_check                = generate_partial_subtype_check();
   }
 
 
   void generate_all() {
     // Generates all stubs and initializes the entry points
 
+    // Generate partial_subtype_check first here since its code depends on
+    // UseZeroBaseCompressedOops which is defined after heap initialization.
+    StubRoutines::Sparc::_partial_subtype_check                = generate_partial_subtype_check();
     // These entry points require SharedInfo::stack0 to be set up in non-core builds
     StubRoutines::_throw_AbstractMethodError_entry         = generate_throw_exception("AbstractMethodError throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError),  false);
     StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError),  false);
--- a/src/cpu/sparc/vm/vm_version_sparc.cpp	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/cpu/sparc/vm/vm_version_sparc.cpp	Thu Mar 12 10:37:46 2009 -0700
@@ -72,6 +72,9 @@
         FLAG_SET_ERGO(bool, UseCompressedOops, false);
       }
     }
+    // 32-bit oops don't make sense for the 64-bit VM on sparc
+    // since the 32-bit VM has the same registers and smaller objects.
+    Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
 #endif // _LP64
 #ifdef COMPILER2
     // Indirect branch is the same cost as direct
--- a/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Thu Mar 12 10:37:46 2009 -0700
@@ -221,13 +221,15 @@
     if (is_vtable_stub) {
       // ld;ld;ld,jmp,nop
       const int basic = 5*BytesPerInstWord +
-                        // shift;add for load_klass
-                        (UseCompressedOops ? 2*BytesPerInstWord : 0);
+                        // shift;add for load_klass (only shift with zero heap based)
+                        (UseCompressedOops ?
+                         ((Universe::narrow_oop_base() == NULL) ? BytesPerInstWord : 2*BytesPerInstWord) : 0);
       return basic + slop;
     } else {
       const int basic = (28 LP64_ONLY(+ 6)) * BytesPerInstWord +
-                        // shift;add for load_klass
-                        (UseCompressedOops ? 2*BytesPerInstWord : 0);
+                        // shift;add for load_klass (only shift with zero heap based)
+                        (UseCompressedOops ?
+                         ((Universe::narrow_oop_base() == NULL) ? BytesPerInstWord : 2*BytesPerInstWord) : 0);
       return (basic + slop);
     }
   }
--- a/src/cpu/x86/vm/assembler_x86.cpp	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/cpu/x86/vm/assembler_x86.cpp	Thu Mar 12 10:37:46 2009 -0700
@@ -727,7 +727,7 @@
   }
 
 #ifdef _LP64
-  assert(false, "fix locate_operand");
+  assert(which == narrow_oop_operand && !is_64bit, "instruction is not a movl adr, imm32");
 #else
   assert(which == imm_operand, "instruction has only an imm field");
 #endif // LP64
@@ -3224,12 +3224,6 @@
   emit_byte(0xF1);
 }
 
-void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec, int format) {
-  InstructionMark im(this);
-  int encode = prefix_and_encode(dst->encoding());
-  emit_byte(0xB8 | encode);
-  emit_data((int)imm32, rspec, format);
-}
 
 #ifndef _LP64
 
@@ -3249,6 +3243,12 @@
   emit_data((int)imm32, rspec, 0);
 }
 
+void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec) {
+  InstructionMark im(this);
+  int encode = prefix_and_encode(dst->encoding());
+  emit_byte(0xB8 | encode);
+  emit_data((int)imm32, rspec, 0);
+}
 
 void Assembler::popa() { // 32bit
   emit_byte(0x61);
@@ -3857,6 +3857,37 @@
   emit_data64(imm64, rspec);
 }
 
+void Assembler::mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec) {
+  InstructionMark im(this);
+  int encode = prefix_and_encode(dst->encoding());
+  emit_byte(0xB8 | encode);
+  emit_data((int)imm32, rspec, narrow_oop_operand);
+}
+
+void Assembler::mov_narrow_oop(Address dst, int32_t imm32,  RelocationHolder const& rspec) {
+  InstructionMark im(this);
+  prefix(dst);
+  emit_byte(0xC7);
+  emit_operand(rax, dst, 4);
+  emit_data((int)imm32, rspec, narrow_oop_operand);
+}
+
+void Assembler::cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec) {
+  InstructionMark im(this);
+  int encode = prefix_and_encode(src1->encoding());
+  emit_byte(0x81);
+  emit_byte(0xF8 | encode);
+  emit_data((int)imm32, rspec, narrow_oop_operand);
+}
+
+void Assembler::cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec) {
+  InstructionMark im(this);
+  prefix(src1);
+  emit_byte(0x81);
+  emit_operand(rax, src1, 4);
+  emit_data((int)imm32, rspec, narrow_oop_operand);
+}
+
 void Assembler::movdq(XMMRegister dst, Register src) {
   // table D-1 says MMX/SSE2
   NOT_LP64(assert(VM_Version::supports_sse2() || VM_Version::supports_mmx(), ""));
@@ -7710,14 +7741,21 @@
 void MacroAssembler::load_prototype_header(Register dst, Register src) {
 #ifdef _LP64
   if (UseCompressedOops) {
+    assert (Universe::heap() != NULL, "java heap should be initialized");
     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
-    movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
+    if (Universe::narrow_oop_shift() != 0) {
+      assert(Address::times_8 == LogMinObjAlignmentInBytes &&
+             Address::times_8 == Universe::narrow_oop_shift(), "decode alg wrong");
+      movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
+    } else {
+      movq(dst, Address(dst, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
+    }
   } else
 #endif
-    {
-      movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
-      movptr(dst, Address(dst, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
-    }
+  {
+    movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
+    movptr(dst, Address(dst, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
+  }
 }
 
 void MacroAssembler::store_klass(Register dst, Register src) {
@@ -7760,11 +7798,20 @@
 // Algorithm must match oop.inline.hpp encode_heap_oop.
 void MacroAssembler::encode_heap_oop(Register r) {
   assert (UseCompressedOops, "should be compressed");
+  assert (Universe::heap() != NULL, "java heap should be initialized");
+  if (Universe::narrow_oop_base() == NULL) {
+    verify_oop(r, "broken oop in encode_heap_oop");
+    if (Universe::narrow_oop_shift() != 0) {
+      assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+      shrq(r, LogMinObjAlignmentInBytes);
+    }
+    return;
+  }
 #ifdef ASSERT
   if (CheckCompressedOops) {
     Label ok;
     push(rscratch1); // cmpptr trashes rscratch1
-    cmpptr(r12_heapbase, ExternalAddress((address)Universe::heap_base_addr()));
+    cmpptr(r12_heapbase, ExternalAddress((address)Universe::narrow_oop_base_addr()));
     jcc(Assembler::equal, ok);
     stop("MacroAssembler::encode_heap_oop: heap base corrupted?");
     bind(ok);
@@ -7780,6 +7827,7 @@
 
 void MacroAssembler::encode_heap_oop_not_null(Register r) {
   assert (UseCompressedOops, "should be compressed");
+  assert (Universe::heap() != NULL, "java heap should be initialized");
 #ifdef ASSERT
   if (CheckCompressedOops) {
     Label ok;
@@ -7790,12 +7838,18 @@
   }
 #endif
   verify_oop(r, "broken oop in encode_heap_oop_not_null");
-  subq(r, r12_heapbase);
-  shrq(r, LogMinObjAlignmentInBytes);
+  if (Universe::narrow_oop_base() != NULL) {
+    subq(r, r12_heapbase);
+  }
+  if (Universe::narrow_oop_shift() != 0) {
+    assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+    shrq(r, LogMinObjAlignmentInBytes);
+  }
 }
 
 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
   assert (UseCompressedOops, "should be compressed");
+  assert (Universe::heap() != NULL, "java heap should be initialized");
 #ifdef ASSERT
   if (CheckCompressedOops) {
     Label ok;
@@ -7809,18 +7863,32 @@
   if (dst != src) {
     movq(dst, src);
   }
-  subq(dst, r12_heapbase);
-  shrq(dst, LogMinObjAlignmentInBytes);
+  if (Universe::narrow_oop_base() != NULL) {
+    subq(dst, r12_heapbase);
+  }
+  if (Universe::narrow_oop_shift() != 0) {
+    assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+    shrq(dst, LogMinObjAlignmentInBytes);
+  }
 }
 
 void  MacroAssembler::decode_heap_oop(Register r) {
   assert (UseCompressedOops, "should be compressed");
+  assert (Universe::heap() != NULL, "java heap should be initialized");
+  if (Universe::narrow_oop_base() == NULL) {
+    if (Universe::narrow_oop_shift() != 0) {
+      assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+      shlq(r, LogMinObjAlignmentInBytes);
+    }
+    verify_oop(r, "broken oop in decode_heap_oop");
+    return;
+  }
 #ifdef ASSERT
   if (CheckCompressedOops) {
     Label ok;
     push(rscratch1);
     cmpptr(r12_heapbase,
-           ExternalAddress((address)Universe::heap_base_addr()));
+           ExternalAddress((address)Universe::narrow_oop_base_addr()));
     jcc(Assembler::equal, ok);
     stop("MacroAssembler::decode_heap_oop: heap base corrupted?");
     bind(ok);
@@ -7844,32 +7912,76 @@
 
 void  MacroAssembler::decode_heap_oop_not_null(Register r) {
   assert (UseCompressedOops, "should only be used for compressed headers");
+  assert (Universe::heap() != NULL, "java heap should be initialized");
   // Cannot assert, unverified entry point counts instructions (see .ad file)
   // vtableStubs also counts instructions in pd_code_size_limit.
   // Also do not verify_oop as this is called by verify_oop.
-  assert(Address::times_8 == LogMinObjAlignmentInBytes, "decode alg wrong");
-  leaq(r, Address(r12_heapbase, r, Address::times_8, 0));
+  if (Universe::narrow_oop_base() == NULL) {
+    if (Universe::narrow_oop_shift() != 0) {
+      assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+      shlq(r, LogMinObjAlignmentInBytes);
+    }
+  } else {
+    assert (Address::times_8 == LogMinObjAlignmentInBytes &&
+            Address::times_8 == Universe::narrow_oop_shift(), "decode alg wrong");
+    leaq(r, Address(r12_heapbase, r, Address::times_8, 0));
+  }
 }
 
 void  MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
   assert (UseCompressedOops, "should only be used for compressed headers");
+  assert (Universe::heap() != NULL, "java heap should be initialized");
   // Cannot assert, unverified entry point counts instructions (see .ad file)
   // vtableStubs also counts instructions in pd_code_size_limit.
   // Also do not verify_oop as this is called by verify_oop.
-  assert(Address::times_8 == LogMinObjAlignmentInBytes, "decode alg wrong");
-  leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
+  if (Universe::narrow_oop_shift() != 0) {
+    assert (Address::times_8 == LogMinObjAlignmentInBytes &&
+            Address::times_8 == Universe::narrow_oop_shift(), "decode alg wrong");
+    leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
+  } else if (dst != src) {
+    movq(dst, src);
+  }
 }
 
 void  MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
-  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
+  assert (UseCompressedOops, "should only be used for compressed headers");
+  assert (Universe::heap() != NULL, "java heap should be initialized");
+  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
+  int oop_index = oop_recorder()->find_index(obj);
+  RelocationHolder rspec = oop_Relocation::spec(oop_index);
+  mov_narrow_oop(dst, oop_index, rspec);
+}
+
+void  MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
+  assert (UseCompressedOops, "should only be used for compressed headers");
+  assert (Universe::heap() != NULL, "java heap should be initialized");
+  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int oop_index = oop_recorder()->find_index(obj);
   RelocationHolder rspec = oop_Relocation::spec(oop_index);
-  mov_literal32(dst, oop_index, rspec, narrow_oop_operand);
+  mov_narrow_oop(dst, oop_index, rspec);
+}
+
+void  MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
+  assert (UseCompressedOops, "should only be used for compressed headers");
+  assert (Universe::heap() != NULL, "java heap should be initialized");
+  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
+  int oop_index = oop_recorder()->find_index(obj);
+  RelocationHolder rspec = oop_Relocation::spec(oop_index);
+  Assembler::cmp_narrow_oop(dst, oop_index, rspec);
+}
+
+void  MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) {
+  assert (UseCompressedOops, "should only be used for compressed headers");
+  assert (Universe::heap() != NULL, "java heap should be initialized");
+  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
+  int oop_index = oop_recorder()->find_index(obj);
+  RelocationHolder rspec = oop_Relocation::spec(oop_index);
+  Assembler::cmp_narrow_oop(dst, oop_index, rspec);
 }
 
 void MacroAssembler::reinit_heapbase() {
   if (UseCompressedOops) {
-    movptr(r12_heapbase, ExternalAddress((address)Universe::heap_base_addr()));
+    movptr(r12_heapbase, ExternalAddress((address)Universe::narrow_oop_base_addr()));
   }
 }
 #endif // _LP64
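
On x86_64 the base and the shift specialize independently: base == NULL drops the subq/addq against r12_heapbase, and shift == 0 (unscaled) drops the shrq/shlq as well, so for a heap below 4gb encode and decode of a non-null oop reduce to a register move. A scalar sketch of the specialized decode_heap_oop_not_null (hypothetical helper, not HotSpot code):

    #include <cstdint>

    uint64_t decode_not_null(uint64_t base, int shift, uint32_t narrow) {
      uint64_t v = (uint64_t)narrow;
      if (shift != 0) v <<= shift;  // shlq, or folded into leaq's times_8 scale
      if (base  != 0) v += base;    // addq r12_heapbase / leaq(base, reg, 8)
      return v;                     // base == 0 && shift == 0: plain move
    }
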
--- a/src/cpu/x86/vm/assembler_x86.hpp	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/cpu/x86/vm/assembler_x86.hpp	Thu Mar 12 10:37:46 2009 -0700
@@ -578,20 +578,25 @@
 
   // These are all easily abused and hence protected
 
-  void mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec, int format = 0);
-
   // 32BIT ONLY SECTION
 #ifndef _LP64
   // Make these disappear in 64bit mode since they would never be correct
   void cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec);   // 32BIT ONLY
   void cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec);    // 32BIT ONLY
 
+  void mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec);    // 32BIT ONLY
   void mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec);     // 32BIT ONLY
 
   void push_literal32(int32_t imm32, RelocationHolder const& rspec);                 // 32BIT ONLY
 #else
   // 64BIT ONLY SECTION
   void mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec);   // 64BIT ONLY
+
+  void cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec);
+  void cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec);
+
+  void mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec);
+  void mov_narrow_oop(Address dst, int32_t imm32, RelocationHolder const& rspec);
 #endif // _LP64
 
   // These are unique in that we are ensured by the caller that the 32bit
@@ -1647,6 +1652,9 @@
   void decode_heap_oop_not_null(Register dst, Register src);
 
   void set_narrow_oop(Register dst, jobject obj);
+  void set_narrow_oop(Address dst, jobject obj);
+  void cmp_narrow_oop(Register dst, jobject obj);
+  void cmp_narrow_oop(Address dst, jobject obj);
 
   // if heap base register is used - reinit it with the correct value
   void reinit_heapbase();
--- a/src/cpu/x86/vm/x86_64.ad	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/cpu/x86/vm/x86_64.ad	Thu Mar 12 10:37:46 2009 -0700
@@ -326,7 +326,6 @@
                          R9,  R9_H,
                          R10, R10_H,
                          R11, R11_H,
-                         R12, R12_H,
                          R13, R13_H,
                          R14, R14_H);
 
@@ -340,7 +339,6 @@
                          R9,  R9_H,
                          R10, R10_H,
                          R11, R11_H,
-                         R12, R12_H,
                          R13, R13_H,
                          R14, R14_H);
 
@@ -354,7 +352,6 @@
                              R9,  R9_H,
                              R10, R10_H,
                              R11, R11_H,
-                             R12, R12_H,
                              R13, R13_H,
                              R14, R14_H);
 
@@ -444,9 +441,6 @@
 // Singleton class for RDX long register
 reg_class long_rdx_reg(RDX, RDX_H);
 
-// Singleton class for R12 long register
-reg_class long_r12_reg(R12, R12_H);
-
 // Class for all int registers (except RSP)
 reg_class int_reg(RAX,
                   RDX,
@@ -1842,7 +1836,9 @@
 {
   if (UseCompressedOops) {
     st->print_cr("movl    rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t", oopDesc::klass_offset_in_bytes());
-    st->print_cr("leaq    rscratch1, [r12_heapbase, r, Address::times_8, 0]");
+    if (Universe::narrow_oop_shift() != 0) {
+      st->print_cr("leaq    rscratch1, [r12_heapbase, r, Address::times_8, 0]");
+    }
     st->print_cr("cmpq    rax, rscratch1\t # Inline cache check");
   } else {
     st->print_cr("cmpq    rax, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t"
@@ -1891,7 +1887,11 @@
 uint MachUEPNode::size(PhaseRegAlloc* ra_) const
 {
   if (UseCompressedOops) {
-    return OptoBreakpoint ? 19 : 20;
+    if (Universe::narrow_oop_shift() == 0) {
+      return OptoBreakpoint ? 15 : 16;
+    } else {
+      return OptoBreakpoint ? 19 : 20;
+    }
   } else {
     return OptoBreakpoint ? 11 : 12;
   }
@@ -2593,21 +2593,19 @@
     __ movl(Rrcx, Address(Rrdi, arrayOopDesc::length_offset_in_bytes()));
     __ addptr(Rrdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
     if (UseCompressedOops) {
+      __ push(Rrax);
       __ encode_heap_oop(Rrax);
       __ repne_scanl();
-      __ jcc(Assembler::notEqual, cmiss);
-      __ decode_heap_oop(Rrax);
+      __ pop(Rrax);
+      __ jccb(Assembler::notEqual, miss);
       __ movptr(Address(Rrsi,
                       sizeof(oopDesc) +
                       Klass::secondary_super_cache_offset_in_bytes()),
               Rrax);
       __ jmp(hit);
-      __ bind(cmiss);
-      __ decode_heap_oop(Rrax);
-      __ jmp(miss);
     } else {
       __ repne_scan();
-      __ jcc(Assembler::notEqual, miss);
+      __ jccb(Assembler::notEqual, miss);
       __ movptr(Address(Rrsi,
                       sizeof(oopDesc) +
                       Klass::secondary_super_cache_offset_in_bytes()),
@@ -4906,15 +4904,6 @@
   interface(REG_INTER);
 %}
 
-
-operand r12RegL() %{
-  constraint(ALLOC_IN_RC(long_r12_reg));
-  match(RegL);
-
-  format %{ %}
-  interface(REG_INTER);
-%}
-
 operand rRegN() %{
   constraint(ALLOC_IN_RC(int_reg));
   match(RegN);
@@ -5289,21 +5278,6 @@
   %}
 %}
 
-// Indirect Narrow Oop Plus Offset Operand
-operand indNarrowOopOffset(rRegN src, immL32 off) %{
-  constraint(ALLOC_IN_RC(ptr_reg));
-  match(AddP (DecodeN src) off);
-
-  op_cost(10);
-  format %{"[R12 + $src << 3 + $off] (compressed oop addressing)" %}
-  interface(MEMORY_INTER) %{
-    base(0xc); // R12
-    index($src);
-    scale(0x3);
-    disp($off);
-  %}
-%}
-
 // Indirect Memory Times Scale Plus Positive Index Register Plus Offset Operand
 operand indPosIndexScaleOffset(any_RegP reg, immL32 off, rRegI idx, immI2 scale)
 %{
@@ -5321,6 +5295,158 @@
   %}
 %}
 
+// Indirect Narrow Oop Plus Offset Operand
+// Note: the x86 architecture doesn't support "scale * index + offset" without a base,
+// so we can't free r12 even with Universe::narrow_oop_base() == NULL.
+operand indCompressedOopOffset(rRegN reg, immL32 off) %{
+  predicate(UseCompressedOops && (Universe::narrow_oop_shift() != 0));
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP (DecodeN reg) off);
+
+  op_cost(10);
+  format %{"[R12 + $reg << 3 + $off] (compressed oop addressing)" %}
+  interface(MEMORY_INTER) %{
+    base(0xc); // R12
+    index($reg);
+    scale(0x3);
+    disp($off);
+  %}
+%}
+
+// Indirect Memory Operand
+operand indirectNarrow(rRegN reg)
+%{
+  predicate(Universe::narrow_oop_shift() == 0);
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(DecodeN reg);
+
+  format %{ "[$reg]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index(0x4);
+    scale(0x0);
+    disp(0x0);
+  %}
+%}
+
+// Indirect Memory Plus Short Offset Operand
+operand indOffset8Narrow(rRegN reg, immL8 off)
+%{
+  predicate(Universe::narrow_oop_shift() == 0);
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP (DecodeN reg) off);
+
+  format %{ "[$reg + $off (8-bit)]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index(0x4);
+    scale(0x0);
+    disp($off);
+  %}
+%}
+
+// Indirect Memory Plus Long Offset Operand
+operand indOffset32Narrow(rRegN reg, immL32 off)
+%{
+  predicate(Universe::narrow_oop_shift() == 0);
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP (DecodeN reg) off);
+
+  format %{ "[$reg + $off (32-bit)]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index(0x4);
+    scale(0x0);
+    disp($off);
+  %}
+%}
+
+// Indirect Memory Plus Index Register Plus Offset Operand
+operand indIndexOffsetNarrow(rRegN reg, rRegL lreg, immL32 off)
+%{
+  predicate(Universe::narrow_oop_shift() == 0);
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP (AddP (DecodeN reg) lreg) off);
+
+  op_cost(10);
+  format %{"[$reg + $off + $lreg]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index($lreg);
+    scale(0x0);
+    disp($off);
+  %}
+%}
+
+// Indirect Memory Plus Index Register Plus Offset Operand
+operand indIndexNarrow(rRegN reg, rRegL lreg)
+%{
+  predicate(Universe::narrow_oop_shift() == 0);
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP (DecodeN reg) lreg);
+
+  op_cost(10);
+  format %{"[$reg + $lreg]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index($lreg);
+    scale(0x0);
+    disp(0x0);
+  %}
+%}
+
+// Indirect Memory Times Scale Plus Index Register
+operand indIndexScaleNarrow(rRegN reg, rRegL lreg, immI2 scale)
+%{
+  predicate(Universe::narrow_oop_shift() == 0);
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP (DecodeN reg) (LShiftL lreg scale));
+
+  op_cost(10);
+  format %{"[$reg + $lreg << $scale]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index($lreg);
+    scale($scale);
+    disp(0x0);
+  %}
+%}
+
+// Indirect Memory Times Scale Plus Index Register Plus Offset Operand
+operand indIndexScaleOffsetNarrow(rRegN reg, immL32 off, rRegL lreg, immI2 scale)
+%{
+  predicate(Universe::narrow_oop_shift() == 0);
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
+
+  op_cost(10);
+  format %{"[$reg + $off + $lreg << $scale]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index($lreg);
+    scale($scale);
+    disp($off);
+  %}
+%}
+
+// Indirect Memory Times Scale Plus Positive Index Register Plus Offset Operand
+operand indPosIndexScaleOffsetNarrow(rRegN reg, immL32 off, rRegI idx, immI2 scale)
+%{
+  constraint(ALLOC_IN_RC(ptr_reg));
+  predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
+  match(AddP (AddP (DecodeN reg) (LShiftL (ConvI2L idx) scale)) off);
+
+  op_cost(10);
+  format %{"[$reg + $off + $idx << $scale]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index($idx);
+    scale($scale);
+    disp($off);
+  %}
+%}
+
+
 //----------Special Memory Operands--------------------------------------------
 // Stack Slot Operand - This operand is used for loading and storing temporary
 //                      values on the stack where a match requires a value to
@@ -5488,7 +5614,10 @@
 
 opclass memory(indirect, indOffset8, indOffset32, indIndexOffset, indIndex,
                indIndexScale, indIndexScaleOffset, indPosIndexScaleOffset,
-               indNarrowOopOffset);
+               indCompressedOopOffset,
+               indirectNarrow, indOffset8Narrow, indOffset32Narrow,
+               indIndexOffsetNarrow, indIndexNarrow, indIndexScaleNarrow,
+               indIndexScaleOffsetNarrow, indPosIndexScaleOffsetNarrow);
 
 //----------PIPELINE-----------------------------------------------------------
 // Rules which define the behavior of the target architectures pipeline.
@@ -6234,9 +6363,7 @@
    ins_cost(125); // XXX
    format %{ "movl    $dst, $mem\t# compressed ptr" %}
    ins_encode %{
-     Address addr = build_address($mem$$base, $mem$$index, $mem$$scale, $mem$$disp);
-     Register dst = as_Register($dst$$reg);
-     __ movl(dst, addr);
+     __ movl($dst$$Register, $mem$$Address);
    %}
    ins_pipe(ialu_reg_mem); // XXX
 %}
@@ -6262,9 +6389,7 @@
   ins_cost(125); // XXX
   format %{ "movl    $dst, $mem\t# compressed klass ptr" %}
   ins_encode %{
-    Address addr = build_address($mem$$base, $mem$$index, $mem$$scale, $mem$$disp);
-    Register dst = as_Register($dst$$reg);
-    __ movl(dst, addr);
+    __ movl($dst$$Register, $mem$$Address);
   %}
   ins_pipe(ialu_reg_mem); // XXX
 %}
@@ -6418,6 +6543,102 @@
   ins_pipe(ialu_reg_reg_fat);
 %}
 
+instruct leaPPosIdxScaleOff(rRegP dst, indPosIndexScaleOffset mem)
+%{
+  match(Set dst mem);
+
+  ins_cost(110);
+  format %{ "leaq    $dst, $mem\t# ptr posidxscaleoff" %}
+  opcode(0x8D);
+  ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
+  ins_pipe(ialu_reg_reg_fat);
+%}
+
+// Load Effective Address which uses Narrow (32-bits) oop
+instruct leaPCompressedOopOffset(rRegP dst, indCompressedOopOffset mem)
+%{
+  predicate(UseCompressedOops && (Universe::narrow_oop_shift() != 0));
+  match(Set dst mem);
+
+  ins_cost(110);
+  format %{ "leaq    $dst, $mem\t# ptr compressedoopoff32" %}
+  opcode(0x8D);
+  ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
+  ins_pipe(ialu_reg_reg_fat);
+%}
+
+instruct leaP8Narrow(rRegP dst, indOffset8Narrow mem)
+%{
+  predicate(Universe::narrow_oop_shift() == 0);
+  match(Set dst mem);
+
+  ins_cost(110); // XXX
+  format %{ "leaq    $dst, $mem\t# ptr off8narrow" %}
+  opcode(0x8D);
+  ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
+  ins_pipe(ialu_reg_reg_fat);
+%}
+
+instruct leaP32Narrow(rRegP dst, indOffset32Narrow mem)
+%{
+  predicate(Universe::narrow_oop_shift() == 0);
+  match(Set dst mem);
+
+  ins_cost(110);
+  format %{ "leaq    $dst, $mem\t# ptr off32narrow" %}
+  opcode(0x8D);
+  ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
+  ins_pipe(ialu_reg_reg_fat);
+%}
+
+instruct leaPIdxOffNarrow(rRegP dst, indIndexOffsetNarrow mem)
+%{
+  predicate(Universe::narrow_oop_shift() == 0);
+  match(Set dst mem);
+
+  ins_cost(110);
+  format %{ "leaq    $dst, $mem\t# ptr idxoffnarrow" %}
+  opcode(0x8D);
+  ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
+  ins_pipe(ialu_reg_reg_fat);
+%}
+
+instruct leaPIdxScaleNarrow(rRegP dst, indIndexScaleNarrow mem)
+%{
+  predicate(Universe::narrow_oop_shift() == 0);
+  match(Set dst mem);
+
+  ins_cost(110);
+  format %{ "leaq    $dst, $mem\t# ptr idxscalenarrow" %}
+  opcode(0x8D);
+  ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
+  ins_pipe(ialu_reg_reg_fat);
+%}
+
+instruct leaPIdxScaleOffNarrow(rRegP dst, indIndexScaleOffsetNarrow mem)
+%{
+  predicate(Universe::narrow_oop_shift() == 0);
+  match(Set dst mem);
+
+  ins_cost(110);
+  format %{ "leaq    $dst, $mem\t# ptr idxscaleoffnarrow" %}
+  opcode(0x8D);
+  ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
+  ins_pipe(ialu_reg_reg_fat);
+%}
+
+instruct leaPPosIdxScaleOffNarrow(rRegP dst, indPosIndexScaleOffsetNarrow mem)
+%{
+  predicate(Universe::narrow_oop_shift() == 0);
+  match(Set dst mem);
+
+  ins_cost(110);
+  format %{ "leaq    $dst, $mem\t# ptr posidxscaleoffnarrow" %}
+  opcode(0x8D);
+  ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
+  ins_pipe(ialu_reg_reg_fat);
+%}
+
 instruct loadConI(rRegI dst, immI src)
 %{
   match(Set dst src);
@@ -6528,8 +6749,7 @@
   effect(KILL cr);
   format %{ "xorq    $dst, $src\t# compressed NULL ptr" %}
   ins_encode %{
-    Register dst = $dst$$Register;
-    __ xorq(dst, dst);
+    __ xorq($dst$$Register, $dst$$Register);
   %}
   ins_pipe(ialu_reg);
 %}
@@ -6541,11 +6761,10 @@
   format %{ "movl    $dst, $src\t# compressed ptr" %}
   ins_encode %{
     address con = (address)$src$$constant;
-    Register dst = $dst$$Register;
     if (con == NULL) {
       ShouldNotReachHere();
     } else {
-      __ set_narrow_oop(dst, (jobject)$src$$constant);
+      __ set_narrow_oop($dst$$Register, (jobject)$src$$constant);
     }
   %}
   ins_pipe(ialu_reg_fat); // XXX
@@ -6794,12 +7013,25 @@
   ins_pipe(ialu_mem_reg);
 %}
 
+instruct storeImmP0(memory mem, immP0 zero)
+%{
+  predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
+  match(Set mem (StoreP mem zero));
+
+  ins_cost(125); // XXX
+  format %{ "movq    $mem, R12\t# ptr (R12_heapbase==0)" %}
+  ins_encode %{
+    __ movq($mem$$Address, r12);
+  %}
+  ins_pipe(ialu_mem_reg);
+%}
+
 // Store NULL Pointer, mark word, or other simple pointer constant.
 instruct storeImmP(memory mem, immP31 src)
 %{
   match(Set mem (StoreP mem src));
 
-  ins_cost(125); // XXX
+  ins_cost(150); // XXX
   format %{ "movq    $mem, $src\t# ptr" %}
   opcode(0xC7); /* C7 /0 */
   ins_encode(REX_mem_wide(mem), OpcP, RM_opc_mem(0x00, mem), Con32(src));
@@ -6814,14 +7046,55 @@
   ins_cost(125); // XXX
   format %{ "movl    $mem, $src\t# compressed ptr" %}
   ins_encode %{
-    Address addr = build_address($mem$$base, $mem$$index, $mem$$scale, $mem$$disp);
-    Register src = as_Register($src$$reg);
-    __ movl(addr, src);
+    __ movl($mem$$Address, $src$$Register);
+  %}
+  ins_pipe(ialu_mem_reg);
+%}
+
+instruct storeImmN0(memory mem, immN0 zero)
+%{
+  predicate(Universe::narrow_oop_base() == NULL);
+  match(Set mem (StoreN mem zero));
+
+  ins_cost(125); // XXX
+  format %{ "movl    $mem, R12\t# compressed ptr (R12_heapbase==0)" %}
+  ins_encode %{
+    __ movl($mem$$Address, r12);
   %}
   ins_pipe(ialu_mem_reg);
 %}
 
+instruct storeImmN(memory mem, immN src)
+%{
+  match(Set mem (StoreN mem src));
+
+  ins_cost(150); // XXX
+  format %{ "movl    $mem, $src\t# compressed ptr" %}
+  ins_encode %{
+    address con = (address)$src$$constant;
+    if (con == NULL) {
+      __ movl($mem$$Address, (int32_t)0);
+    } else {
+      __ set_narrow_oop($mem$$Address, (jobject)$src$$constant);
+    }
+  %}
+  ins_pipe(ialu_mem_imm);
+%}
+
 // Store Integer Immediate
+instruct storeImmI0(memory mem, immI0 zero)
+%{
+  predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
+  match(Set mem (StoreI mem zero));
+
+  ins_cost(125); // XXX
+  format %{ "movl    $mem, R12\t# int (R12_heapbase==0)" %}
+  ins_encode %{
+    __ movl($mem$$Address, r12);
+  %}
+  ins_pipe(ialu_mem_reg);
+%}
+
 instruct storeImmI(memory mem, immI src)
 %{
   match(Set mem (StoreI mem src));
@@ -6834,6 +7107,19 @@
 %}
 
 // Store Long Immediate
+instruct storeImmL0(memory mem, immL0 zero)
+%{
+  predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
+  match(Set mem (StoreL mem zero));
+
+  ins_cost(125); // XXX
+  format %{ "movq    $mem, R12\t# long (R12_heapbase==0)" %}
+  ins_encode %{
+    __ movq($mem$$Address, r12);
+  %}
+  ins_pipe(ialu_mem_reg);
+%}
+
 instruct storeImmL(memory mem, immL32 src)
 %{
   match(Set mem (StoreL mem src));
@@ -6846,6 +7132,19 @@
 %}
 
 // Store Short/Char Immediate
+instruct storeImmC0(memory mem, immI0 zero)
+%{
+  predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
+  match(Set mem (StoreC mem zero));
+
+  ins_cost(125); // XXX
+  format %{ "movw    $mem, R12\t# short/char (R12_heapbase==0)" %}
+  ins_encode %{
+    __ movw($mem$$Address, r12);
+  %}
+  ins_pipe(ialu_mem_reg);
+%}
+
 instruct storeImmI16(memory mem, immI16 src)
 %{
   predicate(UseStoreImmI16);
@@ -6859,6 +7158,19 @@
 %}
 
 // Store Byte Immediate
+instruct storeImmB0(memory mem, immI0 zero)
+%{
+  predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
+  match(Set mem (StoreB mem zero));
+
+  ins_cost(125); // XXX
+  format %{ "movb    $mem, R12\t# short/char (R12_heapbase==0)" %}
+  ins_encode %{
+    __ movb($mem$$Address, r12);
+  %}
+  ins_pipe(ialu_mem_reg);
+%}
+
 instruct storeImmB(memory mem, immI8 src)
 %{
   match(Set mem (StoreB mem src));
@@ -6898,6 +7210,19 @@
 %}
 
 // Store CMS card-mark Immediate
+instruct storeImmCM0_reg(memory mem, immI0 zero)
+%{
+  predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
+  match(Set mem (StoreCM mem zero));
+
+  ins_cost(125); // XXX
+  format %{ "movb    $mem, R12\t# CMS card-mark byte 0 (R12_heapbase==0)" %}
+  ins_encode %{
+    __ movb($mem$$Address, r12);
+  %}
+  ins_pipe(ialu_mem_reg);
+%}
+
 instruct storeImmCM0(memory mem, immI0 src)
 %{
   match(Set mem (StoreCM mem src));
@@ -6931,6 +7256,19 @@
 %}
 
 // Store immediate Float value (it is faster than store from XMM register)
+instruct storeF0(memory mem, immF0 zero)
+%{
+  predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
+  match(Set mem (StoreF mem zero));
+
+  ins_cost(25); // XXX
+  format %{ "movl    $mem, R12\t# float 0. (R12_heapbase==0)" %}
+  ins_encode %{
+    __ movl($mem$$Address, r12);
+  %}
+  ins_pipe(ialu_mem_reg);
+%}
+
 instruct storeF_imm(memory mem, immF src)
 %{
   match(Set mem (StoreF mem src));
@@ -6957,6 +7295,7 @@
 // Store immediate double 0.0 (it is faster than store from XMM register)
 instruct storeD0_imm(memory mem, immD0 src)
 %{
+  predicate(!UseCompressedOops || (Universe::narrow_oop_base() != NULL));
   match(Set mem (StoreD mem src));
 
   ins_cost(50);
@@ -6966,6 +7305,19 @@
   ins_pipe(ialu_mem_imm);
 %}
 
+instruct storeD0(memory mem, immD0 zero)
+%{
+  predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
+  match(Set mem (StoreD mem zero));
+
+  ins_cost(25); // XXX
+  format %{ "movq    $mem, R12\t# double 0. (R12_heapbase==0)" %}
+  ins_encode %{
+    __ movq($mem$$Address, r12);
+  %}
+  ins_pipe(ialu_mem_reg);
+%}
+
 instruct storeSSI(stackSlotI dst, rRegI src)
 %{
   match(Set dst src);
@@ -7192,9 +7544,7 @@
   effect(KILL cr);
   format %{ "encode_heap_oop_not_null $dst,$src" %}
   ins_encode %{
-    Register s = $src$$Register;
-    Register d = $dst$$Register;
-    __ encode_heap_oop_not_null(d, s);
+    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
   %}
   ins_pipe(ialu_reg_long);
 %}
@@ -7224,7 +7574,11 @@
   ins_encode %{
     Register s = $src$$Register;
     Register d = $dst$$Register;
-    __ decode_heap_oop_not_null(d, s);
+    if (s != d) {
+      __ decode_heap_oop_not_null(d, s);
+    } else {
+      __ decode_heap_oop_not_null(d);
+    }
   %}
   ins_pipe(ialu_reg_long);
 %}
@@ -11389,8 +11743,9 @@
 
 // This will generate a signed flags result. This should be OK since
 // any compare to a zero should be eq/neq.
-instruct testP_reg_mem(rFlagsReg cr, memory op, immP0 zero)
-%{
+instruct testP_mem(rFlagsReg cr, memory op, immP0 zero)
+%{
+  predicate(!UseCompressedOops || (Universe::narrow_oop_base() != NULL));
   match(Set cr (CmpP (LoadP op) zero));
 
   ins_cost(500); // XXX
@@ -11401,13 +11756,24 @@
   ins_pipe(ialu_cr_reg_imm);
 %}
 
+instruct testP_mem_reg0(rFlagsReg cr, memory mem, immP0 zero)
+%{
+  predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
+  match(Set cr (CmpP (LoadP mem) zero));
+
+  format %{ "cmpq    R12, $mem\t# ptr (R12_heapbase==0)" %}
+  ins_encode %{
+    __ cmpq(r12, $mem$$Address);
+  %}
+  ins_pipe(ialu_cr_reg_mem);
+%}
 
 instruct compN_rReg(rFlagsRegU cr, rRegN op1, rRegN op2)
 %{
   match(Set cr (CmpN op1 op2));
 
   format %{ "cmpl    $op1, $op2\t# compressed ptr" %}
-  ins_encode %{ __ cmpl(as_Register($op1$$reg), as_Register($op2$$reg)); %}
+  ins_encode %{ __ cmpl($op1$$Register, $op2$$Register); %}
   ins_pipe(ialu_cr_reg_reg);
 %}
 
@@ -11415,11 +11781,30 @@
 %{
   match(Set cr (CmpN src (LoadN mem)));
 
-  ins_cost(500); // XXX
-  format %{ "cmpl    $src, mem\t# compressed ptr" %}
+  format %{ "cmpl    $src, $mem\t# compressed ptr" %}
+  ins_encode %{
+    __ cmpl($src$$Register, $mem$$Address);
+  %}
+  ins_pipe(ialu_cr_reg_mem);
+%}
+
+instruct compN_rReg_imm(rFlagsRegU cr, rRegN op1, immN op2) %{
+  match(Set cr (CmpN op1 op2));
+
+  format %{ "cmpl    $op1, $op2\t# compressed ptr" %}
   ins_encode %{
-    Address adr = build_address($mem$$base, $mem$$index, $mem$$scale, $mem$$disp);
-    __ cmpl(as_Register($src$$reg), adr);
+    __ cmp_narrow_oop($op1$$Register, (jobject)$op2$$constant);
+  %}
+  ins_pipe(ialu_cr_reg_imm);
+%}
+
+instruct compN_mem_imm(rFlagsRegU cr, memory mem, immN src)
+%{
+  match(Set cr (CmpN src (LoadN mem)));
+
+  format %{ "cmpl    $mem, $src\t# compressed ptr" %}
+  ins_encode %{
+    __ cmp_narrow_oop($mem$$Address, (jobject)$src$$constant);
   %}
   ins_pipe(ialu_cr_reg_mem);
 %}
@@ -11432,15 +11817,27 @@
   ins_pipe(ialu_cr_reg_imm);
 %}
 
-instruct testN_reg_mem(rFlagsReg cr, memory mem, immN0 zero)
-%{
+instruct testN_mem(rFlagsReg cr, memory mem, immN0 zero)
+%{
+  predicate(Universe::narrow_oop_base() != NULL);
   match(Set cr (CmpN (LoadN mem) zero));
 
   ins_cost(500); // XXX
   format %{ "testl   $mem, 0xffffffff\t# compressed ptr" %}
   ins_encode %{
-    Address addr = build_address($mem$$base, $mem$$index, $mem$$scale, $mem$$disp);
-    __ cmpl(addr, (int)0xFFFFFFFF);
+    __ cmpl($mem$$Address, (int)0xFFFFFFFF);
+  %}
+  ins_pipe(ialu_cr_reg_mem);
+%}
+
+instruct testN_mem_reg0(rFlagsReg cr, memory mem, immN0 zero)
+%{
+  predicate(Universe::narrow_oop_base() == NULL);
+  match(Set cr (CmpN (LoadN mem) zero));
+
+  format %{ "cmpl    R12, $mem\t# compressed ptr (R12_heapbase==0)" %}
+  ins_encode %{
+    __ cmpl(r12, $mem$$Address);
   %}
   ins_pipe(ialu_cr_reg_mem);
 %}
@@ -11472,7 +11869,6 @@
 %{
   match(Set cr (CmpL op1 (LoadL op2)));
 
-  ins_cost(500); // XXX
   format %{ "cmpq    $op1, $op2" %}
   opcode(0x3B); /* Opcode 3B /r */
   ins_encode(REX_reg_mem_wide(op1, op2), OpcP, reg_mem(op1, op2));
@@ -11756,7 +12152,6 @@
                                      rdi_RegP result)
 %{
   match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
-  predicate(!UseCompressedOops); // decoding oop kills condition codes
   effect(KILL rcx, KILL result);
 
   ins_cost(1000);
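
Two consequences of the zero base run through the .ad hunks above. First, with shift == 0 a DecodeN is the identity, so the new *Narrow operands feed a compressed register straight into x86 addressing modes instead of materializing the decoded oop first. Second, r12 always holds narrow_oop_base, so when that base is NULL, r12 is a known-zero register and the storeImm*0 and testN_mem_reg0 patterns store or compare r12 instead of encoding an immediate zero. A tiny scalar sketch of the addressing point (hypothetical helper):

    #include <cstdint>

    // indOffset32Narrow in scalar form: with narrow_oop_shift == 0 the
    // compressed value already is the address, so base($reg) + disp($off)
    // needs neither r12 nor a scale.
    uint64_t effective_address_narrow(uint32_t narrow_reg, int32_t disp) {
      return (uint64_t)narrow_reg + (int64_t)disp;
    }
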
--- a/src/os/linux/vm/os_linux.cpp	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/os/linux/vm/os_linux.cpp	Thu Mar 12 10:37:46 2009 -0700
@@ -2582,7 +2582,7 @@
 #define SHM_HUGETLB 04000
 #endif
 
-char* os::reserve_memory_special(size_t bytes) {
+char* os::reserve_memory_special(size_t bytes, char* req_addr) {
   assert(UseLargePages, "only for large pages");
 
   key_t key = IPC_PRIVATE;
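
The new req_addr parameter lets heap reservation ask for an address that keeps the cheapest compressed-oop mode reachable: heap end below 4gb first, then below 32gb, then anywhere. A hedged Linux-style sketch of that placement strategy with hypothetical names; the hint is best effort (no MAP_FIXED), mirroring how a failed request falls back:

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdint>

    // Try to reserve at req_addr; treat any other placement as a miss.
    static char* try_reserve_at(char* req_addr, size_t bytes) {
      void* p = mmap(req_addr, bytes, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      if (p == MAP_FAILED) return nullptr;
      if (req_addr != nullptr && p != req_addr) { munmap(p, bytes); return nullptr; }
      return (char*)p;
    }

    char* reserve_java_heap(size_t bytes) {
      const uint64_t unscaled_max  = 1ull << 32;        // heap end below 4gb
      const uint64_t zerobased_max = 1ull << (32 + 3);  // heap end below 32gb
      char* addr = nullptr;
      if (bytes < unscaled_max)
        addr = try_reserve_at((char*)(unscaled_max - bytes), bytes);
      if (addr == nullptr && bytes < zerobased_max)
        addr = try_reserve_at((char*)(zerobased_max - bytes), bytes);
      if (addr == nullptr)
        addr = try_reserve_at(nullptr, bytes);          // base + shift mode
      return addr;
    }
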
--- a/src/os/solaris/dtrace/generateJvmOffsets.cpp	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/os/solaris/dtrace/generateJvmOffsets.cpp	Thu Mar 12 10:37:46 2009 -0700
@@ -249,6 +249,10 @@
 
   printf("\n");
 
+  GEN_OFFS(NarrowOopStruct, _base);
+  GEN_OFFS(NarrowOopStruct, _shift);
+  printf("\n");
+
   GEN_VALUE(SIZE_HeapBlockHeader, sizeof(HeapBlock::Header));
   GEN_SIZE(oopDesc);
   GEN_SIZE(constantPoolOopDesc);
--- a/src/os/solaris/dtrace/jhelper.d	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/os/solaris/dtrace/jhelper.d	Thu Mar 12 10:37:46 2009 -0700
@@ -46,7 +46,10 @@
 extern pointer __1cJCodeCacheF_heap_;
 extern pointer __1cIUniverseP_methodKlassObj_;
 extern pointer __1cIUniverseO_collectedHeap_;
-extern pointer __1cIUniverseK_heap_base_;
+extern pointer __1cIUniverseL_narrow_oop_;
+#ifdef _LP64
+extern pointer UseCompressedOops;
+#endif
 
 extern pointer __1cHnmethodG__vtbl_;
 extern pointer __1cKBufferBlobG__vtbl_;
@@ -56,6 +59,7 @@
 #define copyin_uint16(ADDR) *(uint16_t*) copyin((pointer) (ADDR), sizeof(uint16_t))
 #define copyin_uint32(ADDR) *(uint32_t*) copyin((pointer) (ADDR), sizeof(uint32_t))
 #define copyin_int32(ADDR)  *(int32_t*)  copyin((pointer) (ADDR), sizeof(int32_t))
+#define copyin_uint8(ADDR)  *(uint8_t*)  copyin((pointer) (ADDR), sizeof(uint8_t))
 
 #define SAME(x) x
 #define copyin_offset(JVM_CONST)  JVM_CONST = \
@@ -132,6 +136,9 @@
   copyin_offset(SIZE_oopDesc);
   copyin_offset(SIZE_constantPoolOopDesc);
 
+  copyin_offset(OFFSET_NarrowOopStruct_base);
+  copyin_offset(OFFSET_NarrowOopStruct_shift);
+
   /*
    * The PC to translate is in arg0.
    */
@@ -151,9 +158,19 @@
 
   this->Universe_methodKlassOop = copyin_ptr(&``__1cIUniverseP_methodKlassObj_);
   this->CodeCache_heap_address = copyin_ptr(&``__1cJCodeCacheF_heap_);
-  this->Universe_heap_base = copyin_ptr(&``__1cIUniverseK_heap_base_);
 
   /* Reading volatile values */
+#ifdef _LP64
+  this->Use_Compressed_Oops  = copyin_uint8(&``UseCompressedOops);
+#else
+  this->Use_Compressed_Oops  = 0;
+#endif
+
+  this->Universe_narrow_oop_base  = copyin_ptr(&``__1cIUniverseL_narrow_oop_ +
+                                               OFFSET_NarrowOopStruct_base);
+  this->Universe_narrow_oop_shift = copyin_int32(&``__1cIUniverseL_narrow_oop_ +
+                                                 OFFSET_NarrowOopStruct_shift);
+
   this->CodeCache_low = copyin_ptr(this->CodeCache_heap_address + 
       OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
 
@@ -295,7 +312,7 @@
 
 dtrace:helper:ustack:
 /!this->done && this->vtbl == this->BufferBlob_vtbl &&
-this->Universe_heap_base == NULL &&
+this->Use_Compressed_Oops == 0 &&
 this->methodOopPtr > this->heap_start && this->methodOopPtr < this->heap_end/
 {
   MARK_LINE;
@@ -306,7 +323,7 @@
 
 dtrace:helper:ustack:
 /!this->done && this->vtbl == this->BufferBlob_vtbl &&
-this->Universe_heap_base != NULL &&
+this->Use_Compressed_Oops != 0 &&
 this->methodOopPtr > this->heap_start && this->methodOopPtr < this->heap_end/
 {
   MARK_LINE;
@@ -314,8 +331,8 @@
   * Read compressed pointer and decode heap oop, same as oop.inline.hpp
    */
   this->cklass = copyin_uint32(this->methodOopPtr + OFFSET_oopDesc_metadata);
-  this->klass = (uint64_t)((uintptr_t)this->Universe_heap_base +
-                ((uintptr_t)this->cklass << 3));
+  this->klass = (uint64_t)((uintptr_t)this->Universe_narrow_oop_base +
+                ((uintptr_t)this->cklass << this->Universe_narrow_oop_shift));
   this->methodOop = this->klass == this->Universe_methodKlassOop;
   this->done = !this->methodOop;
 }
--- a/src/os/solaris/dtrace/libjvm_db.c	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/os/solaris/dtrace/libjvm_db.c	Thu Mar 12 10:37:46 2009 -0700
@@ -146,13 +146,17 @@
   uint64_t BufferBlob_vtbl;
   uint64_t RuntimeStub_vtbl;
 
+  uint64_t Use_Compressed_Oops_address;
   uint64_t Universe_methodKlassObj_address;
+  uint64_t Universe_narrow_oop_base_address;
+  uint64_t Universe_narrow_oop_shift_address;
   uint64_t CodeCache_heap_address;
-  uint64_t Universe_heap_base_address;
 
   /* Volatiles */
+  uint8_t  Use_Compressed_Oops;
   uint64_t Universe_methodKlassObj;
-  uint64_t Universe_heap_base;
+  uint64_t Universe_narrow_oop_base;
+  uint32_t Universe_narrow_oop_shift;
   uint64_t CodeCache_low;
   uint64_t CodeCache_high;
   uint64_t CodeCache_segmap_low;
@@ -279,8 +283,11 @@
       if (strcmp("_methodKlassObj", vmp->fieldName) == 0) {
         J->Universe_methodKlassObj_address = vmp->address;
       }
-      if (strcmp("_heap_base", vmp->fieldName) == 0) {
-        J->Universe_heap_base_address = vmp->address;
+      if (strcmp("_narrow_oop._base", vmp->fieldName) == 0) {
+        J->Universe_narrow_oop_base_address = vmp->address;
+      }
+      if (strcmp("_narrow_oop._shift", vmp->fieldName) == 0) {
+        J->Universe_narrow_oop_shift_address = vmp->address;
       }
     }
     CHECK_FAIL(err);
@@ -298,14 +305,39 @@
   return -1;
 }
 
+static int find_symbol(jvm_agent_t* J, const char *name, uint64_t* valuep) {
+  psaddr_t sym_addr;
+  int err;
+
+  err = ps_pglobal_lookup(J->P, LIBJVM_SO, name, &sym_addr);
+  if (err != PS_OK) goto fail;
+  *valuep = sym_addr;
+  return PS_OK;
+
+ fail:
+  return err;
+}
+
 static int read_volatiles(jvm_agent_t* J) {
   uint64_t ptr;
   int err;
 
+  err = find_symbol(J, "UseCompressedOops", &J->Use_Compressed_Oops_address);
+  if (err == PS_OK) {
+    err = ps_pread(J->P,  J->Use_Compressed_Oops_address, &J->Use_Compressed_Oops, sizeof(uint8_t));
+    CHECK_FAIL(err);
+  } else {
+    J->Use_Compressed_Oops = 0;
+  }
+
   err = read_pointer(J, J->Universe_methodKlassObj_address, &J->Universe_methodKlassObj);
   CHECK_FAIL(err);
-  err = read_pointer(J, J->Universe_heap_base_address, &J->Universe_heap_base);
+
+  err = read_pointer(J, J->Universe_narrow_oop_base_address, &J->Universe_narrow_oop_base);
   CHECK_FAIL(err);
+  err = ps_pread(J->P,  J->Universe_narrow_oop_shift_address, &J->Universe_narrow_oop_shift, sizeof(uint32_t));
+  CHECK_FAIL(err);
+
   err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_memory +
                      OFFSET_VirtualSpace_low, &J->CodeCache_low);
   CHECK_FAIL(err);
@@ -374,19 +406,6 @@
   return -1;
 }
 
-static int find_symbol(jvm_agent_t* J, const char *name, uint64_t* valuep) {
-  psaddr_t sym_addr;
-  int err;
-
-  err = ps_pglobal_lookup(J->P, LIBJVM_SO, name, &sym_addr);
-  if (err != PS_OK) goto fail;
-  *valuep = sym_addr;
-  return PS_OK;
-
- fail:
-  return err;
-}
-
 static int find_jlong_constant(jvm_agent_t* J, const char *name, uint64_t* valuep) {
   psaddr_t sym_addr;
   int err = ps_pglobal_lookup(J->P, LIBJVM_SO, name, &sym_addr);
@@ -458,14 +477,14 @@
 static int is_methodOop(jvm_agent_t* J, uint64_t methodOopPtr) {
   uint64_t klass;
   int err;
-  // If heap_base is nonnull, this was a compressed oop.
-  if (J->Universe_heap_base != NULL) {
+  // If UseCompressedOops is set, this was a compressed oop.
+  if (J->Use_Compressed_Oops != 0) {
     uint32_t cklass;
     err = read_compressed_pointer(J, methodOopPtr + OFFSET_oopDesc_metadata,
           &cklass);
     // decode heap oop, same as oop.inline.hpp
-    klass = (uint64_t)((uintptr_t)J->Universe_heap_base +
-            ((uintptr_t)cklass << 3));
+    klass = (uint64_t)((uintptr_t)J->Universe_narrow_oop_base +
+            ((uintptr_t)cklass << J->Universe_narrow_oop_shift));
   } else {
     err = read_pointer(J, methodOopPtr + OFFSET_oopDesc_metadata, &klass);
   }
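
Both jhelper.d above and libjvm_db.c now read the base and shift out of
Universe::_narrow_oop instead of hard-wiring the old heap_base and a shift
of 3. A small worked example of the decode they perform; all values are
illustrative:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uintptr_t base   = 0;           // narrow oop base (zero based mode)
      int       shift  = 3;           // narrow oop shift
      uint32_t  cklass = 0x12345678;  // 32-bit word read from the target heap

      uint64_t klass = (uint64_t)(base + ((uintptr_t)cklass << shift));
      printf("decoded klass = 0x%llx\n", (unsigned long long)klass);
      // Prints 0x91a2b3c0. The shift now comes from the target VM and
      // may be 0 in unscaled mode, where the decode is the identity.
      return 0;
    }
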
--- a/src/os/solaris/vm/os_solaris.cpp	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/os/solaris/vm/os_solaris.cpp	Thu Mar 12 10:37:46 2009 -0700
@@ -3220,7 +3220,7 @@
   return true;
 }
 
-char* os::reserve_memory_special(size_t bytes) {
+char* os::reserve_memory_special(size_t bytes, char* addr) {
   assert(UseLargePages && UseISM, "only for ISM large pages");
 
   size_t size = bytes;
--- a/src/os/windows/vm/os_windows.cpp	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/os/windows/vm/os_windows.cpp	Thu Mar 12 10:37:46 2009 -0700
@@ -2595,7 +2595,7 @@
   return true;
 }
 
-char* os::reserve_memory_special(size_t bytes) {
+char* os::reserve_memory_special(size_t bytes, char* addr) {
 
   if (UseLargePagesIndividualAllocation) {
     if (TracePageSizes && Verbose) {
@@ -2615,7 +2615,7 @@
         "use -XX:-UseLargePagesIndividualAllocation to turn off");
       return NULL;
     }
-    p_buf = (char *) VirtualAlloc(NULL,
+    p_buf = (char *) VirtualAlloc(addr,
                                  size_of_reserve,  // size of Reserve
                                  MEM_RESERVE,
                                  PAGE_EXECUTE_READWRITE);
--- a/src/os_cpu/linux_sparc/vm/globals_linux_sparc.hpp	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/os_cpu/linux_sparc/vm/globals_linux_sparc.hpp	Thu Mar 12 10:37:46 2009 -0700
@@ -30,5 +30,7 @@
 define_pd_global(uintx, JVMInvokeMethodSlack,    12288);
 define_pd_global(intx, CompilerThreadStackSize,  0);
 
+// Only used on 64 bit platforms
+define_pd_global(uintx, HeapBaseMinAddress,      4*G);
 // Only used on 64 bit Windows platforms
 define_pd_global(bool, UseVectoredExceptions, false);
--- a/src/os_cpu/linux_x86/vm/globals_linux_x86.hpp	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/os_cpu/linux_x86/vm/globals_linux_x86.hpp	Thu Mar 12 10:37:46 2009 -0700
@@ -43,5 +43,7 @@
 
 define_pd_global(uintx, JVMInvokeMethodSlack,    8192);
 
+// Only used on 64 bit platforms
+define_pd_global(uintx, HeapBaseMinAddress,      2*G);
 // Only used on 64 bit Windows platforms
 define_pd_global(bool, UseVectoredExceptions,    false);
--- a/src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp	Thu Mar 12 10:37:46 2009 -0700
@@ -30,5 +30,9 @@
 define_pd_global(uintx, JVMInvokeMethodSlack,    12288);
 define_pd_global(intx, CompilerThreadStackSize,  0);
 
+// Only used on 64 bit platforms
+define_pd_global(uintx, HeapBaseMinAddress,      4*G);
 // Only used on 64 bit Windows platforms
 define_pd_global(bool, UseVectoredExceptions,    false);
+
+
--- a/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp	Thu Mar 12 10:37:46 2009 -0700
@@ -46,5 +46,7 @@
 
 define_pd_global(intx, CompilerThreadStackSize,  0);
 
+// Only used on 64 bit platforms
+define_pd_global(uintx, HeapBaseMinAddress,      256*M);
 // Only used on 64 bit Windows platforms
 define_pd_global(bool, UseVectoredExceptions,    false);
--- a/src/os_cpu/windows_x86/vm/globals_windows_x86.hpp	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/os_cpu/windows_x86/vm/globals_windows_x86.hpp	Thu Mar 12 10:37:46 2009 -0700
@@ -45,5 +45,7 @@
 
 define_pd_global(uintx, JVMInvokeMethodSlack,    8192);
 
+// Only used on 64 bit platforms
+define_pd_global(uintx, HeapBaseMinAddress,      2*G);
 // Only used on 64 bit Windows platforms
 define_pd_global(bool, UseVectoredExceptions,    false);
--- a/src/share/vm/asm/assembler.cpp	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/share/vm/asm/assembler.cpp	Thu Mar 12 10:37:46 2009 -0700
@@ -321,16 +321,19 @@
 bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
   // Exception handler checks the nmethod's implicit null checks table
   // only when this method returns false.
-  if (UseCompressedOops) {
+#ifdef _LP64
+  if (UseCompressedOops && Universe::narrow_oop_base() != NULL) {
+    assert (Universe::heap() != NULL, "java heap should be initialized");
     // The first page after heap_base is unmapped and
     // the 'offset' is equal to [heap_base + offset] for
     // narrow oop implicit null checks.
-    uintptr_t heap_base = (uintptr_t)Universe::heap_base();
-    if ((uintptr_t)offset >= heap_base) {
+    uintptr_t base = (uintptr_t)Universe::narrow_oop_base();
+    if ((uintptr_t)offset >= base) {
       // Normalize offset for the next check.
-      offset = (intptr_t)(pointer_delta((void*)offset, (void*)heap_base, 1));
+      offset = (intptr_t)(pointer_delta((void*)offset, (void*)base, 1));
     }
   }
+#endif
   return offset < 0 || os::vm_page_size() <= offset;
 }
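
With a non-zero base, an implicit null check on a narrow oop faults at
[base + field_offset], so needs_explicit_null_check() strips the base before
the usual page-size test; with a zero base nothing needs stripping, hence
the new narrow_oop_base() != NULL guard. A compact sketch under those
assumptions; heap_base and page_size are stand-ins for the VM's accessors:

    #include <cstdint>

    static uintptr_t heap_base = 0x800000000ULL;  // heap based mode (32Gb)
    static uintptr_t page_size = 4096;

    bool needs_explicit_null_check(intptr_t offset) {
      if (heap_base != 0 && (uintptr_t)offset >= heap_base) {
        // The fault address was [heap_base + field_offset]: strip the
        // base so the page-size test sees the raw field offset.
        offset = (intptr_t)((uintptr_t)offset - heap_base);
      }
      return offset < 0 || page_size <= (uintptr_t)offset;
    }

    int main() {
      // A fault at heap_base + 24 normalizes to 24, within the guard page.
      return needs_explicit_null_check((intptr_t)(heap_base + 24)) ? 1 : 0;
    }
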
 
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Mar 12 10:37:46 2009 -0700
@@ -1360,9 +1360,34 @@
   // Reserve the maximum.
   PermanentGenerationSpec* pgs = collector_policy()->permanent_generation();
   // Includes the perm-gen.
+
+  const size_t total_reserved = max_byte_size + pgs->max_size();
+  char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
+
   ReservedSpace heap_rs(max_byte_size + pgs->max_size(),
                         HeapRegion::GrainBytes,
-                        false /*ism*/);
+                        false /*ism*/, addr);
+
+  if (UseCompressedOops) {
+    if (addr != NULL && !heap_rs.is_reserved()) {
+      // Failed to reserve at specified address - the requested memory
+      // region is already taken, for example, by the 'java' launcher.
+      // Try again to reserve the heap higher.
+      addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
+      ReservedSpace heap_rs0(total_reserved, HeapRegion::GrainBytes,
+                             false /*ism*/, addr);
+      if (addr != NULL && !heap_rs0.is_reserved()) {
+        // Failed to reserve at specified address again - give up.
+        addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
+        assert(addr == NULL, "");
+        ReservedSpace heap_rs1(total_reserved, HeapRegion::GrainBytes,
+                               false /*ism*/, addr);
+        heap_rs = heap_rs1;
+      } else {
+        heap_rs = heap_rs0;
+      }
+    }
+  }
 
   if (!heap_rs.is_reserved()) {
     vm_exit_during_initialization("Could not reserve enough space for object heap");
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Thu Mar 12 10:37:46 2009 -0700
@@ -104,12 +104,38 @@
                   og_min_size, og_max_size,
                   yg_min_size, yg_max_size);
 
+  const size_t total_reserved = pg_max_size + og_max_size + yg_max_size;
+  char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
+
   // The main part of the heap (old gen + young gen) can often use a larger page
   // size than is needed or wanted for the perm gen.  Use the "compound
   // alignment" ReservedSpace ctor to avoid having to use the same page size for
   // all gens.
+
   ReservedHeapSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size,
-                            og_align);
+                            og_align, addr);
+
+  if (UseCompressedOops) {
+    if (addr != NULL && !heap_rs.is_reserved()) {
+      // Failed to reserve at specified address - the requested memory
+      // region is already taken, for example, by the 'java' launcher.
+      // Try again to reserve the heap higher.
+      addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
+      ReservedHeapSpace heap_rs0(pg_max_size, pg_align, og_max_size + yg_max_size,
+                                 og_align, addr);
+      if (addr != NULL && !heap_rs0.is_reserved()) {
+        // Failed to reserve at specified address again - give up.
+        addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
+        assert(addr == NULL, "");
+        ReservedHeapSpace heap_rs1(pg_max_size, pg_align, og_max_size + yg_max_size,
+                                   og_align, addr);
+        heap_rs = heap_rs1;
+      } else {
+        heap_rs = heap_rs0;
+      }
+    }
+  }
+
   os::trace_page_sizes("ps perm", pg_min_size, pg_max_size, pg_page_sz,
                        heap_rs.base(), pg_max_size);
   os::trace_page_sizes("ps main", og_min_size + yg_min_size,
--- a/src/share/vm/memory/genCollectedHeap.cpp	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Thu Mar 12 10:37:46 2009 -0700
@@ -218,6 +218,31 @@
     heap_address -= total_reserved;
   } else {
     heap_address = NULL;  // any address will do.
+    if (UseCompressedOops) {
+      heap_address = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
+      *_total_reserved = total_reserved;
+      *_n_covered_regions = n_covered_regions;
+      *heap_rs = ReservedHeapSpace(total_reserved, alignment,
+                                   UseLargePages, heap_address);
+
+      if (heap_address != NULL && !heap_rs->is_reserved()) {
+        // Failed to reserve at specified address - the requested memory
+        // region is already taken, for example, by the 'java' launcher.
+        // Try again to reserve the heap higher.
+        heap_address = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
+        *heap_rs = ReservedHeapSpace(total_reserved, alignment,
+                                     UseLargePages, heap_address);
+
+        if (heap_address != NULL && !heap_rs->is_reserved()) {
+          // Failed to reserve at specified address again - give up.
+          heap_address = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
+          assert(heap_address == NULL, "");
+          *heap_rs = ReservedHeapSpace(total_reserved, alignment,
+                                       UseLargePages, heap_address);
+        }
+      }
+      return heap_address;
+    }
   }
 
   *_total_reserved = total_reserved;
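
The generational heap above, the parallel scavenge heap, and G1 all follow
the same three-step ladder: try the unscaled address, fall back to a zero
based address, then let the OS choose (heap based). A condensed sketch;
preferred() and reserve() are hypothetical stand-ins for
Universe::preferred_heap_base() and a ReservedHeapSpace that can fail at a
requested address:

    #include <cstddef>

    enum Mode { Unscaled, ZeroBased, HeapBased };

    static char* preferred(size_t /*size*/, Mode m) {
      return m == HeapBased ? NULL : (char*)0x100000000ULL;  // illustrative
    }
    static bool reserve(char* /*addr*/, size_t /*size*/) { return true; }

    static char* reserve_heap(size_t size) {
      char* addr = preferred(size, Unscaled);   // heap top on the 4Gb line
      if (addr != NULL && !reserve(addr, size)) {
        addr = preferred(size, ZeroBased);      // heap top on the 32Gb line
        if (addr != NULL && !reserve(addr, size)) {
          addr = preferred(size, HeapBased);    // NULL: any address will do
          reserve(addr, size);
        }
      }
      return addr;
    }

    int main() {
      char* base = reserve_heap((size_t)1 << 30);
      return base == NULL ? 1 : 0;  // non-NULL here: unscaled attempt succeeds
    }
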
--- a/src/share/vm/memory/universe.cpp	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/share/vm/memory/universe.cpp	Thu Mar 12 10:37:46 2009 -0700
@@ -99,7 +99,8 @@
 size_t          Universe::_heap_used_at_last_gc = 0;
 
 CollectedHeap*  Universe::_collectedHeap = NULL;
-address         Universe::_heap_base = NULL;
+
+NarrowOopStruct Universe::_narrow_oop = { NULL, 0, true };
 
 
 void Universe::basic_type_classes_do(void f(klassOop)) {
@@ -729,6 +730,53 @@
   return JNI_OK;
 }
 
+// Choose the heap base address and oop encoding mode
+// when compressed oops are used:
+// Unscaled  - Use 32-bit oops without encoding when
+//     HeapBaseMinAddress + heap_size < 4Gb
+// ZeroBased - Use zero based compressed oops with encoding when
+//     HeapBaseMinAddress + heap_size < 32Gb
+// HeapBased - Use compressed oops with heap base + encoding.
+
+// 4Gb
+static const uint64_t NarrowOopHeapMax = (uint64_t(max_juint) + 1);
+// 32Gb
+static const uint64_t OopEncodingHeapMax = NarrowOopHeapMax << LogMinObjAlignmentInBytes;
+
+char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
+#ifdef _LP64
+  if (UseCompressedOops) {
+    assert(mode == UnscaledNarrowOop  ||
+           mode == ZeroBasedNarrowOop ||
+           mode == HeapBasedNarrowOop, "mode is invalid");
+
+    const size_t total_size = heap_size + HeapBaseMinAddress;
+    if (total_size <= OopEncodingHeapMax && (mode != HeapBasedNarrowOop)) {
+      if (total_size <= NarrowOopHeapMax && (mode == UnscaledNarrowOop) &&
+          (Universe::narrow_oop_shift() == 0)) {
+        // Use 32-bit oops without encoding and
+        // place heap's top on the 4Gb boundary
+        return (char*)(NarrowOopHeapMax - heap_size);
+      } else {
+        // Can't reserve with NarrowOopShift == 0
+        Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
+        if (mode == UnscaledNarrowOop ||
+            (mode == ZeroBasedNarrowOop && total_size <= NarrowOopHeapMax)) {
+          // Use zero based compressed oops with encoding and
+          // place heap's top on the 32Gb boundary in case
+          // total_size > 4Gb or failed to reserve below 4Gb.
+          return (char*)(OopEncodingHeapMax - heap_size);
+        }
+      }
+    } else {
+      // Can't reserve below 32Gb.
+      Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
+    }
+  }
+#endif
+  return NULL; // also return NULL (don't care) on a 32-bit VM
+}
+
 jint Universe::initialize_heap() {
 
   if (UseParallelGC) {
@@ -773,6 +821,8 @@
   if (status != JNI_OK) {
     return status;
   }
+
+#ifdef _LP64
   if (UseCompressedOops) {
     // Subtract a page because something can get allocated at heap base.
     // This also makes implicit null checking work, because the
@@ -780,8 +830,49 @@
     // See needs_explicit_null_check.
     // Only set the heap base for compressed oops because it indicates
     // compressed oops for pstack code.
-    Universe::_heap_base = Universe::heap()->base() - os::vm_page_size();
+    if (PrintCompressedOopsMode) {
+      tty->cr();
+      tty->print("heap address: "PTR_FORMAT, Universe::heap()->base());
+    }
+    if ((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax) {
+      // Can't reserve heap below 32Gb.
+      Universe::set_narrow_oop_base(Universe::heap()->base() - os::vm_page_size());
+      Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
+      if (PrintCompressedOopsMode) {
+        tty->print(", Compressed Oops with base: "PTR_FORMAT, Universe::narrow_oop_base());
+      }
+    } else {
+      Universe::set_narrow_oop_base(0);
+      if (PrintCompressedOopsMode) {
+        tty->print(", zero based Compressed Oops");
+      }
+#ifdef _WIN64
+      if (!Universe::narrow_oop_use_implicit_null_checks()) {
+        // Don't need guard page for implicit checks in indexed addressing
+        // mode with zero based Compressed Oops.
+        Universe::set_narrow_oop_use_implicit_null_checks(true);
+      }
+#endif //  _WIN64
+      if ((uint64_t)Universe::heap()->reserved_region().end() > NarrowOopHeapMax) {
+        // Can't reserve heap below 4Gb.
+        Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
+      } else {
+        assert(Universe::narrow_oop_shift() == 0, "use unscaled narrow oop");
+        if (PrintCompressedOopsMode) {
+          tty->print(", 32-bits Oops");
+        }
+      }
+    }
+    if (PrintCompressedOopsMode) {
+      tty->cr();
+      tty->cr();
+    }
   }
+  assert(Universe::narrow_oop_base() == (Universe::heap()->base() - os::vm_page_size()) ||
+         Universe::narrow_oop_base() == NULL, "invalid value");
+  assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
+         Universe::narrow_oop_shift() == 0, "invalid value");
+#endif
 
   // We will never reach the CATCH below since Exceptions::_throw will cause
   // the VM to exit if an exception is thrown during initialization
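
The thresholds in preferred_heap_base() are easiest to see in numbers.
Assuming the linux_x86 default of HeapBaseMinAddress = 2G and a narrow oop
shift of 3 (8-byte object alignment), a worked selection:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t G = 1024ULL * 1024 * 1024;
      const uint64_t NarrowOopHeapMax   = 4 * G;    // 1 << 32
      const uint64_t OopEncodingHeapMax = 32 * G;   // 4Gb << 3
      const uint64_t HeapBaseMinAddress = 2 * G;    // linux_x86 default

      uint64_t heap_sizes[] = { 1 * G, 16 * G, 64 * G };
      for (int i = 0; i < 3; i++) {
        uint64_t total = heap_sizes[i] + HeapBaseMinAddress;
        const char* mode =
            total <= NarrowOopHeapMax   ? "Unscaled  (base 0, shift 0)" :
            total <= OopEncodingHeapMax ? "ZeroBased (base 0, shift 3)" :
                                          "HeapBased (base != 0, shift 3)";
        printf("%2llu Gb heap -> %s\n",
               (unsigned long long)(heap_sizes[i] / G), mode);
      }
      return 0;
    }
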
--- a/src/share/vm/memory/universe.hpp	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/share/vm/memory/universe.hpp	Thu Mar 12 10:37:46 2009 -0700
@@ -90,6 +90,19 @@
   methodOop get_methodOop();
 };
 
+// For UseCompressedOops.
+struct NarrowOopStruct {
+  // Base address for oop-within-java-object materialization.
+  // NULL if using wide oops or zero based narrow oops.
+  address _base;
+  // Number of shift bits for encoding/decoding narrow oops.
+  // 0 if using wide oops or zero based unscaled narrow oops,
+  // LogMinObjAlignmentInBytes otherwise.
+  int     _shift;
+  // Generate code with implicit null checks for narrow oops.
+  bool    _use_implicit_null_checks;
+};
+
 
 class Universe: AllStatic {
   // Ugh.  Universe is much too friendly.
@@ -181,9 +194,9 @@
 
   // The particular choice of collected heap.
   static CollectedHeap* _collectedHeap;
-  // Base address for oop-within-java-object materialization.
-  // NULL if using wide oops.  Doubles as heap oop null value.
-  static address        _heap_base;
+
+  // For UseCompressedOops.
+  static struct NarrowOopStruct _narrow_oop;
 
   // array of dummy objects used with +FullGCAlot
   debug_only(static objArrayOop _fullgc_alot_dummy_array;)
@@ -328,8 +341,25 @@
   static CollectedHeap* heap() { return _collectedHeap; }
 
   // For UseCompressedOops
-  static address heap_base()       { return _heap_base; }
-  static address* heap_base_addr() { return &_heap_base; }
+  static address* narrow_oop_base_addr()              { return &_narrow_oop._base; }
+  static address  narrow_oop_base()                   { return  _narrow_oop._base; }
+  static int      narrow_oop_shift()                  { return  _narrow_oop._shift; }
+  static void     set_narrow_oop_base(address base)   { _narrow_oop._base  = base; }
+  static void     set_narrow_oop_shift(int shift)     { _narrow_oop._shift = shift; }
+  static bool     narrow_oop_use_implicit_null_checks()             { return  _narrow_oop._use_implicit_null_checks; }
+  static void     set_narrow_oop_use_implicit_null_checks(bool use) { _narrow_oop._use_implicit_null_checks = use; }
+  // Narrow Oop encoding mode:
+  // 0 - Use 32-bit oops without encoding when
+  //     HeapBaseMinAddress + heap_size < 4Gb
+  // 1 - Use zero based compressed oops with encoding when
+  //     HeapBaseMinAddress + heap_size < 32Gb
+  // 2 - Use compressed oops with heap base + encoding.
+  enum NARROW_OOP_MODE {
+    UnscaledNarrowOop  = 0,
+    ZeroBasedNarrowOop = 1,
+    HeapBasedNarrowOop = 2
+  };
+  static char* preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode);
 
   // Historic gc information
   static size_t get_heap_capacity_at_last_gc()         { return _heap_capacity_at_last_gc; }
--- a/src/share/vm/oops/oop.inline.hpp	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/share/vm/oops/oop.inline.hpp	Thu Mar 12 10:37:46 2009 -0700
@@ -148,10 +148,11 @@
 
 inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
   assert(!is_null(v), "oop value can never be zero");
-  address heap_base = Universe::heap_base();
-  uint64_t pd = (uint64_t)(pointer_delta((void*)v, (void*)heap_base, 1));
+  address base = Universe::narrow_oop_base();
+  int    shift = Universe::narrow_oop_shift();
+  uint64_t  pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
   assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
-  uint64_t result = pd >> LogMinObjAlignmentInBytes;
+  uint64_t result = pd >> shift;
   assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
   return (narrowOop)result;
 }
@@ -162,8 +163,9 @@
 
 inline oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
   assert(!is_null(v), "narrow oop value can never be zero");
-  address heap_base = Universe::heap_base();
-  return (oop)(void*)((uintptr_t)heap_base + ((uintptr_t)v << LogMinObjAlignmentInBytes));
+  address base = Universe::narrow_oop_base();
+  int    shift = Universe::narrow_oop_shift();
+  return (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
 }
 
 inline oop oopDesc::decode_heap_oop(narrowOop v) {
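
The encode/decode pair above is now parameterized on (base, shift) instead
of the fixed (heap_base, LogMinObjAlignmentInBytes). A self-contained round
trip under zero based settings; names and values are illustrative only:

    #include <cassert>
    #include <cstdint>

    typedef uint32_t narrowOop;

    static uintptr_t base  = 0;  // narrow_oop_base
    static int       shift = 3;  // narrow_oop_shift

    static narrowOop encode(void* v) {
      uint64_t pd = (uint64_t)((uintptr_t)v - base);
      return (narrowOop)(pd >> shift);  // must fit in 32 bits
    }
    static void* decode(narrowOop v) {
      return (void*)(base + ((uintptr_t)v << shift));
    }

    int main() {
      void* p = (void*)(uintptr_t)0x700000018ULL;  // 8-byte aligned, < 32Gb
      assert(decode(encode(p)) == p);
      return 0;
    }
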
--- a/src/share/vm/opto/addnode.cpp	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/share/vm/opto/addnode.cpp	Thu Mar 12 10:37:46 2009 -0700
@@ -756,7 +756,13 @@
       if ( eti == NULL ) {
         // there must be one pointer among the operands
         guarantee(tptr == NULL, "must be only one pointer operand");
-        tptr = et->isa_oopptr();
+        if (UseCompressedOops && Universe::narrow_oop_shift() == 0) {
+          // 32-bits narrow oop can be the base of address expressions
+          tptr = et->make_ptr()->isa_oopptr();
+        } else {
+          // only regular oops are expected here
+          tptr = et->isa_oopptr();
+        }
         guarantee(tptr != NULL, "non-int operand must be pointer");
         if (tptr->higher_equal(tp->add_offset(tptr->offset())))
           tp = tptr; // Set more precise type for bailout
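
Here and in lcm.cpp below, a DecodeN value may now appear as the base of an
address expression when the narrow oop shift is 0: with a zero base the
32-bit compressed value is the address itself. A tiny numeric check of that
identity (illustrative values):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t  narrow = 0xF0000010u;  // an oop below 4Gb, unscaled mode
      uintptr_t base = 0; int shift = 0;
      uintptr_t addr = base + ((uintptr_t)narrow << shift);
      assert(addr == narrow);          // the decode is the identity
      return 0;
    }
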
--- a/src/share/vm/opto/compile.cpp	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/share/vm/opto/compile.cpp	Thu Mar 12 10:37:46 2009 -0700
@@ -2081,7 +2081,7 @@
 
 #ifdef _LP64
   case Op_CastPP:
-    if (n->in(1)->is_DecodeN() && UseImplicitNullCheckForNarrowOop) {
+    if (n->in(1)->is_DecodeN() && Universe::narrow_oop_use_implicit_null_checks()) {
       Compile* C = Compile::current();
       Node* in1 = n->in(1);
       const Type* t = n->bottom_type();
@@ -2136,7 +2136,7 @@
         new_in2 = in2->in(1);
       } else if (in2->Opcode() == Op_ConP) {
         const Type* t = in2->bottom_type();
-        if (t == TypePtr::NULL_PTR && UseImplicitNullCheckForNarrowOop) {
+        if (t == TypePtr::NULL_PTR && Universe::narrow_oop_use_implicit_null_checks()) {
           new_in2 = ConNode::make(C, TypeNarrowOop::NULL_PTR);
           //
           // This transformation together with CastPP transformation above
--- a/src/share/vm/opto/connode.cpp	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/share/vm/opto/connode.cpp	Thu Mar 12 10:37:46 2009 -0700
@@ -433,7 +433,7 @@
 // If not converting int->oop, throw away cast after constant propagation
 Node *CastPPNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
   const Type *t = ccp->type(in(1));
-  if (!t->isa_oop_ptr() || in(1)->is_DecodeN()) {
+  if (!t->isa_oop_ptr() || (in(1)->is_DecodeN() && Universe::narrow_oop_use_implicit_null_checks())) {
     return NULL; // do not transform raw pointers or narrow oops
   }
   return ConstraintCastNode::Ideal_DU_postCCP(ccp);
--- a/src/share/vm/opto/lcm.cpp	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/share/vm/opto/lcm.cpp	Thu Mar 12 10:37:46 2009 -0700
@@ -158,7 +158,14 @@
           continue;             // Give up if offset is beyond page size
         // cannot reason about it; is probably not implicit null exception
       } else {
-        const TypePtr* tptr = base->bottom_type()->is_ptr();
+        const TypePtr* tptr;
+        if (UseCompressedOops && Universe::narrow_oop_shift() == 0) {
+          // 32-bits narrow oop can be the base of address expressions
+          tptr = base->bottom_type()->make_ptr();
+        } else {
+          // only regular oops are expected here
+          tptr = base->bottom_type()->is_ptr();
+        }
         // Give up if offset is not a compile-time constant
         if( offset == Type::OffsetBot || tptr->_offset == Type::OffsetBot )
           continue;
--- a/src/share/vm/opto/matcher.cpp	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/share/vm/opto/matcher.cpp	Thu Mar 12 10:37:46 2009 -0700
@@ -1481,8 +1481,13 @@
       const Type* mach_at = mach->adr_type();
       // DecodeN node consumed by an address may have different type
       // then its input. Don't compare types for such case.
-      if (m->adr_type() != mach_at && m->in(MemNode::Address)->is_AddP() &&
-          m->in(MemNode::Address)->in(AddPNode::Address)->is_DecodeN()) {
+      if (m->adr_type() != mach_at &&
+          (m->in(MemNode::Address)->is_DecodeN() ||
+           m->in(MemNode::Address)->is_AddP() &&
+           m->in(MemNode::Address)->in(AddPNode::Address)->is_DecodeN() ||
+           m->in(MemNode::Address)->is_AddP() &&
+           m->in(MemNode::Address)->in(AddPNode::Address)->is_AddP() &&
+           m->in(MemNode::Address)->in(AddPNode::Address)->in(AddPNode::Address)->is_DecodeN())) {
         mach_at = m->adr_type();
       }
       if (m->adr_type() != mach_at) {
--- a/src/share/vm/runtime/arguments.cpp	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/share/vm/runtime/arguments.cpp	Thu Mar 12 10:37:46 2009 -0700
@@ -1211,7 +1211,9 @@
     if (UseLargePages && UseCompressedOops) {
       // Cannot allocate guard pages for implicit checks in indexed addressing
       // mode, when large pages are specified on windows.
-      FLAG_SET_DEFAULT(UseImplicitNullCheckForNarrowOop, false);
+      // This flag could be switched ON if narrow oop base address is set to 0,
+      // see code in Universe::initialize_heap().
+      Universe::set_narrow_oop_use_implicit_null_checks(false);
     }
 #endif //  _WIN64
   } else {
--- a/src/share/vm/runtime/globals.hpp	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/share/vm/runtime/globals.hpp	Thu Mar 12 10:37:46 2009 -0700
@@ -303,11 +303,14 @@
             "Use 32-bit object references in 64-bit VM. "                   \
             "lp64_product means flag is always constant in 32 bit VM")      \
                                                                             \
-  lp64_product(bool, CheckCompressedOops, trueInDebug,                      \
-            "generate checks in encoding/decoding code")                    \
-                                                                            \
-  product(bool, UseImplicitNullCheckForNarrowOop, true,                     \
-            "generate implicit null check in indexed addressing mode.")     \
+  notproduct(bool, CheckCompressedOops, true,                               \
+            "generate checks in encoding/decoding code in debug VM")        \
+                                                                            \
+  product_pd(uintx, HeapBaseMinAddress,                                     \
+            "OS specific low limit for heap base address")                  \
+                                                                            \
+  diagnostic(bool, PrintCompressedOopsMode, false,                          \
+            "Print compressed oops base address and encoding mode")         \
                                                                             \
   /* UseMembar is theoretically a temp flag used for memory barrier         \
    * removal testing.  It was supposed to be removed before FCS but has     \
--- a/src/share/vm/runtime/os.hpp	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/share/vm/runtime/os.hpp	Thu Mar 12 10:37:46 2009 -0700
@@ -243,7 +243,7 @@
 
   static char*  non_memory_address_word();
   // reserve, commit and pin the entire memory region
-  static char*  reserve_memory_special(size_t size);
+  static char*  reserve_memory_special(size_t size, char* addr = NULL);
   static bool   release_memory_special(char* addr, size_t bytes);
   static bool   large_page_init();
   static size_t large_page_size();
--- a/src/share/vm/runtime/virtualspace.cpp	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/share/vm/runtime/virtualspace.cpp	Thu Mar 12 10:37:46 2009 -0700
@@ -109,6 +109,7 @@
                              const size_t prefix_align,
                              const size_t suffix_size,
                              const size_t suffix_align,
+                             char* requested_address,
                              const size_t noaccess_prefix)
 {
   assert(prefix_size != 0, "sanity");
@@ -131,7 +132,7 @@
   const bool try_reserve_special = UseLargePages &&
     prefix_align == os::large_page_size();
   if (!os::can_commit_large_page_memory() && try_reserve_special) {
-    initialize(size, prefix_align, true, NULL, noaccess_prefix);
+    initialize(size, prefix_align, true, requested_address, noaccess_prefix);
     return;
   }
 
@@ -146,7 +147,13 @@
          noaccess_prefix == prefix_align, "noaccess prefix wrong");
 
   // Optimistically try to reserve the exact size needed.
-  char* addr = os::reserve_memory(size, NULL, prefix_align);
+  char* addr;
+  if (requested_address != 0) {
+    addr = os::attempt_reserve_memory_at(size,
+                                         requested_address-noaccess_prefix);
+  } else {
+    addr = os::reserve_memory(size, NULL, prefix_align);
+  }
   if (addr == NULL) return;
 
   // Check whether the result has the needed alignment (unlikely unless
@@ -206,12 +213,8 @@
   char* base = NULL;
 
   if (special) {
-    // It's not hard to implement reserve_memory_special() such that it can
-    // allocate at fixed address, but there seems no use of this feature
-    // for now, so it's not implemented.
-    assert(requested_address == NULL, "not implemented");
 
-    base = os::reserve_memory_special(size);
+    base = os::reserve_memory_special(size, requested_address);
 
     if (base != NULL) {
       // Check alignment constraints
@@ -372,7 +375,8 @@
                                      bool large, char* requested_address) :
   ReservedSpace(size, alignment, large,
                 requested_address,
-                UseCompressedOops && UseImplicitNullCheckForNarrowOop ?
+                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
+                 Universe::narrow_oop_use_implicit_null_checks()) ?
                   lcm(os::vm_page_size(), alignment) : 0) {
   // Only reserved space for the java heap should have a noaccess_prefix
   // if using compressed oops.
@@ -382,9 +386,12 @@
 ReservedHeapSpace::ReservedHeapSpace(const size_t prefix_size,
                                      const size_t prefix_align,
                                      const size_t suffix_size,
-                                     const size_t suffix_align) :
+                                     const size_t suffix_align,
+                                     char* requested_address) :
   ReservedSpace(prefix_size, prefix_align, suffix_size, suffix_align,
-                UseCompressedOops && UseImplicitNullCheckForNarrowOop ?
+                requested_address,
+                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
+                 Universe::narrow_oop_use_implicit_null_checks()) ?
                   lcm(os::vm_page_size(), prefix_align) : 0) {
   protect_noaccess_prefix(prefix_size+suffix_size);
 }
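
The noaccess prefix is now requested only when a non-zero narrow oop base is
combined with implicit null checks; in zero based mode a narrow null decodes
to address 0, which the OS already leaves unmapped. A sketch of the prefix
sizing, with lcm() mirroring the call in the constructors above (values
illustrative):

    #include <cstddef>
    #include <cstdio>

    static size_t gcd(size_t a, size_t b) { return b == 0 ? a : gcd(b, a % b); }
    static size_t lcm(size_t a, size_t b) { return a / gcd(a, b) * b; }

    int main() {
      bool   use_compressed  = true;
      bool   implicit_checks = true;
      void*  narrow_base     = (void*)0x800000000ULL;  // heap based mode
      size_t page            = 4096;                   // os::vm_page_size()
      size_t alignment       = 2 * 1024 * 1024;        // 2Mb large pages

      size_t prefix =
          (use_compressed && narrow_base != NULL && implicit_checks)
              ? lcm(page, alignment) : 0;
      printf("noaccess prefix = %zu bytes\n", prefix);  // 2097152 here
      return 0;
    }
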
--- a/src/share/vm/runtime/virtualspace.hpp	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/share/vm/runtime/virtualspace.hpp	Thu Mar 12 10:37:46 2009 -0700
@@ -73,7 +73,8 @@
                 const size_t noaccess_prefix = 0);
   ReservedSpace(const size_t prefix_size, const size_t prefix_align,
                 const size_t suffix_size, const size_t suffix_align,
-                const size_t noaccess_prefix);
+                char* requested_address,
+                const size_t noaccess_prefix = 0);
 
   // Accessors
   char*  base()      const { return _base;      }
@@ -121,7 +122,8 @@
   ReservedHeapSpace(size_t size, size_t forced_base_alignment,
                     bool large, char* requested_address);
   ReservedHeapSpace(const size_t prefix_size, const size_t prefix_align,
-                    const size_t suffix_size, const size_t suffix_align);
+                    const size_t suffix_size, const size_t suffix_align,
+                    char* requested_address);
 };
 
 // VirtualSpace is data structure for committing a previously reserved address range in smaller chunks.
--- a/src/share/vm/runtime/vmStructs.cpp	Wed Mar 11 14:16:13 2009 -0700
+++ b/src/share/vm/runtime/vmStructs.cpp	Thu Mar 12 10:37:46 2009 -0700
@@ -263,7 +263,9 @@
      static_field(Universe,                    _bootstrapping,                                bool)                                  \
      static_field(Universe,                    _fully_initialized,                            bool)                                  \
      static_field(Universe,                    _verify_count,                                 int)                                   \
-     static_field(Universe,                    _heap_base,                                    address)                                   \
+     static_field(Universe,                    _narrow_oop._base,                             address)                               \
+     static_field(Universe,                    _narrow_oop._shift,                            int)                                   \
+     static_field(Universe,                    _narrow_oop._use_implicit_null_checks,         bool)                                  \
                                                                                                                                      \
   /**********************************************************************************/                                               \
   /* Generation and Space hierarchies                                               */                                               \