changeset 2820:44ce519bc3d1

7104960: JSR 292: +VerifyMethodHandles in product JVM can overflow buffer
Reviewed-by: kvn, jrose, twisti
author never
date Tue, 08 Nov 2011 10:31:53 -0800
parents 59e515ee9354
children c9a03402fe56
files src/cpu/sparc/vm/assembler_sparc.inline.hpp src/cpu/sparc/vm/methodHandles_sparc.cpp src/cpu/sparc/vm/methodHandles_sparc.hpp src/cpu/x86/vm/methodHandles_x86.cpp src/cpu/x86/vm/methodHandles_x86.hpp src/share/vm/asm/codeBuffer.cpp src/share/vm/asm/codeBuffer.hpp src/share/vm/prims/methodHandles.cpp src/share/vm/runtime/globals.hpp
diffstat 9 files changed, 74 insertions(+), 32 deletions(-)
--- a/src/cpu/sparc/vm/assembler_sparc.inline.hpp	Mon Nov 07 14:33:57 2011 -0800
+++ b/src/cpu/sparc/vm/assembler_sparc.inline.hpp	Tue Nov 08 10:31:53 2011 -0800
@@ -597,6 +597,10 @@
 inline void MacroAssembler::jmp( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, G0, rspec); }
 
 inline bool MacroAssembler::is_far_target(address d) {
+  if (ForceUnreachable) {
+    // References outside the code cache should be treated as far
+    return d < CodeCache::low_bound() || d > CodeCache::high_bound();
+  }
   return !is_in_wdisp30_range(d, CodeCache::low_bound()) || !is_in_wdisp30_range(d, CodeCache::high_bound());
 }
 
@@ -679,28 +683,44 @@
 
 inline void MacroAssembler::load_contents(const AddressLiteral& addrlit, Register d, int offset) {
   assert_not_delayed();
-  sethi(addrlit, d);
+  if (ForceUnreachable) {
+    patchable_sethi(addrlit, d);
+  } else {
+    sethi(addrlit, d);
+  }
   ld(d, addrlit.low10() + offset, d);
 }
 
 
 inline void MacroAssembler::load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset) {
   assert_not_delayed();
-  sethi(addrlit, d);
+  if (ForceUnreachable) {
+    patchable_sethi(addrlit, d);
+  } else {
+    sethi(addrlit, d);
+  }
   ld_ptr(d, addrlit.low10() + offset, d);
 }
 
 
 inline void MacroAssembler::store_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
   assert_not_delayed();
-  sethi(addrlit, temp);
+  if (ForceUnreachable) {
+    patchable_sethi(addrlit, temp);
+  } else {
+    sethi(addrlit, temp);
+  }
   st(s, temp, addrlit.low10() + offset);
 }
 
 
 inline void MacroAssembler::store_ptr_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
   assert_not_delayed();
-  sethi(addrlit, temp);
+  if (ForceUnreachable) {
+    patchable_sethi(addrlit, temp);
+  } else {
+    sethi(addrlit, temp);
+  }
   st_ptr(s, temp, addrlit.low10() + offset);
 }
 
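Editor's note on the hunks above: with ForceUnreachable set, is_far_target now treats any destination outside the code cache as far, and the load/store helpers switch from sethi to patchable_sethi so the emitted sequence always reserves room for a full-width literal fixup. A minimal standalone sketch of the far-target decision, assuming hypothetical code-cache bounds in place of the real CodeCache::low_bound()/high_bound() interface (sketch only, not HotSpot code):

    #include <cstdint>

    static const uintptr_t kCacheLow  = 0x0000000100000000ULL;  // hypothetical low bound
    static const uintptr_t kCacheHigh = 0x0000000104000000ULL;  // hypothetical high bound

    // True if 'd' is reachable from 'from' with a 30-bit word displacement
    // (SPARC wdisp30 reaches roughly +/- 2^31 bytes; simplified here).
    static bool in_wdisp30_range(uintptr_t d, uintptr_t from) {
      int64_t delta = (int64_t)d - (int64_t)from;
      return delta >= -(1LL << 31) && delta < (1LL << 31);
    }

    static bool is_far_target_sketch(uintptr_t d, bool force_unreachable) {
      if (force_unreachable) {
        // Everything outside the code cache is treated as far, so it always
        // gets the patchable, full-width addressing sequence.
        return d < kCacheLow || d > kCacheHigh;
      }
      // Otherwise far only if unreachable from either end of the code cache.
      return !in_wdisp30_range(d, kCacheLow) || !in_wdisp30_range(d, kCacheHigh);
    }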
--- a/src/cpu/sparc/vm/methodHandles_sparc.cpp	Mon Nov 07 14:33:57 2011 -0800
+++ b/src/cpu/sparc/vm/methodHandles_sparc.cpp	Tue Nov 08 10:31:53 2011 -0800
@@ -352,6 +352,7 @@
   BLOCK_COMMENT("load_stack_move {");
   __ ldsw(G3_amh_conversion, stack_move_reg);
   __ sra(stack_move_reg, CONV_STACK_MOVE_SHIFT, stack_move_reg);
+#ifdef ASSERT
   if (VerifyMethodHandles) {
     Label L_ok, L_bad;
     int32_t stack_move_limit = 0x0800;  // extra-large
@@ -363,6 +364,7 @@
     __ stop("load_stack_move of garbage value");
     __ BIND(L_ok);
   }
+#endif
   BLOCK_COMMENT("} load_stack_move");
 }
 
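Editor's note: the new #ifdef ASSERT guard here (and in the identical x86 hunk below) addresses the bug in the title. VerifyMethodHandles is a diagnostic flag, so it can be switched on in a product JVM (for example -XX:+UnlockDiagnosticVMOptions -XX:+VerifyMethodHandles), and the range-check/stop sequence it emits then has to fit in an adapter buffer sized for product code. Wrapping the emission in #ifdef ASSERT keeps that code in debug builds only. A compressed sketch of the layering, where the emit_* helpers are hypothetical stand-ins for the assembler calls in load_stack_move (sketch only):

    #include <cstdio>

    static void emit_load_and_shift() { std::puts("ldsw + sra"); }        // always emitted
    static void emit_range_check()    { std::puts("cmp + brx + stop"); }  // verification code

    static void load_stack_move_sketch(bool verify_method_handles) {
      emit_load_and_shift();
    #ifdef ASSERT
      // Compiled into debug builds only: a product JVM never emits the
      // (large) range-check/stop sequence, so it cannot overflow a buffer
      // sized for product code.
      if (verify_method_handles) {
        emit_range_check();
      }
    #endif
    }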
--- a/src/cpu/sparc/vm/methodHandles_sparc.hpp	Mon Nov 07 14:33:57 2011 -0800
+++ b/src/cpu/sparc/vm/methodHandles_sparc.hpp	Tue Nov 08 10:31:53 2011 -0800
@@ -27,7 +27,7 @@
 
 // Adapters
 enum /* platform_dependent_constants */ {
-  adapter_code_size = NOT_LP64(22000 DEBUG_ONLY(+ 40000)) LP64_ONLY(32000 DEBUG_ONLY(+ 80000))
+  adapter_code_size = NOT_LP64(23000 DEBUG_ONLY(+ 40000)) LP64_ONLY(35000 DEBUG_ONLY(+ 50000))
 };
 
 public:
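Editor's note: adapter_code_size is built from the NOT_LP64/LP64_ONLY/DEBUG_ONLY macros, so exactly one of the word-size arguments survives preprocessing and the DEBUG_ONLY term is added only in debug builds. For the new SPARC values that works out to (the x86 enum below follows the same pattern):

    32-bit product: 23000           bytes
    32-bit debug:   23000 + 40000 = 63000 bytes
    64-bit product: 35000           bytes
    64-bit debug:   35000 + 50000 = 85000 bytes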
--- a/src/cpu/x86/vm/methodHandles_x86.cpp	Mon Nov 07 14:33:57 2011 -0800
+++ b/src/cpu/x86/vm/methodHandles_x86.cpp	Tue Nov 08 10:31:53 2011 -0800
@@ -382,6 +382,7 @@
     __ movslq(rdi_stack_move, rdi_stack_move);
   }
 #endif //_LP64
+#ifdef ASSERT
   if (VerifyMethodHandles) {
     Label L_ok, L_bad;
     int32_t stack_move_limit = 0x4000;  // extra-large
@@ -393,6 +394,7 @@
     __ stop("load_stack_move of garbage value");
     __ BIND(L_ok);
   }
+#endif
   BLOCK_COMMENT("} load_stack_move");
 }
 
--- a/src/cpu/x86/vm/methodHandles_x86.hpp	Mon Nov 07 14:33:57 2011 -0800
+++ b/src/cpu/x86/vm/methodHandles_x86.hpp	Tue Nov 08 10:31:53 2011 -0800
@@ -27,7 +27,7 @@
 
 // Adapters
 enum /* platform_dependent_constants */ {
-  adapter_code_size = NOT_LP64(30000 DEBUG_ONLY(+ 10000)) LP64_ONLY(80000 DEBUG_ONLY(+ 120000))
+  adapter_code_size = NOT_LP64(16000 DEBUG_ONLY(+ 15000)) LP64_ONLY(32000 DEBUG_ONLY(+ 80000))
 };
 
 public:
--- a/src/share/vm/asm/codeBuffer.cpp	Mon Nov 07 14:33:57 2011 -0800
+++ b/src/share/vm/asm/codeBuffer.cpp	Tue Nov 08 10:31:53 2011 -0800
@@ -26,6 +26,7 @@
 #include "asm/codeBuffer.hpp"
 #include "compiler/disassembler.hpp"
 #include "utilities/copy.hpp"
+#include "utilities/xmlstream.hpp"
 
 // The structure of a CodeSection:
 //
@@ -81,7 +82,7 @@
 CodeBuffer::CodeBuffer(CodeBlob* blob) {
   initialize_misc("static buffer");
   initialize(blob->content_begin(), blob->content_size());
-  assert(verify_section_allocation(), "initial use of buffer OK");
+  verify_section_allocation();
 }
 
 void CodeBuffer::initialize(csize_t code_size, csize_t locs_size) {
@@ -108,17 +109,18 @@
     _insts.initialize_locs(locs_size / sizeof(relocInfo));
   }
 
-  assert(verify_section_allocation(), "initial use of blob is OK");
+  verify_section_allocation();
 }
 
 
 CodeBuffer::~CodeBuffer() {
+  verify_section_allocation();
+
   // If we allocate our code buffer from the CodeCache
   // via a BufferBlob, and it's not permanent, then
   // free the BufferBlob.
   // The rest of the memory will be freed when the ResourceObj
   // is released.
-  assert(verify_section_allocation(), "final storage configuration still OK");
   for (CodeBuffer* cb = this; cb != NULL; cb = cb->before_expand()) {
     // Previous incarnations of this buffer are held live, so that internal
     // addresses constructed before expansions will not be confused.
@@ -484,7 +486,7 @@
 
   // Done calculating sections; did it come out to the right end?
   assert(buf_offset == total_content_size(), "sanity");
-  assert(dest->verify_section_allocation(), "final configuration works");
+  dest->verify_section_allocation();
 }
 
 csize_t CodeBuffer::total_offset_of(CodeSection* cs) const {
@@ -810,7 +812,7 @@
   _decode_begin = NULL;  // sanity
 
   // Make certain that the new sections are all snugly inside the new blob.
-  assert(verify_section_allocation(), "expanded allocation is ship-shape");
+  verify_section_allocation();
 
 #ifndef PRODUCT
   if (PrintNMethods && (WizardMode || Verbose)) {
@@ -839,35 +841,48 @@
   DEBUG_ONLY(cb->_blob = (BufferBlob*)badAddress);
 }
 
-#ifdef ASSERT
-bool CodeBuffer::verify_section_allocation() {
+void CodeBuffer::verify_section_allocation() {
   address tstart = _total_start;
-  if (tstart == badAddress)  return true;  // smashed by set_blob(NULL)
+  if (tstart == badAddress)  return;  // smashed by set_blob(NULL)
   address tend   = tstart + _total_size;
   if (_blob != NULL) {
-    assert(tstart >= _blob->content_begin(), "sanity");
-    assert(tend   <= _blob->content_end(),   "sanity");
+
+    guarantee(tstart >= _blob->content_begin(), "sanity");
+    guarantee(tend   <= _blob->content_end(),   "sanity");
   }
   // Verify disjointness.
   for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
     CodeSection* sect = code_section(n);
     if (!sect->is_allocated() || sect->is_empty())  continue;
-    assert((intptr_t)sect->start() % sect->alignment() == 0
+    guarantee((intptr_t)sect->start() % sect->alignment() == 0
            || sect->is_empty() || _blob == NULL,
            "start is aligned");
     for (int m = (int) SECT_FIRST; m < (int) SECT_LIMIT; m++) {
       CodeSection* other = code_section(m);
       if (!other->is_allocated() || other == sect)  continue;
-      assert(!other->contains(sect->start()    ), "sanity");
+      guarantee(!other->contains(sect->start()    ), "sanity");
       // limit is an exclusive address and can be the start of another
       // section.
-      assert(!other->contains(sect->limit() - 1), "sanity");
+      guarantee(!other->contains(sect->limit() - 1), "sanity");
     }
-    assert(sect->end() <= tend, "sanity");
+    guarantee(sect->end() <= tend, "sanity");
+    guarantee(sect->end() <= sect->limit(), "sanity");
   }
-  return true;
 }
-#endif //ASSERT
+
+void CodeBuffer::log_section_sizes(const char* name) {
+  if (xtty != NULL) {
+    // log info about buffer usage
+    xtty->print_cr("<blob name='%s' size='%d'>", name, _total_size);
+    for (int n = (int) CodeBuffer::SECT_FIRST; n < (int) CodeBuffer::SECT_LIMIT; n++) {
+      CodeSection* sect = code_section(n);
+      if (!sect->is_allocated() || sect->is_empty())  continue;
+      xtty->print_cr("<sect index='%d' size='" SIZE_FORMAT "' free='" SIZE_FORMAT "'/>",
+                     n, sect->limit() - sect->start(), sect->limit() - sect->end());
+    }
+    xtty->print_cr("</blob>");
+  }
+}
 
 #ifndef PRODUCT
 
@@ -895,7 +910,6 @@
   _comments.add_comment(offset, comment);
 }
 
-
 class CodeComment: public CHeapObj {
  private:
   friend class CodeComments;
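Editor's note on verify_section_allocation: converting its checks from assert() to guarantee() is what turns a silent product-mode overflow into a hard VM error, because HotSpot's assert() compiles away in product builds while guarantee() is checked in every build. A minimal sketch of that distinction, using hypothetical report_failure/section values rather than the real VM error machinery (sketch only, not the HotSpot macros):

    #include <cstdio>
    #include <cstdlib>

    static void report_failure(const char* msg) {        // hypothetical error path
      std::fprintf(stderr, "guarantee failed: %s\n", msg);
      std::abort();
    }

    #ifdef ASSERT
    #define ASSERT_SKETCH(cond, msg)    do { if (!(cond)) report_failure(msg); } while (0)
    #else
    #define ASSERT_SKETCH(cond, msg)    ((void)0)   // vanishes in product builds
    #endif
    #define GUARANTEE_SKETCH(cond, msg) do { if (!(cond)) report_failure(msg); } while (0)

    int main() {
      const char* section_end   = (const char*)0x1000;   // hypothetical: end past limit
      const char* section_limit = (const char*)0x0f00;
      // In a product build the first check is a no-op; only the second catches the overflow.
      ASSERT_SKETCH   (section_end <= section_limit, "section overflowed its limit");
      GUARANTEE_SKETCH(section_end <= section_limit, "section overflowed its limit");
      return 0;
    }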
--- a/src/share/vm/asm/codeBuffer.hpp	Mon Nov 07 14:33:57 2011 -0800
+++ b/src/share/vm/asm/codeBuffer.hpp	Tue Nov 08 10:31:53 2011 -0800
@@ -362,10 +362,8 @@
   // helper for CodeBuffer::expand()
   void take_over_code_from(CodeBuffer* cs);
 
-#ifdef ASSERT
   // ensure sections are disjoint, ordered, and contained in the blob
-  bool verify_section_allocation();
-#endif
+  void verify_section_allocation();
 
   // copies combined relocations to the blob, returns bytes copied
   // (if target is null, it is a dry run only, just for sizing)
@@ -393,7 +391,7 @@
     assert(code_start != NULL, "sanity");
     initialize_misc("static buffer");
     initialize(code_start, code_size);
-    assert(verify_section_allocation(), "initial use of buffer OK");
+    verify_section_allocation();
   }
 
   // (2) CodeBuffer referring to pre-allocated CodeBlob.
@@ -545,6 +543,9 @@
 
   void block_comment(intptr_t offset, const char * comment) PRODUCT_RETURN;
 
+  // Log a little info about section usage in the CodeBuffer
+  void log_section_sizes(const char* name);
+
 #ifndef PRODUCT
  public:
   // Printing / Decoding
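Editor's note: the new log_section_sizes hook writes to the xtty XML stream (hence the utilities/xmlstream.hpp include added to codeBuffer.cpp), so its records end up in the XML log produced when that stream is active, typically with -XX:+LogCompilation. Read off the print_cr calls, a record has roughly this shape; the sizes below are hypothetical:

    <blob name='MethodHandlesAdapterBlob' size='32000'>
    <sect index='0' size='21400' free='160'/>
    <sect index='2' size='9800' free='320'/>
    </blob>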
--- a/src/share/vm/prims/methodHandles.cpp	Mon Nov 07 14:33:57 2011 -0800
+++ b/src/share/vm/prims/methodHandles.cpp	Tue Nov 08 10:31:53 2011 -0800
@@ -206,9 +206,12 @@
   _adapter_code = MethodHandlesAdapterBlob::create(adapter_code_size);
   if (_adapter_code == NULL)
     vm_exit_out_of_memory(adapter_code_size, "CodeCache: no room for MethodHandles adapters");
-  CodeBuffer code(_adapter_code);
-  MethodHandlesAdapterGenerator g(&code);
-  g.generate();
+  {
+    CodeBuffer code(_adapter_code);
+    MethodHandlesAdapterGenerator g(&code);
+    g.generate();
+    code.log_section_sizes("MethodHandlesAdapterBlob");
+  }
 }
 
 //------------------------------------------------------------------------------
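Editor's note: the braces added around the CodeBuffer are tied to the codeBuffer.cpp hunk above, where the destructor now runs verify_section_allocation(). The explicit scope makes it clear in the source that the guarantee-based overflow check fires right after g.generate() and the section-size logging, rather than at some later point. A sketch of the pattern with hypothetical Buffer/Generator types (sketch only):

    struct Buffer {
      ~Buffer() { /* verify sections; fails hard if anything overflowed */ }
      void log_section_sizes(const char* /*name*/) { /* emit usage record */ }
    };
    struct Generator {
      explicit Generator(Buffer*) {}
      void generate() { /* emit adapter code into the buffer */ }
    };

    void init_adapters_sketch() {
      {
        Buffer code;
        Generator g(&code);
        g.generate();
        code.log_section_sizes("MethodHandlesAdapterBlob");
      }  // destructor runs here: any overflow is detected immediately
    }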
--- a/src/share/vm/runtime/globals.hpp	Mon Nov 07 14:33:57 2011 -0800
+++ b/src/share/vm/runtime/globals.hpp	Tue Nov 08 10:31:53 2011 -0800
@@ -577,8 +577,8 @@
   develop(bool, VerifyStack, false,                                         \
           "Verify stack of each thread when it is entering a runtime call") \
                                                                             \
-  develop(bool, ForceUnreachable, false,                                    \
-          "(amd64) Make all non code cache addresses to be unreachable with rip-rel forcing use of 64bit literal fixups") \
+  diagnostic(bool, ForceUnreachable, false,                                 \
+          "Make all non code cache addresses to be unreachable with forcing use of 64bit literal fixups") \
                                                                             \
   notproduct(bool, StressDerivedPointers, false,                            \
           "Force scavenge when a derived pointers is detected on stack "    \