changeset 9:3713353e23db

Fix several bugs so that the jar file verification test passes with the C1 compiler: 1. A VMReg value now corresponds to exactly one register. 2. Fix a long register endianness bug. 3. Long arithmetic is implemented in C for now; it should be reimplemented in assembly later. 4. The safepoint_poll register is now allocated by the register allocator, and the return address is set manually. 5. safepoint_return uses register A0.
author YANG Yongqiang <yangyongqiang@loongson.cn>
date Fri, 15 Oct 2010 20:37:51 +0000
parents fc40477e29ae
children 7eeee95a5a53
files hotspot/src/cpu/mips/vm/c1_FrameMap_mips.cpp hotspot/src/cpu/mips/vm/c1_LIRAssembler_mips.cpp hotspot/src/cpu/mips/vm/c1_LIRGenerator_mips.cpp hotspot/src/cpu/mips/vm/jni_mips.h hotspot/src/cpu/mips/vm/nativeInst_mips.hpp hotspot/src/cpu/mips/vm/sharedRuntime_mips.cpp hotspot/src/cpu/mips/vm/vmreg_mips.cpp hotspot/src/cpu/mips/vm/vmreg_mips.inline.hpp hotspot/src/share/vm/c1/c1_Runtime1.cpp hotspot/src/share/vm/compiler/compileBroker.cpp hotspot/src/share/vm/runtime/sharedRuntime.cpp hotspot/src/share/vm/runtime/sharedRuntime.hpp
diffstat 12 files changed, 120 insertions(+), 109 deletions(-)
--- a/hotspot/src/cpu/mips/vm/c1_FrameMap_mips.cpp	Tue Oct 12 18:28:27 2010 +0800
+++ b/hotspot/src/cpu/mips/vm/c1_FrameMap_mips.cpp	Fri Oct 15 20:37:51 2010 +0000
@@ -52,7 +52,7 @@
 		Register reg = r_1->as_Register();
 		if (r_2->is_Register()) {
 			Register reg2 = r_2->as_Register();
-			opr = as_long_opr(reg2, reg);
+			opr = as_long_opr(reg, reg2);
 		} else if (type == T_OBJECT) {
 			opr = as_oop_opr(reg);
 		} else {
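
Note: the one-line change above (as_long_opr(reg, reg2) instead of as_long_opr(reg2, reg)) is the "long register endianness" fix from the commit message. Below is a minimal, self-contained C++ sketch, not HotSpot code, of why the pairing order matters; it assumes the convention that the first register of the pair holds the low 32 bits and the second holds the high 32 bits.

    #include <cstdint>
    #include <cstdio>

    // Stand-in for a two-register long operand: low word first, high word second.
    struct LongOpr { uint32_t lo; uint32_t hi; };

    static LongOpr as_long_opr(uint32_t lo, uint32_t hi) { return LongOpr{lo, hi}; }

    static int64_t combine(const LongOpr& o) {
      return (int64_t)(((uint64_t)o.hi << 32) | o.lo);
    }

    int main() {
      uint32_t r_1 = 0x00000001;   // value in the first register of the pair
      uint32_t r_2 = 0xFFFFFFFF;   // value in the second register of the pair

      // Correct pairing: r_1 is the low word, r_2 the high word.
      std::printf("%lld\n", (long long)combine(as_long_opr(r_1, r_2)));  // -4294967295

      // Swapped pairing (the old code): the halves land in the wrong positions.
      std::printf("%lld\n", (long long)combine(as_long_opr(r_2, r_1)));  // 8589934591
      return 0;
    }
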
--- a/hotspot/src/cpu/mips/vm/c1_LIRAssembler_mips.cpp	Tue Oct 12 18:28:27 2010 +0800
+++ b/hotspot/src/cpu/mips/vm/c1_LIRAssembler_mips.cpp	Fri Oct 15 20:37:51 2010 +0000
@@ -548,37 +548,15 @@
 	assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == V0, "word returns are in V0");
 	// Pop the stack before the safepoint code
 	__ leave();
-	/*if (compilation()->jvmpi_event_method_exit_enabled()) {
-		//   jvmpi_method_exit(compilation()->method(), result_is_oop);
-		// __ movl(ecx, method()->encoding());
-		//__ jmp(Runtime1::entry_for(Runtime1::jvmpi_method_exit_id), 
-		//relocInfo::runtime_call_type);
-		//__ lw(T0,method()->encoding());
-		int oop_index = __ oop_recorder()->find_index(method()->encoding());
-		RelocationHolder rspec = oop_Relocation::spec(oop_index);
-		__ relocate(rspec);
-		__ lui(T0, Assembler::split_high((int)method()->encoding()));
-		__ addiu(T0, T0, Assembler::split_low((int)method()->encoding()));
-
-		__ jmp(Runtime1::entry_for(Runtime1::jvmpi_method_exit_id), 
-				relocInfo::runtime_call_type);
-		__ delayed()->nop(); 
-	}
-	else*/{
-		
-			// the poll sets the condition code, but no data registers
-		//__ relocate(relocInfo::poll_return_type);
-		// __ testl(eax, polling_page);
-		//__ ret(0);
-		__ lui(AT, Assembler::split_high((intptr_t)os::get_polling_page() 
-				+ (SafepointPollOffset % os::vm_page_size())));
-		__ relocate(relocInfo::poll_return_type);
-		__ lw(AT, AT, Assembler::split_low((intptr_t)os::get_polling_page() 
-				+ (SafepointPollOffset % os::vm_page_size())));
-		__ jr(RA);
-		__ delayed()->nop();
-	}
- }
+	//FIXME: it is unclear whether it is safe to use A0 here
+	__ lui(A0, Assembler::split_high((intptr_t)os::get_polling_page() 
+			+ (SafepointPollOffset % os::vm_page_size())));
+	__ relocate(relocInfo::poll_return_type);
+	__ lw(AT, A0, Assembler::split_low((intptr_t)os::get_polling_page() 
+			+ (SafepointPollOffset % os::vm_page_size())));
+	__ jr(RA);
+	__ delayed()->nop();
+}
 
 //read protect mem to ZERO won't cause the exception only in godson-2e, So I modify ZERO to AT .@jerome,11/25,2006
 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
@@ -588,10 +566,11 @@
 		ShouldNotReachHere();
 	} 
 	int offset = __ offset();
-	__ lui(AT, Assembler::split_high((intptr_t)os::get_polling_page() 
+  Register r = tmp->as_register();
+	__ lui(r, Assembler::split_high((intptr_t)os::get_polling_page() 
 				+ (SafepointPollOffset % os::vm_page_size())));
 	__ relocate(relocInfo::poll_type);
-	__ lw(AT, AT, Assembler::split_low((intptr_t)os::get_polling_page() 
+	__ lw(AT, r, Assembler::split_low((intptr_t)os::get_polling_page() 
 				+ (SafepointPollOffset % os::vm_page_size())));
 	
 	return offset; 
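
Note: both hunks above emit the MIPS polling sequence (a lui of the polling-page address followed by a lw that is relocated as a poll); return_op now builds the address in A0 and safepoint_poll in an allocated register, while the loaded value still goes to AT. The sketch below is a hypothetical POSIX/C++ illustration of the underlying polling-page mechanism, not the HotSpot implementation: a poll is just a load from a dedicated page, and the VM requests a safepoint by revoking access to that page so the next load faults.

    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static void* polling_page = NULL;

    static void on_fault(int sig, siginfo_t* info, void* ctx) {
      (void)sig; (void)ctx;
      if (info->si_addr == polling_page) {
        const char msg[] = "poll trapped: the thread would park at the safepoint here\n";
        write(1, msg, sizeof(msg) - 1);
        _exit(0);                          // a real VM blocks the thread, then resumes it
      }
      abort();                             // unrelated fault
    }

    int main() {
      long page = sysconf(_SC_PAGESIZE);
      polling_page = mmap(NULL, page, PROT_READ,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

      struct sigaction sa = {};
      sa.sa_flags = SA_SIGINFO;
      sigemptyset(&sa.sa_mask);
      sa.sa_sigaction = on_fault;
      sigaction(SIGSEGV, &sa, NULL);

      // With the page readable, a poll is just a cheap load with no side effects.
      volatile int probe = *(volatile int*)polling_page;
      (void)probe;

      // "Requesting a safepoint": revoke access so the next poll faults into on_fault.
      mprotect(polling_page, page, PROT_NONE);
      probe = *(volatile int*)polling_page;
      return 1;                            // not reached
    }
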
@@ -3698,12 +3677,7 @@
 
 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
   // optimized version for linear scan:
-  // * count must be already in ECX (guaranteed by LinearScan)
-  // * left and dest must be equal
   // * tmp must be unused
-// for mips , i think , count store in which register is not required 
-  assert(count->as_register() == SHIFT_count, "count must be in ECX");
-  assert(left == dest, "left and dest must be equal");
   assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
 
   if (left->is_single_cpu()) {
--- a/hotspot/src/cpu/mips/vm/c1_LIRGenerator_mips.cpp	Tue Oct 12 18:28:27 2010 +0800
+++ b/hotspot/src/cpu/mips/vm/c1_LIRGenerator_mips.cpp	Fri Oct 15 20:37:51 2010 +0000
@@ -163,7 +163,7 @@
 
 
 LIR_Opr LIRGenerator::safepoint_poll_register() {
-  return LIR_OprFact::illegalOpr;
+  return new_register(T_INT);
 }
 
 LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
@@ -296,23 +296,6 @@
   Unimplemented();
 }
 
-/*
-void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
-  Unimplemented();
-}
-
-
-void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
-  __ cmp_reg_mem(condition, reg, new LIR_Address(base, disp, type), info);
-}
-
-
-void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, LIR_Opr disp, BasicType type, CodeEmitInfo* info) {
-  __ cmp_reg_mem(condition, reg, new LIR_Address(base, disp, type), info);
-}
-*/
-
-
 bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
   if (tmp->is_valid()) {
     if (is_power_of_2(c + 1)) {
@@ -496,8 +479,6 @@
       CodeEmitInfo* info = state_for(x);
       LIR_Opr item = right.result();
       assert(item->is_register(), "must be");
-//      __ cmp(lir_cond_equal, item, LIR_OprFact::longConst(0));
- //     __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(info));
       __ branch(lir_cond_equal,item,LIR_OprFact::longConst(0), T_LONG, new DivByZeroStub(info));
     }
 
@@ -521,7 +502,25 @@
     break;
   }
   case Bytecodes::_ladd:
-  case Bytecodes::_lsub: {
+  case Bytecodes::_lsub:  {
+    address entry;
+    switch (x->op()) {
+    case Bytecodes::_ladd:
+      entry = CAST_FROM_FN_PTR(address, SharedRuntime::ladd);
+      break; // check if dividend is 0 is done elsewhere
+    case Bytecodes::_lsub:
+      entry = CAST_FROM_FN_PTR(address, SharedRuntime::lsub);
+      break; // check if dividend is 0 is done elsewhere
+    default:
+      ShouldNotReachHere();
+    }
+    // order of arguments to runtime call is reversed.
+    LIR_Opr result = call_runtime(x->y(), x->x(), entry, x->type(), NULL);
+    set_result(x, result);
+    break;
+  }
+
+/*  {
     LIRItem left(x->x(), this);
     LIRItem right(x->y(), this);
     left.load_item();
@@ -531,12 +530,14 @@
     arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
     break;
   }
+*/
   default: ShouldNotReachHere();
   }
 }
 
 
 
+
 // for: _iadd, _imul, _isub, _idiv, _irem
 void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
 	bool is_div_rem = x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem;
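
Note: the ladd/lsub case above routes 64-bit addition and subtraction through SharedRuntime helpers instead of arithmetic_op_long (item 3 of the commit message: implemented in C for now, to be replaced with assembly later). A minimal sketch of the reversed-argument convention the comment refers to, assuming call_runtime passes x->y() first and x->x() second, which is why the helpers are declared as (y, x) and compute x OP y:

    #include <cstdio>
    typedef long long jlong;   // matches the 32-bit branch of jni_mips.h

    // Mirrors the shape of SharedRuntime::lsub(jlong y, jlong x): the first
    // argument is the right-hand operand, the second is the left-hand operand.
    static jlong lsub(jlong y, jlong x) { return x - y; }

    int main() {
      jlong x = 10, y = 3;                 // the bytecode computes x - y
      std::printf("%lld\n", lsub(y, x));   // arguments reversed at the call site; prints 7
      return 0;
    }
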
@@ -587,6 +588,29 @@
 
 // _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
 void LIRGenerator::do_ShiftOp(ShiftOp* x) {
+  if(x->op() == Bytecodes::_lshl
+      ||  x->op() == Bytecodes::_lshr
+      ||  x->op() == Bytecodes::_lushr) {
+    address entry;
+    switch (x->op()) {
+    case Bytecodes::_lshl:
+      entry = CAST_FROM_FN_PTR(address, SharedRuntime::lshl);
+      break; // check if dividend is 0 is done elsewhere
+    case Bytecodes::_lshr:
+      entry = CAST_FROM_FN_PTR(address, SharedRuntime::lshr);
+      break; // check if dividend is 0 is done elsewhere
+    case Bytecodes::_lushr:
+      entry = CAST_FROM_FN_PTR(address, SharedRuntime::lushr);
+      break;
+    default:
+      ShouldNotReachHere();
+    }
+    // order of arguments to runtime call is reversed.
+    LIR_Opr result = call_runtime(x->y(), x->x(), entry, x->type(), NULL);
+    set_result(x, result);  
+    return;
+  }
+
   // count must always be in rcx
   LIRItem value(x->x(), this);
   LIRItem count(x->y(), this);
@@ -595,7 +619,7 @@
   bool must_load_count = !count.is_constant() || elemType == longTag;
   if (must_load_count) {
     // count for long must be in register
-    count.load_item_force(shiftCountOpr());
+    count.load_item();
   } else {
     count.dont_load_item();
   }
@@ -1174,7 +1198,7 @@
 		// increment backedge counter if needed
 		increment_backedge_counter(state_for(x, x->state_before()));
 
-		__ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
+		__ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
 	}
 	set_no_result(x);
 
--- a/hotspot/src/cpu/mips/vm/jni_mips.h	Tue Oct 12 18:28:27 2010 +0800
+++ b/hotspot/src/cpu/mips/vm/jni_mips.h	Fri Oct 15 20:37:51 2010 +0000
@@ -32,8 +32,10 @@
 
 #ifdef _LP64
   typedef long jlong;
+  typedef unsigned long ujlong;
 #else
   typedef long long jlong;
+  typedef unsigned long long ujlong;
 #endif
 
 typedef signed char jbyte;
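
Note: the new ujlong typedef exists so that SharedRuntime::lushr (added below in sharedRuntime.cpp/.hpp) can shift an unsigned 64-bit value. A small sketch of why the unsigned type matters: right-shifting a negative signed value is an arithmetic shift on common compilers (and was implementation-defined in C++ before C++20), while shifting an unsigned value is always a logical, zero-filling shift, which is what lushr requires.

    #include <cstdio>
    typedef long long jlong;               // matches the 32-bit branch of jni_mips.h
    typedef unsigned long long ujlong;

    static jlong  lshr (int y, jlong  x) { return x >> y; }  // arithmetic shift (_lshr)
    static ujlong lushr(int y, ujlong x) { return x >> y; }  // logical shift (_lushr)

    int main() {
      jlong v = -8;                                          // 0xFFFFFFFFFFFFFFF8
      std::printf("%lld\n", lshr(1, v));                     // -4: the sign bit is replicated
      std::printf("%llu\n", lushr(1, (ujlong)v));            // 9223372036854775804: zero-filled
      return 0;
    }
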
--- a/hotspot/src/cpu/mips/vm/nativeInst_mips.hpp	Tue Oct 12 18:28:27 2010 +0800
+++ b/hotspot/src/cpu/mips/vm/nativeInst_mips.hpp	Fri Oct 15 20:37:51 2010 +0000
@@ -489,7 +489,6 @@
 inline bool NativeInstruction::is_safepoint_poll() {
   return 
          is_op(long_at(-4), Assembler::lui_op) && 
-         is_rt(long_at(-4), AT) && 
          is_op(Assembler::lw_op) && 
          is_rt(AT);
 }
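
Note: the matcher above no longer requires the lui's target register to be AT, because the poll base register is now allocated by the register allocator (see safepoint_poll_register and safepoint_poll above); only the lw's destination must still be AT. The following is an illustrative sketch of the field extraction involved, using the standard MIPS I-type layout; it is not HotSpot's NativeInstruction code.

    #include <stdint.h>
    #include <stdio.h>

    // MIPS I-type layout: op[31:26] rs[25:21] rt[20:16] imm[15:0]
    static uint32_t op_of(uint32_t insn) { return (insn >> 26) & 0x3F; }
    static uint32_t rt_of(uint32_t insn) { return (insn >> 16) & 0x1F; }

    static const uint32_t lui_op = 0x0F, lw_op = 0x23, AT = 1;

    // Relaxed pattern: "lui <any>, hi(page); lw AT, lo(page)(<any>)".
    static bool is_safepoint_poll(uint32_t prev_insn, uint32_t insn) {
      return op_of(prev_insn) == lui_op &&   // the lui's rt is no longer checked
             op_of(insn) == lw_op &&
             rt_of(insn) == AT;
    }

    static uint32_t make_i(uint32_t op, uint32_t rs, uint32_t rt, uint16_t imm) {
      return (op << 26) | (rs << 21) | (rt << 16) | imm;
    }

    int main() {
      uint32_t lui_a0 = make_i(lui_op, 0, 4, 0x1234);    // lui $a0, 0x1234  ($a0 = reg 4)
      uint32_t lw_at  = make_i(lw_op, 4, AT, 0x0010);    // lw  $at, 0x10($a0)
      printf("%d\n", is_safepoint_poll(lui_a0, lw_at));  // 1: accepted despite base != AT
      return 0;
    }
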
--- a/hotspot/src/cpu/mips/vm/sharedRuntime_mips.cpp	Tue Oct 12 18:28:27 2010 +0800
+++ b/hotspot/src/cpu/mips/vm/sharedRuntime_mips.cpp	Fri Oct 15 20:37:51 2010 +0000
@@ -4058,7 +4058,7 @@
 static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return) {
 
   // Account for thread arg in our frame
-  const int additional_words = 0; 
+  const int additional_words = 1; 
   int frame_size_in_words;
 
   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");  
@@ -4084,16 +4084,22 @@
   // Otherwise we push space for a return address that the safepoint
   // handler will install later to make the stack walking sensible.
 	// i dont think we need this in godson.
+#ifndef OPT_THREAD
+  __ get_thread(thread);
+#endif
+
+  if(!cause_return) {
+    __ lw(RA, Address(thread, JavaThread::saved_exception_pc_offset()));
+  }
   map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words);
   
   // The following is basically a call_VM. However, we need the precise
   // address of the call in order to generate an oopmap. Hence, we do all the
   // work outselvs.
 
-  // Push thread argument and setup last_Java_sp
-#ifndef OPT_THREAD
-  __ get_thread(thread);
-#endif
+  // Pass the thread argument and set up last_Java_sp
+  __ addiu(SP, SP, -wordSize); 
+
   __ move(A0, thread);
   __ set_last_Java_frame(thread, NOREG, NOREG, NULL);
   __ relocate(relocInfo::internal_pc_type); 
@@ -4117,7 +4123,7 @@
   // debug-info recordings, as well as let GC find all oops.
 
   oop_maps->add_gc_map(__ offset(),  map);
-
+  __ addiu(SP, SP, wordSize);
 
   Label noException;
 
@@ -4132,42 +4138,19 @@
 
   RegisterSaver::restore_live_registers(masm);
   //forward_exception_entry need return address on the stack
+  __ push(RA);
   __ lui(T9, Assembler::split_high((int)StubRoutines::forward_exception_entry()));
   __ addiu(T9, T9, Assembler::split_low((int)StubRoutines::forward_exception_entry()));
   __ jr(T9);
   __ delayed()->nop();
 
   // No exception case
-  Label continueL;
-
   __ bind(noException);
-  __ slt(AT, V0, ZERO);
-  __ bne(AT, ZERO, continueL);
-  __ delayed()->nop();
-
   // Normal exit, register restoring and exit  
   RegisterSaver::restore_live_registers(masm);
-
   __ jr(RA);
   __ delayed()->nop();
   
-  // we have deoptimized at a blocked call, we may not reexecute the
-  // instruction as we would skip the call in interpreter; therefore
-  // execute the destination of the call; the destination is valid
-  // because the receiver was already consumed
-  // ecx holds the destination of the call
-  __ bind(continueL);
-  __ move(RA, V0);
-  RegisterSaver::restore_live_registers(masm);
-
-  // Everything is just like we were at entry (except ebx)
-  // original return address is still there too (we deopt on return)
-  // just continue with the call.
-  __ jr(RA);
-  __ delayed()->nop();
-
- 
-  // make sure all code is generated
   masm->flush();  
 
   // Fill-out other meta info
--- a/hotspot/src/cpu/mips/vm/vmreg_mips.cpp	Tue Oct 12 18:28:27 2010 +0800
+++ b/hotspot/src/cpu/mips/vm/vmreg_mips.cpp	Fri Oct 15 20:37:51 2010 +0000
@@ -33,7 +33,6 @@
   int i;
   for (i = 0; i < ConcreteRegisterImpl::max_gpr ; ) {
     regName[i++] = reg->name();
-    regName[i++] = reg->name();
     reg = reg->successor();
   }
 
--- a/hotspot/src/cpu/mips/vm/vmreg_mips.inline.hpp	Tue Oct 12 18:28:27 2010 +0800
+++ b/hotspot/src/cpu/mips/vm/vmreg_mips.inline.hpp	Fri Oct 15 20:37:51 2010 +0000
@@ -26,7 +26,7 @@
 inline VMReg RegisterImpl::as_VMReg() {
   if( this==noreg ) return VMRegImpl::Bad();
 	//FIXME why encoding << 1? what is the meaning of the VMReg's value
-  return VMRegImpl::as_VMReg(encoding() << 1 );
+  return VMRegImpl::as_VMReg(encoding());
 }
 
 inline VMReg FloatRegisterImpl::as_VMReg() {
@@ -41,7 +41,7 @@
 
   assert( is_Register(), "must be");
   // Yuk
-  return ::as_Register(value() >> 1);
+  return ::as_Register(value());
 }
 
 inline FloatRegister VMRegImpl::as_FloatRegister() {
@@ -53,15 +53,8 @@
 
 inline   bool VMRegImpl::is_concrete() {
   assert(is_reg(), "must be");
-  int v = value();
-  if ( v  <  ConcreteRegisterImpl::max_gpr ) {
-    return is_even(v);
-  }
-  // F0..F31
-  if ( v <= ConcreteRegisterImpl::max_gpr + 31) return true;
-  if ( v <  ConcreteRegisterImpl::max_fpr) {
-    return is_even(v);
-  }
+  if(is_Register()) return true;
+  if(is_FloatRegister()) return true;
   assert(false, "what register?");
   return false;
 }
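
Note: together with the vmreg_mips.cpp change above (one regName entry per GPR instead of two), this switches the GPR-to-VMReg mapping from two slots per register (encoding << 1) to a 1:1 mapping, which is item 1 of the commit message. A minimal sketch of the invariant being preserved, using illustrative stand-in functions: whatever scaling is chosen, as_VMReg and as_Register must round-trip.

    #include <assert.h>
    #include <stdio.h>

    // Illustrative only: a GPR is identified by its encoding 0..31 and a VMReg
    // by an index. With the 1:1 mapping the round trip is the identity; the old
    // code used two VMReg slots per GPR (encoding << 1 / value >> 1).
    static int as_VMReg(int encoding) { return encoding; }   // was: encoding << 1
    static int as_Register(int vmreg) { return vmreg; }      // was: vmreg >> 1

    int main() {
      for (int enc = 0; enc < 32; enc++) {
        assert(as_Register(as_VMReg(enc)) == enc);           // round trip must hold
      }
      printf("1:1 VMReg mapping round-trips for all 32 GPRs\n");
      return 0;
    }
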
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Tue Oct 12 18:28:27 2010 +0800
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Fri Oct 15 20:37:51 2010 +0000
@@ -273,6 +273,11 @@
   FUNCTION_CASE(entry, SharedRuntime::lmul);
   FUNCTION_CASE(entry, SharedRuntime::lrem);
   FUNCTION_CASE(entry, SharedRuntime::lrem);
+  FUNCTION_CASE(entry, SharedRuntime::ladd);
+  FUNCTION_CASE(entry, SharedRuntime::lsub);
+  FUNCTION_CASE(entry, SharedRuntime::lshl);
+  FUNCTION_CASE(entry, SharedRuntime::lshr);
+  FUNCTION_CASE(entry, SharedRuntime::lushr);
   FUNCTION_CASE(entry, SharedRuntime::dtrace_method_entry);
   FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit);
   FUNCTION_CASE(entry, trace_block_entry);
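
Note: the FUNCTION_CASE entries above register the new long helpers so the C1 runtime can map their entry addresses back to printable names. A small illustrative stand-in for that idiom (hypothetical, simplified macro; not the HotSpot definition):

    #include <stdint.h>
    #include <stdio.h>

    typedef long long jlong;
    static jlong ladd(jlong y, jlong x) { return x + y; }
    static jlong lsub(jlong y, jlong x) { return x - y; }

    // Map a runtime entry address back to a printable name for debug output.
    #define FUNCTION_CASE(entry, f) \
      if ((entry) == (intptr_t)(f)) return #f

    static const char* runtime_name(intptr_t entry) {
      FUNCTION_CASE(entry, ladd);
      FUNCTION_CASE(entry, lsub);
      return "<unknown runtime entry>";
    }

    int main() {
      printf("%s\n", runtime_name((intptr_t)lsub));   // prints "lsub"
      return 0;
    }
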
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp	Tue Oct 12 18:28:27 2010 +0800
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp	Fri Oct 15 20:37:51 2010 +0000
@@ -1114,16 +1114,16 @@
   // Some compilers may not support the compilation of natives.
   // QQQ this needs some work ought to only record not compilable at
   // the specified level
-  if (is_native &&
-      (!CICompileNatives || !compiler(comp_level)->supports_native())) {
+  if (is_native /*&&
+      (!CICompileNatives || !compiler(comp_level)->supports_native())*/) {
     method->set_not_compilable();
     return true;
   }
 
   bool is_osr = (osr_bci != standard_entry_bci);
   // Some compilers may not support on stack replacement.
-  if (is_osr /*&&
-      (!CICompileOSR || !compiler(comp_level)->supports_osr())*/) {
+  if (is_osr &&
+      (!CICompileOSR || !compiler(comp_level)->supports_osr())) {
     method->set_not_osr_compilable();
     return true;
   }
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Tue Oct 12 18:28:27 2010 +0800
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Fri Oct 15 20:37:51 2010 +0000
@@ -163,6 +163,31 @@
 JRT_END
 
 
+JRT_LEAF(jlong, SharedRuntime::ladd(jlong y, jlong x))
+  return x + y;
+JRT_END
+
+
+JRT_LEAF(jlong, SharedRuntime::lsub(jlong y, jlong x))
+  return x - y;
+JRT_END
+
+
+JRT_LEAF(jlong, SharedRuntime::lshl(int y, jlong x))
+  return x << y;
+JRT_END
+
+
+JRT_LEAF(jlong, SharedRuntime::lshr(int y, jlong x))
+  return x >> y;
+JRT_END
+
+
+JRT_LEAF(ujlong, SharedRuntime::lushr(int y, ujlong x))
+  return x >> y;
+JRT_END
+
+
 const juint  float_sign_mask  = 0x7FFFFFFF;
 const juint  float_infinity   = 0x7F800000;
 const julong double_sign_mask = CONST64(0x7FFFFFFFFFFFFFFF);
--- a/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Tue Oct 12 18:28:27 2010 +0800
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Fri Oct 15 20:37:51 2010 +0000
@@ -72,6 +72,13 @@
   static jlong   lmul(jlong y, jlong x);
   static jlong   ldiv(jlong y, jlong x);
   static jlong   lrem(jlong y, jlong x);
+  static jlong   ladd(jlong y, jlong x);
+  static jlong   lsub(jlong y, jlong x);
+
+  static jlong   lshl(int y, jlong x);
+  static jlong   lshr(int y, jlong x);
+  static ujlong   lushr(int y, ujlong x);
+
 
   // float and double remainder
   static jfloat  frem(jfloat  x, jfloat  y);