summaryrefslogtreecommitdiffstats
path: root/vm/compiler/codegen/x86
diff options
context:
space:
mode:
authorElliott Hughes <enh@google.com>2013-04-01 23:04:54 +0000
committerGerrit Code Review <noreply-gerritcodereview@google.com>2013-04-01 23:04:54 +0000
commitbf452d484529d0022f08cb2efcf31bc30f7ed7d8 (patch)
tree1a2cfbbc992b0e47f763f5d2c6b9256fc63f03a1 /vm/compiler/codegen/x86
parentd335cba5c2868922abf2fa09432f8a938907e2da (diff)
parent19eb287ac848f10e03ca2614bf53bd9d1ddd3724 (diff)
downloadandroid_dalvik-bf452d484529d0022f08cb2efcf31bc30f7ed7d8.tar.gz
android_dalvik-bf452d484529d0022f08cb2efcf31bc30f7ed7d8.tar.bz2
android_dalvik-bf452d484529d0022f08cb2efcf31bc30f7ed7d8.zip
Merge "[x86] Fix errors with WITH_JIT_TUNING defined"
Diffstat (limited to 'vm/compiler/codegen/x86')
-rw-r--r--vm/compiler/codegen/x86/CodegenInterface.cpp8
-rw-r--r--vm/compiler/codegen/x86/LowerInvoke.cpp11
-rw-r--r--vm/compiler/codegen/x86/LowerJump.cpp14
-rw-r--r--vm/compiler/codegen/x86/LowerReturn.cpp6
4 files changed, 37 insertions, 2 deletions
diff --git a/vm/compiler/codegen/x86/CodegenInterface.cpp b/vm/compiler/codegen/x86/CodegenInterface.cpp
index 0f516b311..46f097971 100644
--- a/vm/compiler/codegen/x86/CodegenInterface.cpp
+++ b/vm/compiler/codegen/x86/CodegenInterface.cpp
@@ -327,7 +327,7 @@ static bool inlineCachePatchEnqueue(PredictedChainingCell *cellAddr,
cellAddr->clazz = newContent->clazz;
//cacheflush((intptr_t) cellAddr, (intptr_t) (cellAddr+1), 0);
#endif
-#if defined(IA_JIT_TUNING)
+#if defined(WITH_JIT_TUNING)
gDvmJit.icPatchInit++;
#endif
COMPILER_TRACE_CHAINING(
@@ -720,6 +720,12 @@ static void handleInvokePredictedChainingCell(CompilationUnit *cUnit, int blockI
#ifndef PREDICTED_CHAINING
//assume rPC for callee->insns in %ebx
scratchRegs[0] = PhysicalReg_EAX;
+#if defined(WITH_JIT_TUNING)
+ /* Predicted chaining is not enabled. Fall back to interpreter and
+ * indicate that predicted chaining was not done.
+ */
+ move_imm_to_reg(OpndSize_32, kInlineCacheMiss, PhysicalReg_EDX, true);
+#endif
call_dvmJitToInterpTraceSelectNoChain();
#else
/* make sure section for predicted chaining cell is 4-byte aligned */
diff --git a/vm/compiler/codegen/x86/LowerInvoke.cpp b/vm/compiler/codegen/x86/LowerInvoke.cpp
index 3d02190fa..10bc197f1 100644
--- a/vm/compiler/codegen/x86/LowerInvoke.cpp
+++ b/vm/compiler/codegen/x86/LowerInvoke.cpp
@@ -833,6 +833,12 @@ int common_invokeArgsDone(ArgsDoneType form, bool isJitFull) {
if(callNoChain) {
scratchRegs[0] = PhysicalReg_EAX;
load_effective_addr(8, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
+#if defined(WITH_JIT_TUNING)
+ /* Predicted chaining failed. Fall back to interpreter and indicate
+ * inline cache miss.
+ */
+ move_imm_to_reg(OpndSize_32, kInlineCacheMiss, PhysicalReg_EDX, true);
+#endif
call_dvmJitToInterpTraceSelectNoChain(); //input: rPC in %ebx
} else {
//jump to the stub at (%esp)
@@ -906,6 +912,11 @@ void generate_invokeNative(bool generateForNcg) {
//move rPC by 6 (3 bytecode units for INVOKE)
alu_binary_imm_reg(OpndSize_32, add_opc, 6, PhysicalReg_EBX, true);
scratchRegs[0] = PhysicalReg_EAX;
+#if defined(WITH_JIT_TUNING)
+ /* Return address is not in the code cache. Indicate that execution is continuing in the interpreter.
+ */
+ move_imm_to_reg(OpndSize_32, kCallsiteInterpreted, PhysicalReg_EDX, true);
+#endif
call_dvmJitToInterpTraceSelectNoChain(); //rPC in %ebx
}
return;
diff --git a/vm/compiler/codegen/x86/LowerJump.cpp b/vm/compiler/codegen/x86/LowerJump.cpp
index 2b10d6bbf..d4b0df3ef 100644
--- a/vm/compiler/codegen/x86/LowerJump.cpp
+++ b/vm/compiler/codegen/x86/LowerJump.cpp
@@ -1163,6 +1163,13 @@ int op_packed_switch() {
//get rPC, %eax has the relative PC offset
alu_binary_imm_reg(OpndSize_32, add_opc, (int)rPC, PhysicalReg_EAX, true);
scratchRegs[0] = PhysicalReg_SCRATCH_2;
+#if defined(WITH_JIT_TUNING)
+ /* Fall back to interpreter after resolving address of switch target.
+ * Indicate kSwitchOverflow. Note: this is not an actual overflow; it simply
+ * lets us count how many times we return from a switch.
+ */
+ move_imm_to_mem(OpndSize_32, kSwitchOverflow, 0, PhysicalReg_ESP, true);
+#endif
jumpToInterpNoChain();
rPC += 3;
return 0;
@@ -1220,6 +1227,13 @@ int op_sparse_switch() {
//get rPC, %eax has the relative PC offset
alu_binary_imm_reg(OpndSize_32, add_opc, (int)rPC, PhysicalReg_EAX, true);
scratchRegs[0] = PhysicalReg_SCRATCH_2;
+#if defined(WITH_JIT_TUNING)
+ /* Fall back to interpreter after resolving address of switch target.
+ * Indicate kSwitchOverflow. Note: this is not an actual overflow; it simply
+ * lets us count how many times we return from a switch.
+ */
+ move_imm_to_mem(OpndSize_32, kSwitchOverflow, 0, PhysicalReg_ESP, true);
+#endif
jumpToInterpNoChain();
rPC += 3;
return 0;
diff --git a/vm/compiler/codegen/x86/LowerReturn.cpp b/vm/compiler/codegen/x86/LowerReturn.cpp
index 928c05c9f..294d6b59d 100644
--- a/vm/compiler/codegen/x86/LowerReturn.cpp
+++ b/vm/compiler/codegen/x86/LowerReturn.cpp
@@ -95,7 +95,11 @@ int common_returnFromMethod() {
typedef void (*vmHelper)(int);
vmHelper funcPtr = dvmJitToInterpNoChainNoProfile; //%eax is the input
move_imm_to_reg(OpndSize_32, (int)funcPtr, C_SCRATCH_1, isScratchPhysical);
-
+#if defined(WITH_JIT_TUNING)
+ /* Return address is not in the code cache. Indicate that execution is continuing in the interpreter.
+ */
+ move_imm_to_mem(OpndSize_32, kCallsiteInterpreted, 0, PhysicalReg_ESP, true);
+#endif
unconditional_jump_reg(C_SCRATCH_1, isScratchPhysical);
touchEax();
return 0;