Diffstat (limited to 'vm')
-rw-r--r--  vm/compiler/template/armv5te/TEMPLATE_CMPL_DOUBLE.S           8
-rw-r--r--  vm/compiler/template/armv5te/TEMPLATE_CMPL_FLOAT.S            8
-rw-r--r--  vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S   4
-rw-r--r--  vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S  8
-rw-r--r--  vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S  6
-rw-r--r--  vm/compiler/template/armv5te/TEMPLATE_RETURN.S                4
-rw-r--r--  vm/compiler/template/armv5te/footer.S                        20
-rw-r--r--  vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S   64
-rw-r--r--  vm/compiler/template/out/CompilerTemplateAsm-armv5te.S       96
-rw-r--r--  vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S  64
-rw-r--r--  vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S       64
-rw-r--r--  vm/mterp/armv5te/footer.S                                     8
-rw-r--r--  vm/mterp/out/InterpAsm-armv5te-vfp.S                          8
-rw-r--r--  vm/mterp/out/InterpAsm-armv5te.S                              8
-rw-r--r--  vm/mterp/out/InterpAsm-armv7-a-neon.S                         8
-rw-r--r--  vm/mterp/out/InterpAsm-armv7-a.S                              8
16 files changed, 193 insertions(+), 193 deletions(-)
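
Every hunk in this change applies the same mechanical rewrite to Dalvik's hand-written ARM assembly (the files under out/ are the generated copies of the armv5te templates): the two-instruction call idiom "mov lr, pc" / "ldr pc, <literal>" becomes "ldr ip, <literal>" / "blx ip". A minimal sketch of the two sequences follows; the target label is illustrative, not from the patch, and the stated motivation is the usual rationale for this rewrite (interworking safety plus return-address prediction), not quoted from the commit message:

    @ Old idiom: in ARM state, reading pc yields the current
    @ instruction's address + 8, so lr ends up pointing just past the
    @ ldr. The load into pc does branch, but the pair is not a real
    @ bl/blx: bit 0 of lr is never set (fragile if the surrounding
    @ code is ever assembled as Thumb), and return-address predictors
    @ typically do not recognize the sequence as a call.
    mov     lr, pc
    ldr     pc, .Lsome_func     @ PIC call through a literal-pool slot

    @ New idiom: blx sets lr with the correct return-state bit and
    @ interworks to an ARM or Thumb callee. ip (r12) is the AAPCS
    @ intra-procedure-call scratch register, so it is free to be
    @ clobbered at a call site.
    ldr     ip, .Lsome_func
    blx     ip

.Lsome_func:
    .word   dvmFastMethodTraceEnter    @ any extern function; example only
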
diff --git a/vm/compiler/template/armv5te/TEMPLATE_CMPL_DOUBLE.S b/vm/compiler/template/armv5te/TEMPLATE_CMPL_DOUBLE.S
index 4fd5a71d7..23614e913 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_CMPL_DOUBLE.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_CMPL_DOUBLE.S
@@ -16,8 +16,8 @@
/* op vAA, vBB, vCC */
push {r0-r3} @ save operands
mov r11, lr @ save return address
- mov lr, pc
- ldr pc, .L__aeabi_cdcmple @ PIC way of "bl __aeabi_cdcmple"
+ ldr ip, .L__aeabi_cdcmple @ PIC way of "bl __aeabi_cdcmple"
+ blx ip
bhi .L${opcode}_gt_or_nan @ C set and Z clear, disambiguate
mvncc r0, #0 @ (less than) r1<- -1
moveq r0, #0 @ (equal) r1<- 0, trumps less than
@@ -30,8 +30,8 @@
.L${opcode}_gt_or_nan:
pop {r2-r3} @ restore operands in reverse order
pop {r0-r1} @ restore operands in reverse order
- mov lr, pc
- ldr pc, .L__aeabi_cdcmple @ r0<- Z set if eq, C clear if <
+ ldr ip, .L__aeabi_cdcmple @ r0<- Z set if eq, C clear if <
+ blx ip
movcc r0, #1 @ (greater than) r1<- 1
bxcc r11
$naninst @ r1<- 1 or -1 for NaN
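
One detail worth noting while reading these compare templates: __aeabi_cdcmple (and the float variant __aeabi_cfcmple below) reports its three-way result purely in the CPSR flags, not in r0, which is why the call instruction can change without disturbing the result handling. A sketch of how the flags decode, matching the comments in the hunks above (the label name here is illustrative):

    @ after the blx to __aeabi_cdcmple:
    @   Z set           -> operands equal
    @   C clear         -> first operand less than second
    @   C set, Z clear  -> greater than, or unordered (NaN)
    bhi     .Lgt_or_nan     @ "hi" tests C set && Z clear: > or NaN
    mvncc   r0, #0          @ C clear: r0 <- -1 (less than)
    moveq   r0, #0          @ Z set:   r0 <- 0  (equal)
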
diff --git a/vm/compiler/template/armv5te/TEMPLATE_CMPL_FLOAT.S b/vm/compiler/template/armv5te/TEMPLATE_CMPL_FLOAT.S
index d0f2bec93..f9293e6d3 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_CMPL_FLOAT.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_CMPL_FLOAT.S
@@ -36,8 +36,8 @@
mov r9, r0 @ Save copies - we may need to redo
mov r10, r1
mov r11, lr @ save return address
- mov lr, pc
- ldr pc, .L__aeabi_cfcmple @ cmp <=: C clear if <, Z set if eq
+ ldr ip, .L__aeabi_cfcmple @ cmp <=: C clear if <, Z set if eq
+ blx ip
bhi .L${opcode}_gt_or_nan @ C set and Z clear, disambiguate
mvncc r0, #0 @ (less than) r0<- -1
moveq r0, #0 @ (equal) r0<- 0, trumps less than
@@ -48,8 +48,8 @@
.L${opcode}_gt_or_nan:
mov r0, r10 @ restore in reverse order
mov r1, r9
- mov lr, pc
- ldr pc, .L__aeabi_cfcmple @ r0<- Z set if eq, C clear if <
+ ldr ip, .L__aeabi_cfcmple @ r0<- Z set if eq, C clear if <
+ blx ip
movcc r0, #1 @ (greater than) r1<- 1
bxcc r11
$naninst @ r1<- 1 or -1 for NaN
diff --git a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S
index 03b97a479..99a17abdc 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S
@@ -41,8 +41,8 @@ $chaintgt:
stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore registers
#endif
diff --git a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S
index 2a73c22d5..d8661d983 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S
@@ -44,8 +44,8 @@
mov r0, r2
mov r1, r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -54,8 +54,8 @@
#if defined(TEMPLATE_INLINE_PROFILING)
ldmfd sp!, {r0-r1} @ restore r2 and r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastNativeMethodTraceExit
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
#endif
@ native return; r10=newSaveArea
@ equivalent to dvmPopJniLocals
diff --git a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S
index a7a09614a..b7015eb5f 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S
@@ -48,8 +48,8 @@
stmfd sp!, {r0-r3} @ preserve r0-r3
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -57,4 +57,4 @@
#if defined(WITH_JIT_TUNING)
mov r0, #kInlineCacheMiss
#endif
- mov pc, r10 @ dvmJitToInterpTraceSelectNoChain
+ bx r10 @ dvmJitToInterpTraceSelectNoChain
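
Alongside the call-idiom change, the patch also retires direct writes to pc for computed branches, as in the line just above: "mov pc, rN" becomes "bx rN". A one-line sketch of the difference (register choice taken from the hunk; the explanation is the standard architectural one, not from the commit message):

    mov     pc, r10     @ old: cannot switch ARM/Thumb state on
                        @ pre-ARMv7 cores, and deprecated on later ones
    bx      r10         @ new: interworking branch on every
                        @ architecture that implements Thumb
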
diff --git a/vm/compiler/template/armv5te/TEMPLATE_RETURN.S b/vm/compiler/template/armv5te/TEMPLATE_RETURN.S
index d074c9eaa..b10afcf3f 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_RETURN.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_RETURN.S
@@ -9,8 +9,8 @@
stmfd sp!, {r0-r2,lr} @ preserve live registers
mov r0, r6
@ r0=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceExit
+ ldr ip, .LdvmFastMethodTraceExit
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore live registers
#endif
SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
diff --git a/vm/compiler/template/armv5te/footer.S b/vm/compiler/template/armv5te/footer.S
index 16660ae3b..001b80b2f 100644
--- a/vm/compiler/template/armv5te/footer.S
+++ b/vm/compiler/template/armv5te/footer.S
@@ -29,20 +29,20 @@
stmfd sp!, {r0-r3}
mov r0, r2
mov r1, r6
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3}
- mov lr, pc
- ldr pc, [r2, #offMethod_nativeFunc]
+ ldr ip, [r2, #offMethod_nativeFunc]
+ blx ip
ldmfd sp!, {r0-r1}
- mov lr, pc
- ldr pc, .LdvmFastNativeMethodTraceExit
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
b 212f
121:
- mov lr, pc
- ldr pc, [r2, #offMethod_nativeFunc]
+ ldr ip, [r2, #offMethod_nativeFunc]
+ blx ip
212:
@ native return; r10=newSaveArea
@@ -68,7 +68,7 @@
#if defined(WITH_JIT_TUNING)
mov r0, #kCallsiteInterpreted
#endif
- mov pc, r1
+ bx r1
/*
* On entry:
@@ -85,7 +85,7 @@
ldr r1, .LdvmMterpCommonExceptionThrown @ PIC way of getting &func
ldr rIBASE, .LdvmAsmInstructionStart @ same as above
mov rPC, r0 @ reload the faulting Dalvik address
- mov pc, r1 @ branch to dvmMterpCommonExceptionThrown
+ bx r1 @ branch to dvmMterpCommonExceptionThrown
.align 2
.LdvmAsmInstructionStart:
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S b/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
index 331d90213..93a677e4f 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
@@ -170,8 +170,8 @@ dvmCompiler_TEMPLATE_RETURN:
stmfd sp!, {r0-r2,lr} @ preserve live registers
mov r0, r6
@ r0=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceExit
+ ldr ip, .LdvmFastMethodTraceExit
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore live registers
#endif
SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
@@ -272,8 +272,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT:
stmfd sp!, {r0-r3} @ preserve r0-r3
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -281,7 +281,7 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT:
#if defined(WITH_JIT_TUNING)
mov r0, #kInlineCacheMiss
#endif
- mov pc, r10 @ dvmJitToInterpTraceSelectNoChain
+ bx r10 @ dvmJitToInterpTraceSelectNoChain
/* ------------------------------ */
.balign 4
@@ -330,8 +330,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN:
stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore registers
#endif
@@ -453,8 +453,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE:
mov r0, r2
mov r1, r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -463,8 +463,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE:
#if defined(TEMPLATE_INLINE_PROFILING)
ldmfd sp!, {r0-r1} @ restore r2 and r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastNativeMethodTraceExit
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
#endif
@ native return; r10=newSaveArea
@ equivalent to dvmPopJniLocals
@@ -1508,8 +1508,8 @@ dvmCompiler_TEMPLATE_RETURN_PROF:
stmfd sp!, {r0-r2,lr} @ preserve live registers
mov r0, r6
@ r0=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceExit
+ ldr ip, .LdvmFastMethodTraceExit
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore live registers
#endif
SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
@@ -1614,8 +1614,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT_PROF:
stmfd sp!, {r0-r3} @ preserve r0-r3
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -1623,7 +1623,7 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT_PROF:
#if defined(WITH_JIT_TUNING)
mov r0, #kInlineCacheMiss
#endif
- mov pc, r10 @ dvmJitToInterpTraceSelectNoChain
+ bx r10 @ dvmJitToInterpTraceSelectNoChain
#undef TEMPLATE_INLINE_PROFILING
@@ -1676,8 +1676,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN_PROF:
stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore registers
#endif
@@ -1807,8 +1807,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
mov r0, r2
mov r1, r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -1817,8 +1817,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
#if defined(TEMPLATE_INLINE_PROFILING)
ldmfd sp!, {r0-r1} @ restore r2 and r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastNativeMethodTraceExit
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
#endif
@ native return; r10=newSaveArea
@ equivalent to dvmPopJniLocals
@@ -1880,20 +1880,20 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
stmfd sp!, {r0-r3}
mov r0, r2
mov r1, r6
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3}
- mov lr, pc
- ldr pc, [r2, #offMethod_nativeFunc]
+ ldr ip, [r2, #offMethod_nativeFunc]
+ blx ip
ldmfd sp!, {r0-r1}
- mov lr, pc
- ldr pc, .LdvmFastNativeMethodTraceExit
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
b 212f
121:
- mov lr, pc
- ldr pc, [r2, #offMethod_nativeFunc]
+ ldr ip, [r2, #offMethod_nativeFunc]
+ blx ip
212:
@ native return; r10=newSaveArea
@@ -1919,7 +1919,7 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
#if defined(WITH_JIT_TUNING)
mov r0, #kCallsiteInterpreted
#endif
- mov pc, r1
+ bx r1
/*
* On entry:
@@ -1936,7 +1936,7 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
ldr r1, .LdvmMterpCommonExceptionThrown @ PIC way of getting &func
ldr rIBASE, .LdvmAsmInstructionStart @ same as above
mov rPC, r0 @ reload the faulting Dalvik address
- mov pc, r1 @ branch to dvmMterpCommonExceptionThrown
+ bx r1 @ branch to dvmMterpCommonExceptionThrown
.align 2
.LdvmAsmInstructionStart:
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S b/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
index 044843e1c..b9de01fc6 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
@@ -170,8 +170,8 @@ dvmCompiler_TEMPLATE_RETURN:
stmfd sp!, {r0-r2,lr} @ preserve live registers
mov r0, r6
@ r0=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceExit
+ ldr ip, .LdvmFastMethodTraceExit
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore live registers
#endif
SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
@@ -272,8 +272,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT:
stmfd sp!, {r0-r3} @ preserve r0-r3
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -281,7 +281,7 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT:
#if defined(WITH_JIT_TUNING)
mov r0, #kInlineCacheMiss
#endif
- mov pc, r10 @ dvmJitToInterpTraceSelectNoChain
+ bx r10 @ dvmJitToInterpTraceSelectNoChain
/* ------------------------------ */
.balign 4
@@ -330,8 +330,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN:
stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore registers
#endif
@@ -453,8 +453,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE:
mov r0, r2
mov r1, r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -463,8 +463,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE:
#if defined(TEMPLATE_INLINE_PROFILING)
ldmfd sp!, {r0-r1} @ restore r2 and r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastNativeMethodTraceExit
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
#endif
@ native return; r10=newSaveArea
@ equivalent to dvmPopJniLocals
@@ -514,8 +514,8 @@ dvmCompiler_TEMPLATE_CMPG_DOUBLE:
/* op vAA, vBB, vCC */
push {r0-r3} @ save operands
mov r11, lr @ save return address
- mov lr, pc
- ldr pc, .L__aeabi_cdcmple @ PIC way of "bl __aeabi_cdcmple"
+ ldr ip, .L__aeabi_cdcmple @ PIC way of "bl __aeabi_cdcmple"
+ blx ip
bhi .LTEMPLATE_CMPG_DOUBLE_gt_or_nan @ C set and Z clear, disambiguate
mvncc r0, #0 @ (less than) r1<- -1
moveq r0, #0 @ (equal) r1<- 0, trumps less than
@@ -528,8 +528,8 @@ dvmCompiler_TEMPLATE_CMPG_DOUBLE:
.LTEMPLATE_CMPG_DOUBLE_gt_or_nan:
pop {r2-r3} @ restore operands in reverse order
pop {r0-r1} @ restore operands in reverse order
- mov lr, pc
- ldr pc, .L__aeabi_cdcmple @ r0<- Z set if eq, C clear if <
+ ldr ip, .L__aeabi_cdcmple @ r0<- Z set if eq, C clear if <
+ blx ip
movcc r0, #1 @ (greater than) r1<- 1
bxcc r11
mov r0, #1 @ r1<- 1 or -1 for NaN
@@ -558,8 +558,8 @@ dvmCompiler_TEMPLATE_CMPL_DOUBLE:
/* op vAA, vBB, vCC */
push {r0-r3} @ save operands
mov r11, lr @ save return address
- mov lr, pc
- ldr pc, .L__aeabi_cdcmple @ PIC way of "bl __aeabi_cdcmple"
+ ldr ip, .L__aeabi_cdcmple @ PIC way of "bl __aeabi_cdcmple"
+ blx ip
bhi .LTEMPLATE_CMPL_DOUBLE_gt_or_nan @ C set and Z clear, disambiguate
mvncc r0, #0 @ (less than) r1<- -1
moveq r0, #0 @ (equal) r1<- 0, trumps less than
@@ -572,8 +572,8 @@ dvmCompiler_TEMPLATE_CMPL_DOUBLE:
.LTEMPLATE_CMPL_DOUBLE_gt_or_nan:
pop {r2-r3} @ restore operands in reverse order
pop {r0-r1} @ restore operands in reverse order
- mov lr, pc
- ldr pc, .L__aeabi_cdcmple @ r0<- Z set if eq, C clear if <
+ ldr ip, .L__aeabi_cdcmple @ r0<- Z set if eq, C clear if <
+ blx ip
movcc r0, #1 @ (greater than) r1<- 1
bxcc r11
mvn r0, #0 @ r1<- 1 or -1 for NaN
@@ -622,8 +622,8 @@ dvmCompiler_TEMPLATE_CMPG_FLOAT:
mov r9, r0 @ Save copies - we may need to redo
mov r10, r1
mov r11, lr @ save return address
- mov lr, pc
- ldr pc, .L__aeabi_cfcmple @ cmp <=: C clear if <, Z set if eq
+ ldr ip, .L__aeabi_cfcmple @ cmp <=: C clear if <, Z set if eq
+ blx ip
bhi .LTEMPLATE_CMPG_FLOAT_gt_or_nan @ C set and Z clear, disambiguate
mvncc r0, #0 @ (less than) r0<- -1
moveq r0, #0 @ (equal) r0<- 0, trumps less than
@@ -634,8 +634,8 @@ dvmCompiler_TEMPLATE_CMPG_FLOAT:
.LTEMPLATE_CMPG_FLOAT_gt_or_nan:
mov r0, r10 @ restore in reverse order
mov r1, r9
- mov lr, pc
- ldr pc, .L__aeabi_cfcmple @ r0<- Z set if eq, C clear if <
+ ldr ip, .L__aeabi_cfcmple @ r0<- Z set if eq, C clear if <
+ blx ip
movcc r0, #1 @ (greater than) r1<- 1
bxcc r11
mov r0, #1 @ r1<- 1 or -1 for NaN
@@ -684,8 +684,8 @@ dvmCompiler_TEMPLATE_CMPL_FLOAT:
mov r9, r0 @ Save copies - we may need to redo
mov r10, r1
mov r11, lr @ save return address
- mov lr, pc
- ldr pc, .L__aeabi_cfcmple @ cmp <=: C clear if <, Z set if eq
+ ldr ip, .L__aeabi_cfcmple @ cmp <=: C clear if <, Z set if eq
+ blx ip
bhi .LTEMPLATE_CMPL_FLOAT_gt_or_nan @ C set and Z clear, disambiguate
mvncc r0, #0 @ (less than) r0<- -1
moveq r0, #0 @ (equal) r0<- 0, trumps less than
@@ -696,8 +696,8 @@ dvmCompiler_TEMPLATE_CMPL_FLOAT:
.LTEMPLATE_CMPL_FLOAT_gt_or_nan:
mov r0, r10 @ restore in reverse order
mov r1, r9
- mov lr, pc
- ldr pc, .L__aeabi_cfcmple @ r0<- Z set if eq, C clear if <
+ ldr ip, .L__aeabi_cfcmple @ r0<- Z set if eq, C clear if <
+ blx ip
movcc r0, #1 @ (greater than) r1<- 1
bxcc r11
mvn r0, #0 @ r1<- 1 or -1 for NaN
@@ -1239,8 +1239,8 @@ dvmCompiler_TEMPLATE_RETURN_PROF:
stmfd sp!, {r0-r2,lr} @ preserve live registers
mov r0, r6
@ r0=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceExit
+ ldr ip, .LdvmFastMethodTraceExit
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore live registers
#endif
SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
@@ -1345,8 +1345,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT_PROF:
stmfd sp!, {r0-r3} @ preserve r0-r3
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -1354,7 +1354,7 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT_PROF:
#if defined(WITH_JIT_TUNING)
mov r0, #kInlineCacheMiss
#endif
- mov pc, r10 @ dvmJitToInterpTraceSelectNoChain
+ bx r10 @ dvmJitToInterpTraceSelectNoChain
#undef TEMPLATE_INLINE_PROFILING
@@ -1407,8 +1407,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN_PROF:
stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore registers
#endif
@@ -1538,8 +1538,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
mov r0, r2
mov r1, r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -1548,8 +1548,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
#if defined(TEMPLATE_INLINE_PROFILING)
ldmfd sp!, {r0-r1} @ restore r2 and r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastNativeMethodTraceExit
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
#endif
@ native return; r10=newSaveArea
@ equivalent to dvmPopJniLocals
@@ -1611,20 +1611,20 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
stmfd sp!, {r0-r3}
mov r0, r2
mov r1, r6
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3}
- mov lr, pc
- ldr pc, [r2, #offMethod_nativeFunc]
+ ldr ip, [r2, #offMethod_nativeFunc]
+ blx ip
ldmfd sp!, {r0-r1}
- mov lr, pc
- ldr pc, .LdvmFastNativeMethodTraceExit
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
b 212f
121:
- mov lr, pc
- ldr pc, [r2, #offMethod_nativeFunc]
+ ldr ip, [r2, #offMethod_nativeFunc]
+ blx ip
212:
@ native return; r10=newSaveArea
@@ -1650,7 +1650,7 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
#if defined(WITH_JIT_TUNING)
mov r0, #kCallsiteInterpreted
#endif
- mov pc, r1
+ bx r1
/*
* On entry:
@@ -1667,7 +1667,7 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
ldr r1, .LdvmMterpCommonExceptionThrown @ PIC way of getting &func
ldr rIBASE, .LdvmAsmInstructionStart @ same as above
mov rPC, r0 @ reload the faulting Dalvik address
- mov pc, r1 @ branch to dvmMterpCommonExceptionThrown
+ bx r1 @ branch to dvmMterpCommonExceptionThrown
.align 2
.LdvmAsmInstructionStart:
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S
index ba798e06d..23f281228 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S
@@ -170,8 +170,8 @@ dvmCompiler_TEMPLATE_RETURN:
stmfd sp!, {r0-r2,lr} @ preserve live registers
mov r0, r6
@ r0=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceExit
+ ldr ip, .LdvmFastMethodTraceExit
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore live registers
#endif
SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
@@ -272,8 +272,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT:
stmfd sp!, {r0-r3} @ preserve r0-r3
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -281,7 +281,7 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT:
#if defined(WITH_JIT_TUNING)
mov r0, #kInlineCacheMiss
#endif
- mov pc, r10 @ dvmJitToInterpTraceSelectNoChain
+ bx r10 @ dvmJitToInterpTraceSelectNoChain
/* ------------------------------ */
.balign 4
@@ -330,8 +330,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN:
stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore registers
#endif
@@ -453,8 +453,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE:
mov r0, r2
mov r1, r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -463,8 +463,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE:
#if defined(TEMPLATE_INLINE_PROFILING)
ldmfd sp!, {r0-r1} @ restore r2 and r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastNativeMethodTraceExit
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
#endif
@ native return; r10=newSaveArea
@ equivalent to dvmPopJniLocals
@@ -1508,8 +1508,8 @@ dvmCompiler_TEMPLATE_RETURN_PROF:
stmfd sp!, {r0-r2,lr} @ preserve live registers
mov r0, r6
@ r0=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceExit
+ ldr ip, .LdvmFastMethodTraceExit
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore live registers
#endif
SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
@@ -1614,8 +1614,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT_PROF:
stmfd sp!, {r0-r3} @ preserve r0-r3
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -1623,7 +1623,7 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT_PROF:
#if defined(WITH_JIT_TUNING)
mov r0, #kInlineCacheMiss
#endif
- mov pc, r10 @ dvmJitToInterpTraceSelectNoChain
+ bx r10 @ dvmJitToInterpTraceSelectNoChain
#undef TEMPLATE_INLINE_PROFILING
@@ -1676,8 +1676,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN_PROF:
stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore registers
#endif
@@ -1807,8 +1807,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
mov r0, r2
mov r1, r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -1817,8 +1817,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
#if defined(TEMPLATE_INLINE_PROFILING)
ldmfd sp!, {r0-r1} @ restore r2 and r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastNativeMethodTraceExit
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
#endif
@ native return; r10=newSaveArea
@ equivalent to dvmPopJniLocals
@@ -1880,20 +1880,20 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
stmfd sp!, {r0-r3}
mov r0, r2
mov r1, r6
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3}
- mov lr, pc
- ldr pc, [r2, #offMethod_nativeFunc]
+ ldr ip, [r2, #offMethod_nativeFunc]
+ blx ip
ldmfd sp!, {r0-r1}
- mov lr, pc
- ldr pc, .LdvmFastNativeMethodTraceExit
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
b 212f
121:
- mov lr, pc
- ldr pc, [r2, #offMethod_nativeFunc]
+ ldr ip, [r2, #offMethod_nativeFunc]
+ blx ip
212:
@ native return; r10=newSaveArea
@@ -1919,7 +1919,7 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
#if defined(WITH_JIT_TUNING)
mov r0, #kCallsiteInterpreted
#endif
- mov pc, r1
+ bx r1
/*
* On entry:
@@ -1936,7 +1936,7 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
ldr r1, .LdvmMterpCommonExceptionThrown @ PIC way of getting &func
ldr rIBASE, .LdvmAsmInstructionStart @ same as above
mov rPC, r0 @ reload the faulting Dalvik address
- mov pc, r1 @ branch to dvmMterpCommonExceptionThrown
+ bx r1 @ branch to dvmMterpCommonExceptionThrown
.align 2
.LdvmAsmInstructionStart:
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
index 825ac408a..360ebfa8b 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
@@ -170,8 +170,8 @@ dvmCompiler_TEMPLATE_RETURN:
stmfd sp!, {r0-r2,lr} @ preserve live registers
mov r0, r6
@ r0=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceExit
+ ldr ip, .LdvmFastMethodTraceExit
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore live registers
#endif
SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
@@ -272,8 +272,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT:
stmfd sp!, {r0-r3} @ preserve r0-r3
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -281,7 +281,7 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT:
#if defined(WITH_JIT_TUNING)
mov r0, #kInlineCacheMiss
#endif
- mov pc, r10 @ dvmJitToInterpTraceSelectNoChain
+ bx r10 @ dvmJitToInterpTraceSelectNoChain
/* ------------------------------ */
.balign 4
@@ -330,8 +330,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN:
stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore registers
#endif
@@ -453,8 +453,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE:
mov r0, r2
mov r1, r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -463,8 +463,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE:
#if defined(TEMPLATE_INLINE_PROFILING)
ldmfd sp!, {r0-r1} @ restore r2 and r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastNativeMethodTraceExit
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
#endif
@ native return; r10=newSaveArea
@ equivalent to dvmPopJniLocals
@@ -1508,8 +1508,8 @@ dvmCompiler_TEMPLATE_RETURN_PROF:
stmfd sp!, {r0-r2,lr} @ preserve live registers
mov r0, r6
@ r0=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceExit
+ ldr ip, .LdvmFastMethodTraceExit
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore live registers
#endif
SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
@@ -1614,8 +1614,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT_PROF:
stmfd sp!, {r0-r3} @ preserve r0-r3
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -1623,7 +1623,7 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT_PROF:
#if defined(WITH_JIT_TUNING)
mov r0, #kInlineCacheMiss
#endif
- mov pc, r10 @ dvmJitToInterpTraceSelectNoChain
+ bx r10 @ dvmJitToInterpTraceSelectNoChain
#undef TEMPLATE_INLINE_PROFILING
@@ -1676,8 +1676,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN_PROF:
stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore registers
#endif
@@ -1807,8 +1807,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
mov r0, r2
mov r1, r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -1817,8 +1817,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
#if defined(TEMPLATE_INLINE_PROFILING)
ldmfd sp!, {r0-r1} @ restore r2 and r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastNativeMethodTraceExit
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
#endif
@ native return; r10=newSaveArea
@ equivalent to dvmPopJniLocals
@@ -1880,20 +1880,20 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
stmfd sp!, {r0-r3}
mov r0, r2
mov r1, r6
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3}
- mov lr, pc
- ldr pc, [r2, #offMethod_nativeFunc]
+ ldr ip, [r2, #offMethod_nativeFunc]
+ blx ip
ldmfd sp!, {r0-r1}
- mov lr, pc
- ldr pc, .LdvmFastNativeMethodTraceExit
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
b 212f
121:
- mov lr, pc
- ldr pc, [r2, #offMethod_nativeFunc]
+ ldr ip, [r2, #offMethod_nativeFunc]
+ blx ip
212:
@ native return; r10=newSaveArea
@@ -1919,7 +1919,7 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
#if defined(WITH_JIT_TUNING)
mov r0, #kCallsiteInterpreted
#endif
- mov pc, r1
+ bx r1
/*
* On entry:
@@ -1936,7 +1936,7 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
ldr r1, .LdvmMterpCommonExceptionThrown @ PIC way of getting &func
ldr rIBASE, .LdvmAsmInstructionStart @ same as above
mov rPC, r0 @ reload the faulting Dalvik address
- mov pc, r1 @ branch to dvmMterpCommonExceptionThrown
+ bx r1 @ branch to dvmMterpCommonExceptionThrown
.align 2
.LdvmAsmInstructionStart:
diff --git a/vm/mterp/armv5te/footer.S b/vm/mterp/armv5te/footer.S
index 4afe4718e..5c7062f2f 100644
--- a/vm/mterp/armv5te/footer.S
+++ b/vm/mterp/armv5te/footer.S
@@ -684,8 +684,8 @@ dalvik_mterp:
cmp lr, #0 @ any special SubModes active?
bne 11f @ go handle them if so
- mov lr, pc @ set return addr
- ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ ldr ip, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ blx ip
7:
@ native return; r10=newSaveArea
@@ -711,8 +711,8 @@ dalvik_mterp:
ldmfd sp, {r0-r3} @ refresh. NOTE: no sp autoincrement
@ Call the native method
- mov lr, pc @ set return addr
- ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ ldr ip, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ blx ip
@ Restore the pre-call arguments
ldmfd sp!, {r0-r3} @ r2<- methodToCall (others unneeded)
diff --git a/vm/mterp/out/InterpAsm-armv5te-vfp.S b/vm/mterp/out/InterpAsm-armv5te-vfp.S
index a0835f9cc..cc5a8770c 100644
--- a/vm/mterp/out/InterpAsm-armv5te-vfp.S
+++ b/vm/mterp/out/InterpAsm-armv5te-vfp.S
@@ -26912,8 +26912,8 @@ dalvik_mterp:
cmp lr, #0 @ any special SubModes active?
bne 11f @ go handle them if so
- mov lr, pc @ set return addr
- ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ ldr ip, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ blx ip
7:
@ native return; r10=newSaveArea
@@ -26939,8 +26939,8 @@ dalvik_mterp:
ldmfd sp, {r0-r3} @ refresh. NOTE: no sp autoincrement
@ Call the native method
- mov lr, pc @ set return addr
- ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ ldr ip, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ blx ip
@ Restore the pre-call arguments
ldmfd sp!, {r0-r3} @ r2<- methodToCall (others unneeded)
diff --git a/vm/mterp/out/InterpAsm-armv5te.S b/vm/mterp/out/InterpAsm-armv5te.S
index 5e4ccd416..6bbd91afa 100644
--- a/vm/mterp/out/InterpAsm-armv5te.S
+++ b/vm/mterp/out/InterpAsm-armv5te.S
@@ -27370,8 +27370,8 @@ dalvik_mterp:
cmp lr, #0 @ any special SubModes active?
bne 11f @ go handle them if so
- mov lr, pc @ set return addr
- ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ ldr ip, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ blx ip
7:
@ native return; r10=newSaveArea
@@ -27397,8 +27397,8 @@ dalvik_mterp:
ldmfd sp, {r0-r3} @ refresh. NOTE: no sp autoincrement
@ Call the native method
- mov lr, pc @ set return addr
- ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ ldr ip, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ blx ip
@ Restore the pre-call arguments
ldmfd sp!, {r0-r3} @ r2<- methodToCall (others unneeded)
diff --git a/vm/mterp/out/InterpAsm-armv7-a-neon.S b/vm/mterp/out/InterpAsm-armv7-a-neon.S
index 3a01a832c..0203b02f6 100644
--- a/vm/mterp/out/InterpAsm-armv7-a-neon.S
+++ b/vm/mterp/out/InterpAsm-armv7-a-neon.S
@@ -26849,8 +26849,8 @@ dalvik_mterp:
cmp lr, #0 @ any special SubModes active?
bne 11f @ go handle them if so
- mov lr, pc @ set return addr
- ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ ldr ip, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ blx ip
7:
@ native return; r10=newSaveArea
@@ -26876,8 +26876,8 @@ dalvik_mterp:
ldmfd sp, {r0-r3} @ refresh. NOTE: no sp autoincrement
@ Call the native method
- mov lr, pc @ set return addr
- ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ ldr ip, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ blx ip
@ Restore the pre-call arguments
ldmfd sp!, {r0-r3} @ r2<- methodToCall (others unneeded)
diff --git a/vm/mterp/out/InterpAsm-armv7-a.S b/vm/mterp/out/InterpAsm-armv7-a.S
index e8a95010a..efb1ea8a4 100644
--- a/vm/mterp/out/InterpAsm-armv7-a.S
+++ b/vm/mterp/out/InterpAsm-armv7-a.S
@@ -26849,8 +26849,8 @@ dalvik_mterp:
cmp lr, #0 @ any special SubModes active?
bne 11f @ go handle them if so
- mov lr, pc @ set return addr
- ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ ldr ip, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ blx ip
7:
@ native return; r10=newSaveArea
@@ -26876,8 +26876,8 @@ dalvik_mterp:
ldmfd sp, {r0-r3} @ refresh. NOTE: no sp autoincrement
@ Call the native method
- mov lr, pc @ set return addr
- ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ ldr ip, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ blx ip
@ Restore the pre-call arguments
ldmfd sp!, {r0-r3} @ r2<- methodToCall (others unneeded)