path: root/vm/mterp
author     Steve Kondik <shade@chemlab.org>  2013-11-11 00:32:52 -0800
committer  Steve Kondik <shade@chemlab.org>  2013-11-11 00:32:52 -0800
commit  bab417cc2aceee45238d5648975118bf3dd4c2e9 (patch)
tree    39f1867dee9fe25cf7174917ef39ea3dd361fca4 /vm/mterp
parent  5531b23c1546fdf896db25f7412291bada6e723c (diff)
parent  e17852495a15ddad079305c725d067ac95e4d655 (diff)
Merge branch 'kk_2.7_rb1.9' of git://codeaurora.org/platform/dalvik into caf
Change-Id: I885fab2470352d0a625c9946d0d5c9111486b713
Diffstat (limited to 'vm/mterp')
-rw-r--r--  vm/mterp/armv5te/OP_EXECUTE_INLINE.S         30
-rw-r--r--  vm/mterp/armv5te/OP_EXECUTE_INLINE_RANGE.S   50
-rw-r--r--  vm/mterp/c/OP_EXECUTE_INLINE.cpp             55
-rw-r--r--  vm/mterp/c/OP_EXECUTE_INLINE_RANGE.cpp       52
-rw-r--r--  vm/mterp/common/asm-constants.h               2
-rw-r--r--  vm/mterp/out/InterpAsm-armv5te-vfp.S         80
-rw-r--r--  vm/mterp/out/InterpAsm-armv5te.S             80
-rw-r--r--  vm/mterp/out/InterpAsm-armv7-a-neon.S        80
-rw-r--r--  vm/mterp/out/InterpAsm-armv7-a.S             80
-rw-r--r--  vm/mterp/out/InterpC-allstubs.cpp           107
-rw-r--r--  vm/mterp/out/InterpC-portable.cpp           107
-rw-r--r--  vm/mterp/out/InterpC-x86.cpp                 52
12 files changed, 760 insertions(+), 15 deletions(-)
diff --git a/vm/mterp/armv5te/OP_EXECUTE_INLINE.S b/vm/mterp/armv5te/OP_EXECUTE_INLINE.S
index ca71de198..7a268dc21 100644
--- a/vm/mterp/armv5te/OP_EXECUTE_INLINE.S
+++ b/vm/mterp/armv5te/OP_EXECUTE_INLINE.S
@@ -46,6 +46,35 @@
* interleave a little better. Increases code size.
*/
.L${opcode}_continue:
+#ifdef INLINE_ARG_EXPANDED
+ rsb r0, r0, #5 @ r0<- 5-r0
+ FETCH(rINST, 2) @ rINST<- FEDC
+ add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
+ bl common_abort @ (skipped due to ARM prefetch)
+6: b .L${opcode}_load_arg4
+ bl common_abort @ (skipped due to ARM prefetch)
+4: and ip, rINST, #0xf000 @ isolate F
+ ldr r3, [rFP, ip, lsr #10] @ r3<- vF (shift right 12, left 2)
+3: and ip, rINST, #0x0f00 @ isolate E
+ ldr r2, [rFP, ip, lsr #6] @ r2<- vE
+2: and ip, rINST, #0x00f0 @ isolate D
+ ldr r1, [rFP, ip, lsr #2] @ r1<- vD
+1: and ip, rINST, #0x000f @ isolate C
+ ldr r0, [rFP, ip, lsl #2] @ r0<- vC
+0:
+ ldr rINST, .L${opcode}_table @ table of InlineOperation
+5: add rINST, pc
+ ldr pc, [rINST, r10, lsl #4] @ sizeof=16, "func" is first entry
+ @ (not reached)
+
+.L${opcode}_load_arg4:
+ FETCH(r1, 0) @ r1<- original rINST
+ mov r0, r1, lsr #8
+ and ip, r0, #0x000f
+ ldr r0, [rFP, ip, lsl #2] @ r0<- vG
+ str r0, [sp, #4]
+ b 4b
+#else
rsb r0, r0, #4 @ r0<- 4-r0
FETCH(rINST, 2) @ rINST<- FEDC
add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
@@ -63,6 +92,7 @@
5: add rINST, pc
ldr pc, [rINST, r10, lsl #4] @ sizeof=16, "func" is first entry
@ (not reached)
+#endif
/*
* We're debugging or profiling.
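A note on the dispatch idiom above: reading pc on ARM yields the address of
the current instruction plus 8 (prefetch), so "add pc, pc, r0, lsl #3" skips
r0 two-instruction (8-byte) slots past the following "bl common_abort", which
is therefore never executed. With INLINE_ARG_EXPANDED the count is rebased
with "rsb r0, r0, #5" so a five-argument call lands on the branch to
load_arg4, which fetches vG from the A nibble of the opcode word, spills it
to [sp, #4], and rejoins the four-register path. As a C sketch (illustrative
names only, not the real mterp macros):

    #include <stdint.h>

    typedef uint32_t u4;

    /* Sketch of the INLINE_ARG_EXPANDED computed goto: 'regs' is the
     * F|E|D|C code unit (FETCH 2), 'vA' is the A nibble of the opcode
     * word (vG), 'fp' is the Dalvik frame pointer. */
    static void loadInlineArgs(const u4 *fp, u4 regs, u4 vA,
                               int argCount, u4 out[5])
    {
        switch (argCount) {       /* rsb r0, r0, #5 + add pc, pc, ... */
        case 5: out[4] = fp[vA];                  /* vG -> [sp, #4] */
            /* fall through */
        case 4: out[3] = fp[(regs >> 12) & 0xf];  /* vF */
            /* fall through */
        case 3: out[2] = fp[(regs >> 8) & 0xf];   /* vE */
            /* fall through */
        case 2: out[1] = fp[(regs >> 4) & 0xf];   /* vD */
            /* fall through */
        case 1: out[0] = fp[regs & 0xf];          /* vC */
            /* fall through */
        default: break;                           /* zero arguments */
        }
    }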
diff --git a/vm/mterp/armv5te/OP_EXECUTE_INLINE_RANGE.S b/vm/mterp/armv5te/OP_EXECUTE_INLINE_RANGE.S
index d9e35b85f..52951812e 100644
--- a/vm/mterp/armv5te/OP_EXECUTE_INLINE_RANGE.S
+++ b/vm/mterp/armv5te/OP_EXECUTE_INLINE_RANGE.S
@@ -19,11 +19,11 @@
bne .L${opcode}_debugmode @ yes - take slow path
.L${opcode}_resume:
add r1, rSELF, #offThread_retval @ r1<- &self->retval
- sub sp, sp, #8 @ make room for arg, +64 bit align
+ sub sp, sp, #16 @ make room for args, +64 bit align
mov r0, rINST, lsr #8 @ r0<- AA
str r1, [sp] @ push &self->retval
bl .L${opcode}_continue @ make call; will return after
- add sp, sp, #8 @ pop stack
+ add sp, sp, #16 @ pop stack
cmp r0, #0 @ test boolean result of inline
beq common_exceptionThrown @ returned false, handle exception
FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
@@ -38,10 +38,17 @@
* lr = return addr, above [DO NOT bl out of here w/o preserving LR]
*/
.L${opcode}_continue:
- rsb r0, r0, #4 @ r0<- 4-r0
+#ifdef INLINE_ARG_EXPANDED
+ rsb r0, r0, #7 @ r0<- 7-r0
FETCH(r9, 2) @ r9<- CCCC
add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
bl common_abort @ (skipped due to ARM prefetch)
+8: b .L${opcode}_load_arg6
+ bl common_abort @ (skipped due to ARM prefetch)
+7: b .L${opcode}_load_arg5
+ bl common_abort @ (skipped due to ARM prefetch)
+6: b .L${opcode}_load_arg4
+ bl common_abort @ (skipped due to ARM prefetch)
4: add ip, r9, #3 @ base+3
GET_VREG(r3, ip) @ r3<- vBase[3]
3: add ip, r9, #2 @ base+2
@@ -56,6 +63,43 @@
ldr pc, [r9, r10, lsl #4] @ sizeof=16, "func" is first entry
@ (not reached)
+.L${opcode}_load_arg6:
+ add ip, r9, #6 @ base+6
+ GET_VREG(r3, ip) @ r3<- vBase[6]
+ str r3, [sp, #12]
+ b 7b
+
+.L${opcode}_load_arg5:
+ add ip, r9, #5 @ base+5
+ GET_VREG(r3, ip) @ r3<- vBase[5]
+ str r3, [sp, #8]
+ b 6b
+
+.L${opcode}_load_arg4:
+ add ip, r9, #4 @ base+4
+ GET_VREG(r3, ip) @ r3<- vBase[4]
+ str r3, [sp, #4]
+ b 4b
+
+#else
+ rsb r0, r0, #4 @ r0<- 4-r0
+ FETCH(r9, 2) @ r9<- CCCC
+ add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
+ bl common_abort @ (skipped due to ARM prefetch)
+4: add ip, r9, #3 @ base+3
+ GET_VREG(r3, ip) @ r3<- vBase[3]
+3: add ip, r9, #2 @ base+2
+ GET_VREG(r2, ip) @ r2<- vBase[2]
+2: add ip, r9, #1 @ base+1
+ GET_VREG(r1, ip) @ r1<- vBase[1]
+1: add ip, r9, #0 @ (nop)
+ GET_VREG(r0, ip) @ r0<- vBase[0]
+0:
+ ldr r9, .L${opcode}_table @ table of InlineOperation
+5: add r9, pc
+ ldr pc, [r9, r10, lsl #4] @ sizeof=16, "func" is first entry
+ @ (not reached)
+#endif
/*
* We're debugging or profiling.
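Why the frame grows from 8 to 16 bytes: under the ARM AAPCS the first four
arguments travel in r0-r3 and everything after them on the stack, so the
expanded path needs stack slots for vBase[4..6] next to &self->retval, and
16 bytes also keeps sp 8-byte aligned at the call. The widened table-entry
signature this implies -- inferred from the stores at [sp, #4..#12] above,
not copied from InlineNative.h -- would look like:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint32_t u4;
    typedef union JValue JValue;  /* opaque here; the real union lives
                                   * in the VM headers */

    /* Register/stack layout at "ldr pc, [r9, r10, lsl #4]":
     *   r0-r3     : vBase[0..3]
     *   [sp, #0]  : pResult (&self->retval)
     *   [sp, #4]  : vBase[4]
     *   [sp, #8]  : vBase[5]
     *   [sp, #12] : vBase[6]
     */
    typedef bool (*InlineFunc7)(u4 a0, u4 a1, u4 a2, u4 a3,
                                JValue *pResult, u4 a4, u4 a5, u4 a6);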
diff --git a/vm/mterp/c/OP_EXECUTE_INLINE.cpp b/vm/mterp/c/OP_EXECUTE_INLINE.cpp
index 288ccc906..4655ae89b 100644
--- a/vm/mterp/c/OP_EXECUTE_INLINE.cpp
+++ b/vm/mterp/c/OP_EXECUTE_INLINE.cpp
@@ -1,5 +1,59 @@
HANDLE_OPCODE(OP_EXECUTE_INLINE /*vB, {vD, vE, vF, vG}, inline@CCCC*/)
{
+#ifdef INLINE_ARG_EXPANDED
+ u4 arg0, arg1, arg2, arg3, arg4;
+ arg0 = arg1 = arg2 = arg3 = arg4 = 0;
+
+ EXPORT_PC();
+
+ vsrc1 = INST_B(inst); /* #of args */
+ ref = FETCH(1); /* inline call "ref" */
+ vdst = FETCH(2); /* 0-4 register indices */
+ ILOGV("|execute-inline args=%d @%d {regs=0x%04x}",
+ vsrc1, ref, vdst);
+
+ assert((vdst >> 16) == 0); // 16-bit type -or- high 16 bits clear
+ assert(vsrc1 <= 5);
+
+ switch (vsrc1) {
+ case 5:
+ arg4 = GET_REGISTER(INST_A(inst));
+ /* fall through */
+ case 4:
+ arg3 = GET_REGISTER(vdst >> 12);
+ /* fall through */
+ case 3:
+ arg2 = GET_REGISTER((vdst & 0x0f00) >> 8);
+ /* fall through */
+ case 2:
+ arg1 = GET_REGISTER((vdst & 0x00f0) >> 4);
+ /* fall through */
+ case 1:
+ arg0 = GET_REGISTER(vdst & 0x0f);
+ /* fall through */
+ default: // case 0
+ ;
+ }
+
+ if (vsrc1 == 5) {
+ if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) {
+ if (!dvmPerformInlineOp5Dbg(arg0, arg1, arg2, arg3, &retval, ref, arg4))
+ GOTO_exceptionThrown();
+ } else {
+ if (!dvmPerformInlineOp5Std(arg0, arg1, arg2, arg3, &retval, ref, arg4))
+ GOTO_exceptionThrown();
+ }
+ } else {
+ if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) {
+ if (!dvmPerformInlineOp4Dbg(arg0, arg1, arg2, arg3, &retval, ref))
+ GOTO_exceptionThrown();
+ } else {
+ if (!dvmPerformInlineOp4Std(arg0, arg1, arg2, arg3, &retval, ref))
+ GOTO_exceptionThrown();
+ }
+ }
+
+#else //ifdef INLINE_ARG_EXPANDED
/*
* This has the same form as other method calls, but we ignore
* the 5th argument (vA). This is chiefly because the first four
@@ -54,6 +108,7 @@ HANDLE_OPCODE(OP_EXECUTE_INLINE /*vB, {vD, vE, vF, vG}, inline@CCCC*/)
if (!dvmPerformInlineOp4Std(arg0, arg1, arg2, arg3, &retval, ref))
GOTO_exceptionThrown();
}
+#endif //ifdef INLINE_ARG_EXPANDED
}
FINISH(3);
OP_END
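For reference, the decode feeding the switch above: the opcode unit packs the
argument count in bits 12-15 (INST_B) and the fifth register vG in bits 8-11
(INST_A), while the third code unit holds the F|E|D|C register nibbles
(FETCH(2)). A standalone sketch of the same decode:

    #include <stdint.h>

    typedef uint16_t u2;

    /* Mirrors INST_B(inst), INST_A(inst) and FETCH(2) for the
     * execute-inline (35c-style) format; illustrative only. */
    static void decodeExecuteInline(const u2 *pc, int *argCount,
                                    int *vG, int regIndex[4])
    {
        u2 inst = pc[0];
        u2 regs = pc[2];                     /* F|E|D|C */
        *argCount   = (inst >> 12) & 0xf;    /* INST_B: 0-5 args */
        *vG         = (inst >> 8)  & 0xf;    /* INST_A: 5th register */
        regIndex[0] =  regs        & 0xf;    /* vC */
        regIndex[1] = (regs >> 4)  & 0xf;    /* vD */
        regIndex[2] = (regs >> 8)  & 0xf;    /* vE */
        regIndex[3] = (regs >> 12) & 0xf;    /* vF */
    }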
diff --git a/vm/mterp/c/OP_EXECUTE_INLINE_RANGE.cpp b/vm/mterp/c/OP_EXECUTE_INLINE_RANGE.cpp
index 467f0e90e..48891d1bb 100644
--- a/vm/mterp/c/OP_EXECUTE_INLINE_RANGE.cpp
+++ b/vm/mterp/c/OP_EXECUTE_INLINE_RANGE.cpp
@@ -1,5 +1,56 @@
HANDLE_OPCODE(OP_EXECUTE_INLINE_RANGE /*{vCCCC..v(CCCC+AA-1)}, inline@BBBB*/)
{
+#ifdef INLINE_ARG_EXPANDED
+ u4 arg0, arg1, arg2, arg3, arg4, arg5, arg6;
+ arg0 = arg1 = arg2 = arg3 = arg4 = arg5 = arg6 = 0; /* placate gcc */
+
+ EXPORT_PC();
+
+ vsrc1 = INST_AA(inst); /* #of args */
+ ref = FETCH(1); /* inline call "ref" */
+ vdst = FETCH(2); /* range base */
+ ILOGV("|execute-inline-range args=%d @%d {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+
+ assert((vdst >> 16) == 0); // 16-bit type -or- high 16 bits clear
+ assert(vsrc1 <= 7);
+
+ switch (vsrc1) {
+ case 7:
+ arg6 = GET_REGISTER(vdst+6);
+ /* fall through */
+ case 6:
+ arg5 = GET_REGISTER(vdst+5);
+ /* fall through */
+ case 5:
+ arg4 = GET_REGISTER(vdst+4);
+ /* fall through */
+ case 4:
+ arg3 = GET_REGISTER(vdst+3);
+ /* fall through */
+ case 3:
+ arg2 = GET_REGISTER(vdst+2);
+ /* fall through */
+ case 2:
+ arg1 = GET_REGISTER(vdst+1);
+ /* fall through */
+ case 1:
+ arg0 = GET_REGISTER(vdst+0);
+ /* fall through */
+ default: // case 0
+ ;
+ }
+
+ if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) {
+ if (!dvmPerformInlineOp7Dbg(arg0, arg1, arg2, arg3, &retval, ref, arg4, arg5, arg6))
+ GOTO_exceptionThrown();
+ } else {
+ if (!dvmPerformInlineOp7Std(arg0, arg1, arg2, arg3, &retval, ref, arg4, arg5, arg6))
+ GOTO_exceptionThrown();
+ }
+#else //ifdef INLINE_ARG_EXPANDED
u4 arg0, arg1, arg2, arg3;
arg0 = arg1 = arg2 = arg3 = 0; /* placate gcc */
@@ -38,6 +89,7 @@ HANDLE_OPCODE(OP_EXECUTE_INLINE_RANGE /*{vCCCC..v(CCCC+AA-1)}, inline@BBBB*/)
if (!dvmPerformInlineOp4Std(arg0, arg1, arg2, arg3, &retval, ref))
GOTO_exceptionThrown();
}
+#endif //ifdef INLINE_ARG_EXPANDED
}
FINISH(3);
OP_END
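The dvmPerformInlineOp7Std/Dbg helpers are introduced by this change and
their declarations are not part of the diff; from the call sites they
evidently keep the legacy (arg0..arg3, &retval, ref) prefix of
dvmPerformInlineOp4Std/Dbg and append the extra registers, so the
four-argument fast path is untouched. Inferred prototypes (an assumption,
for orientation only):

    /* Inferred from the call sites above, not copied from a header. */
    bool dvmPerformInlineOp7Std(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
                                JValue *pResult, int opIndex,
                                u4 arg4, u4 arg5, u4 arg6);
    bool dvmPerformInlineOp7Dbg(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
                                JValue *pResult, int opIndex,
                                u4 arg4, u4 arg5, u4 arg6);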
diff --git a/vm/mterp/common/asm-constants.h b/vm/mterp/common/asm-constants.h
index 80b36fc04..406ee78cf 100644
--- a/vm/mterp/common/asm-constants.h
+++ b/vm/mterp/common/asm-constants.h
@@ -211,6 +211,8 @@ MTERP_OFFSET(offObject_lock, Object, lock, 4)
/* Lock shape */
MTERP_CONSTANT(LW_LOCK_OWNER_SHIFT, 3)
MTERP_CONSTANT(LW_HASH_STATE_SHIFT, 1)
+MTERP_CONSTANT(LW_HASH_STATE_SIZE, 2)
+MTERP_CONSTANT(LW_HASH_STATE_ABS_MASK, 0x6)
/* ArrayObject fields */
MTERP_OFFSET(offArrayObject_length, ArrayObject, length, 8)
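The two new lock-word constants fit the existing layout: the hash-state
field is LW_HASH_STATE_SIZE bits wide starting at LW_HASH_STATE_SHIFT, and
LW_HASH_STATE_ABS_MASK selects it in place (unshifted). A quick derivation,
assuming the field layout from Monitor.h:

    #define LW_HASH_STATE_SHIFT    1
    #define LW_HASH_STATE_SIZE     2
    /* ((1u << 2) - 1) << 1 == 0x3 << 1 == 0x6, matching the constant */
    #define LW_HASH_STATE_ABS_MASK (((1u << LW_HASH_STATE_SIZE) - 1) \
                                    << LW_HASH_STATE_SHIFT)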
diff --git a/vm/mterp/out/InterpAsm-armv5te-vfp.S b/vm/mterp/out/InterpAsm-armv5te-vfp.S
index a173c7226..c9ee0c237 100644
--- a/vm/mterp/out/InterpAsm-armv5te-vfp.S
+++ b/vm/mterp/out/InterpAsm-armv5te-vfp.S
@@ -7342,11 +7342,11 @@ dalvik_inst:
bne .LOP_EXECUTE_INLINE_RANGE_debugmode @ yes - take slow path
.LOP_EXECUTE_INLINE_RANGE_resume:
add r1, rSELF, #offThread_retval @ r1<- &self->retval
- sub sp, sp, #8 @ make room for arg, +64 bit align
+ sub sp, sp, #16 @ make room for args, +64 bit align
mov r0, rINST, lsr #8 @ r0<- AA
str r1, [sp] @ push &self->retval
bl .LOP_EXECUTE_INLINE_RANGE_continue @ make call; will return after
- add sp, sp, #8 @ pop stack
+ add sp, sp, #16 @ pop stack
cmp r0, #0 @ test boolean result of inline
beq common_exceptionThrown @ returned false, handle exception
FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
@@ -9516,6 +9516,35 @@ d2l_doconv:
* interleave a little better. Increases code size.
*/
.LOP_EXECUTE_INLINE_continue:
+#ifdef INLINE_ARG_EXPANDED
+ rsb r0, r0, #5 @ r0<- 5-r0
+ FETCH(rINST, 2) @ rINST<- FEDC
+ add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
+ bl common_abort @ (skipped due to ARM prefetch)
+6: b .LOP_EXECUTE_INLINE_load_arg4
+ bl common_abort @ (skipped due to ARM prefetch)
+4: and ip, rINST, #0xf000 @ isolate F
+ ldr r3, [rFP, ip, lsr #10] @ r3<- vF (shift right 12, left 2)
+3: and ip, rINST, #0x0f00 @ isolate E
+ ldr r2, [rFP, ip, lsr #6] @ r2<- vE
+2: and ip, rINST, #0x00f0 @ isolate D
+ ldr r1, [rFP, ip, lsr #2] @ r1<- vD
+1: and ip, rINST, #0x000f @ isolate C
+ ldr r0, [rFP, ip, lsl #2] @ r0<- vC
+0:
+ ldr rINST, .LOP_EXECUTE_INLINE_table @ table of InlineOperation
+5: add rINST, pc
+ ldr pc, [rINST, r10, lsl #4] @ sizeof=16, "func" is first entry
+ @ (not reached)
+
+.LOP_EXECUTE_INLINE_load_arg4:
+ FETCH(r1, 0) @ r1<- original rINST
+ mov r0, r1, lsr #8
+ and ip, r0, #0x000f
+ ldr r0, [rFP, ip, lsl #2] @ r0<- vG
+ str r0, [sp, #4]
+ b 4b
+#else
rsb r0, r0, #4 @ r0<- 4-r0
FETCH(rINST, 2) @ rINST<- FEDC
add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
@@ -9533,6 +9562,7 @@ d2l_doconv:
5: add rINST, pc
ldr pc, [rINST, r10, lsl #4] @ sizeof=16, "func" is first entry
@ (not reached)
+#endif
/*
* We're debugging or profiling.
@@ -9577,10 +9607,17 @@ d2l_doconv:
* lr = return addr, above [DO NOT bl out of here w/o preserving LR]
*/
.LOP_EXECUTE_INLINE_RANGE_continue:
- rsb r0, r0, #4 @ r0<- 4-r0
+#ifdef INLINE_ARG_EXPANDED
+ rsb r0, r0, #7 @ r0<- 7-r0
FETCH(r9, 2) @ r9<- CCCC
add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
bl common_abort @ (skipped due to ARM prefetch)
+8: b .LOP_EXECUTE_INLINE_RANGE_load_arg6
+ bl common_abort @ (skipped due to ARM prefetch)
+7: b .LOP_EXECUTE_INLINE_RANGE_load_arg5
+ bl common_abort @ (skipped due to ARM prefetch)
+6: b .LOP_EXECUTE_INLINE_RANGE_load_arg4
+ bl common_abort @ (skipped due to ARM prefetch)
4: add ip, r9, #3 @ base+3
GET_VREG(r3, ip) @ r3<- vBase[3]
3: add ip, r9, #2 @ base+2
@@ -9595,6 +9632,43 @@ d2l_doconv:
ldr pc, [r9, r10, lsl #4] @ sizeof=16, "func" is first entry
@ (not reached)
+.LOP_EXECUTE_INLINE_RANGE_load_arg6:
+ add ip, r9, #6 @ base+6
+ GET_VREG(r3, ip) @ r3<- vBase[6]
+ str r3, [sp, #12]
+ b 7b
+
+.LOP_EXECUTE_INLINE_RANGE_load_arg5:
+ add ip, r9, #5 @ base+5
+ GET_VREG(r3, ip) @ r3<- vBase[5]
+ str r3, [sp, #8]
+ b 6b
+
+.LOP_EXECUTE_INLINE_RANGE_load_arg4:
+ add ip, r9, #4 @ base+4
+ GET_VREG(r3, ip) @ r3<- vBase[4]
+ str r3, [sp, #4]
+ b 4b
+
+#else
+ rsb r0, r0, #4 @ r0<- 4-r0
+ FETCH(r9, 2) @ r9<- CCCC
+ add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
+ bl common_abort @ (skipped due to ARM prefetch)
+4: add ip, r9, #3 @ base+3
+ GET_VREG(r3, ip) @ r3<- vBase[3]
+3: add ip, r9, #2 @ base+2
+ GET_VREG(r2, ip) @ r2<- vBase[2]
+2: add ip, r9, #1 @ base+1
+ GET_VREG(r1, ip) @ r1<- vBase[1]
+1: add ip, r9, #0 @ (nop)
+ GET_VREG(r0, ip) @ r0<- vBase[0]
+0:
+ ldr r9, .LOP_EXECUTE_INLINE_RANGE_table @ table of InlineOperation
+5: add r9, pc
+ ldr pc, [r9, r10, lsl #4] @ sizeof=16, "func" is first entry
+ @ (not reached)
+#endif
/*
* We're debugging or profiling.
diff --git a/vm/mterp/out/InterpAsm-armv5te.S b/vm/mterp/out/InterpAsm-armv5te.S
index 7b6c9d18f..5ba889d7c 100644
--- a/vm/mterp/out/InterpAsm-armv5te.S
+++ b/vm/mterp/out/InterpAsm-armv5te.S
@@ -7664,11 +7664,11 @@ d2i_doconv:
bne .LOP_EXECUTE_INLINE_RANGE_debugmode @ yes - take slow path
.LOP_EXECUTE_INLINE_RANGE_resume:
add r1, rSELF, #offThread_retval @ r1<- &self->retval
- sub sp, sp, #8 @ make room for arg, +64 bit align
+ sub sp, sp, #16 @ make room for args, +64 bit align
mov r0, rINST, lsr #8 @ r0<- AA
str r1, [sp] @ push &self->retval
bl .LOP_EXECUTE_INLINE_RANGE_continue @ make call; will return after
- add sp, sp, #8 @ pop stack
+ add sp, sp, #16 @ pop stack
cmp r0, #0 @ test boolean result of inline
beq common_exceptionThrown @ returned false, handle exception
FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
@@ -9974,6 +9974,35 @@ d2l_doconv:
* interleave a little better. Increases code size.
*/
.LOP_EXECUTE_INLINE_continue:
+#ifdef INLINE_ARG_EXPANDED
+ rsb r0, r0, #5 @ r0<- 5-r0
+ FETCH(rINST, 2) @ rINST<- FEDC
+ add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
+ bl common_abort @ (skipped due to ARM prefetch)
+6: b .LOP_EXECUTE_INLINE_load_arg4
+ bl common_abort @ (skipped due to ARM prefetch)
+4: and ip, rINST, #0xf000 @ isolate F
+ ldr r3, [rFP, ip, lsr #10] @ r3<- vF (shift right 12, left 2)
+3: and ip, rINST, #0x0f00 @ isolate E
+ ldr r2, [rFP, ip, lsr #6] @ r2<- vE
+2: and ip, rINST, #0x00f0 @ isolate D
+ ldr r1, [rFP, ip, lsr #2] @ r1<- vD
+1: and ip, rINST, #0x000f @ isolate C
+ ldr r0, [rFP, ip, lsl #2] @ r0<- vC
+0:
+ ldr rINST, .LOP_EXECUTE_INLINE_table @ table of InlineOperation
+5: add rINST, pc
+ ldr pc, [rINST, r10, lsl #4] @ sizeof=16, "func" is first entry
+ @ (not reached)
+
+.LOP_EXECUTE_INLINE_load_arg4:
+ FETCH(r1, 0) @ r1<- original rINST
+ mov r0, r1, lsr #8
+ and ip, r0, #0x000f
+ ldr r0, [rFP, ip, lsl #2] @ r0<- vG
+ str r0, [sp, #4]
+ b 4b
+#else
rsb r0, r0, #4 @ r0<- 4-r0
FETCH(rINST, 2) @ rINST<- FEDC
add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
@@ -9991,6 +10020,7 @@ d2l_doconv:
5: add rINST, pc
ldr pc, [rINST, r10, lsl #4] @ sizeof=16, "func" is first entry
@ (not reached)
+#endif
/*
* We're debugging or profiling.
@@ -10035,10 +10065,17 @@ d2l_doconv:
* lr = return addr, above [DO NOT bl out of here w/o preserving LR]
*/
.LOP_EXECUTE_INLINE_RANGE_continue:
- rsb r0, r0, #4 @ r0<- 4-r0
+#ifdef INLINE_ARG_EXPANDED
+ rsb r0, r0, #7 @ r0<- 7-r0
FETCH(r9, 2) @ r9<- CCCC
add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
bl common_abort @ (skipped due to ARM prefetch)
+8: b .LOP_EXECUTE_INLINE_RANGE_load_arg6
+ bl common_abort @ (skipped due to ARM prefetch)
+7: b .LOP_EXECUTE_INLINE_RANGE_load_arg5
+ bl common_abort @ (skipped due to ARM prefetch)
+6: b .LOP_EXECUTE_INLINE_RANGE_load_arg4
+ bl common_abort @ (skipped due to ARM prefetch)
4: add ip, r9, #3 @ base+3
GET_VREG(r3, ip) @ r3<- vBase[3]
3: add ip, r9, #2 @ base+2
@@ -10053,6 +10090,43 @@ d2l_doconv:
ldr pc, [r9, r10, lsl #4] @ sizeof=16, "func" is first entry
@ (not reached)
+.LOP_EXECUTE_INLINE_RANGE_load_arg6:
+ add ip, r9, #6 @ base+6
+ GET_VREG(r3, ip) @ r3<- vBase[6]
+ str r3, [sp, #12]
+ b 7b
+
+.LOP_EXECUTE_INLINE_RANGE_load_arg5:
+ add ip, r9, #5 @ base+5
+ GET_VREG(r3, ip) @ r3<- vBase[5]
+ str r3, [sp, #8]
+ b 6b
+
+.LOP_EXECUTE_INLINE_RANGE_load_arg4:
+ add ip, r9, #4 @ base+4
+ GET_VREG(r3, ip) @ r3<- vBase[4]
+ str r3, [sp, #4]
+ b 4b
+
+#else
+ rsb r0, r0, #4 @ r0<- 4-r0
+ FETCH(r9, 2) @ r9<- CCCC
+ add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
+ bl common_abort @ (skipped due to ARM prefetch)
+4: add ip, r9, #3 @ base+3
+ GET_VREG(r3, ip) @ r3<- vBase[3]
+3: add ip, r9, #2 @ base+2
+ GET_VREG(r2, ip) @ r2<- vBase[2]
+2: add ip, r9, #1 @ base+1
+ GET_VREG(r1, ip) @ r1<- vBase[1]
+1: add ip, r9, #0 @ (nop)
+ GET_VREG(r0, ip) @ r0<- vBase[0]
+0:
+ ldr r9, .LOP_EXECUTE_INLINE_RANGE_table @ table of InlineOperation
+5: add r9, pc
+ ldr pc, [r9, r10, lsl #4] @ sizeof=16, "func" is first entry
+ @ (not reached)
+#endif
/*
* We're debugging or profiling.
diff --git a/vm/mterp/out/InterpAsm-armv7-a-neon.S b/vm/mterp/out/InterpAsm-armv7-a-neon.S
index c3419c230..7d3b08f46 100644
--- a/vm/mterp/out/InterpAsm-armv7-a-neon.S
+++ b/vm/mterp/out/InterpAsm-armv7-a-neon.S
@@ -7300,11 +7300,11 @@ dalvik_inst:
bne .LOP_EXECUTE_INLINE_RANGE_debugmode @ yes - take slow path
.LOP_EXECUTE_INLINE_RANGE_resume:
add r1, rSELF, #offThread_retval @ r1<- &self->retval
- sub sp, sp, #8 @ make room for arg, +64 bit align
+ sub sp, sp, #16 @ make room for args, +64 bit align
mov r0, rINST, lsr #8 @ r0<- AA
str r1, [sp] @ push &self->retval
bl .LOP_EXECUTE_INLINE_RANGE_continue @ make call; will return after
- add sp, sp, #8 @ pop stack
+ add sp, sp, #16 @ pop stack
cmp r0, #0 @ test boolean result of inline
beq common_exceptionThrown @ returned false, handle exception
FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
@@ -9453,6 +9453,35 @@ d2l_doconv:
* interleave a little better. Increases code size.
*/
.LOP_EXECUTE_INLINE_continue:
+#ifdef INLINE_ARG_EXPANDED
+ rsb r0, r0, #5 @ r0<- 5-r0
+ FETCH(rINST, 2) @ rINST<- FEDC
+ add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
+ bl common_abort @ (skipped due to ARM prefetch)
+6: b .LOP_EXECUTE_INLINE_load_arg4
+ bl common_abort @ (skipped due to ARM prefetch)
+4: and ip, rINST, #0xf000 @ isolate F
+ ldr r3, [rFP, ip, lsr #10] @ r3<- vF (shift right 12, left 2)
+3: and ip, rINST, #0x0f00 @ isolate E
+ ldr r2, [rFP, ip, lsr #6] @ r2<- vE
+2: and ip, rINST, #0x00f0 @ isolate D
+ ldr r1, [rFP, ip, lsr #2] @ r1<- vD
+1: and ip, rINST, #0x000f @ isolate C
+ ldr r0, [rFP, ip, lsl #2] @ r0<- vC
+0:
+ ldr rINST, .LOP_EXECUTE_INLINE_table @ table of InlineOperation
+5: add rINST, pc
+ ldr pc, [rINST, r10, lsl #4] @ sizeof=16, "func" is first entry
+ @ (not reached)
+
+.LOP_EXECUTE_INLINE_load_arg4:
+ FETCH(r1, 0) @ r1<- original rINST
+ mov r0, r1, lsr #8
+ and ip, r0, #0x000f
+ ldr r0, [rFP, ip, lsl #2] @ r0<- vG
+ str r0, [sp, #4]
+ b 4b
+#else
rsb r0, r0, #4 @ r0<- 4-r0
FETCH(rINST, 2) @ rINST<- FEDC
add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
@@ -9470,6 +9499,7 @@ d2l_doconv:
5: add rINST, pc
ldr pc, [rINST, r10, lsl #4] @ sizeof=16, "func" is first entry
@ (not reached)
+#endif
/*
* We're debugging or profiling.
@@ -9514,10 +9544,17 @@ d2l_doconv:
* lr = return addr, above [DO NOT bl out of here w/o preserving LR]
*/
.LOP_EXECUTE_INLINE_RANGE_continue:
- rsb r0, r0, #4 @ r0<- 4-r0
+#ifdef INLINE_ARG_EXPANDED
+ rsb r0, r0, #7 @ r0<- 7-r0
FETCH(r9, 2) @ r9<- CCCC
add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
bl common_abort @ (skipped due to ARM prefetch)
+8: b .LOP_EXECUTE_INLINE_RANGE_load_arg6
+ bl common_abort @ (skipped due to ARM prefetch)
+7: b .LOP_EXECUTE_INLINE_RANGE_load_arg5
+ bl common_abort @ (skipped due to ARM prefetch)
+6: b .LOP_EXECUTE_INLINE_RANGE_load_arg4
+ bl common_abort @ (skipped due to ARM prefetch)
4: add ip, r9, #3 @ base+3
GET_VREG(r3, ip) @ r3<- vBase[3]
3: add ip, r9, #2 @ base+2
@@ -9532,6 +9569,43 @@ d2l_doconv:
ldr pc, [r9, r10, lsl #4] @ sizeof=16, "func" is first entry
@ (not reached)
+.LOP_EXECUTE_INLINE_RANGE_load_arg6:
+ add ip, r9, #6 @ base+6
+ GET_VREG(r3, ip) @ r3<- vBase[6]
+ str r3, [sp, #12]
+ b 7b
+
+.LOP_EXECUTE_INLINE_RANGE_load_arg5:
+ add ip, r9, #5 @ base+5
+ GET_VREG(r3, ip) @ r3<- vBase[5]
+ str r3, [sp, #8]
+ b 6b
+
+.LOP_EXECUTE_INLINE_RANGE_load_arg4:
+ add ip, r9, #4 @ base+4
+ GET_VREG(r3, ip) @ r3<- vBase[4]
+ str r3, [sp, #4]
+ b 4b
+
+#else
+ rsb r0, r0, #4 @ r0<- 4-r0
+ FETCH(r9, 2) @ r9<- CCCC
+ add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
+ bl common_abort @ (skipped due to ARM prefetch)
+4: add ip, r9, #3 @ base+3
+ GET_VREG(r3, ip) @ r3<- vBase[3]
+3: add ip, r9, #2 @ base+2
+ GET_VREG(r2, ip) @ r2<- vBase[2]
+2: add ip, r9, #1 @ base+1
+ GET_VREG(r1, ip) @ r1<- vBase[1]
+1: add ip, r9, #0 @ (nop)
+ GET_VREG(r0, ip) @ r0<- vBase[0]
+0:
+ ldr r9, .LOP_EXECUTE_INLINE_RANGE_table @ table of InlineOperation
+5: add r9, pc
+ ldr pc, [r9, r10, lsl #4] @ sizeof=16, "func" is first entry
+ @ (not reached)
+#endif
/*
* We're debugging or profiling.
diff --git a/vm/mterp/out/InterpAsm-armv7-a.S b/vm/mterp/out/InterpAsm-armv7-a.S
index 254224566..199d59acb 100644
--- a/vm/mterp/out/InterpAsm-armv7-a.S
+++ b/vm/mterp/out/InterpAsm-armv7-a.S
@@ -7300,11 +7300,11 @@ dalvik_inst:
bne .LOP_EXECUTE_INLINE_RANGE_debugmode @ yes - take slow path
.LOP_EXECUTE_INLINE_RANGE_resume:
add r1, rSELF, #offThread_retval @ r1<- &self->retval
- sub sp, sp, #8 @ make room for arg, +64 bit align
+ sub sp, sp, #16 @ make room for args, +64 bit align
mov r0, rINST, lsr #8 @ r0<- AA
str r1, [sp] @ push &self->retval
bl .LOP_EXECUTE_INLINE_RANGE_continue @ make call; will return after
- add sp, sp, #8 @ pop stack
+ add sp, sp, #16 @ pop stack
cmp r0, #0 @ test boolean result of inline
beq common_exceptionThrown @ returned false, handle exception
FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
@@ -9453,6 +9453,35 @@ d2l_doconv:
* interleave a little better. Increases code size.
*/
.LOP_EXECUTE_INLINE_continue:
+#ifdef INLINE_ARG_EXPANDED
+ rsb r0, r0, #5 @ r0<- 5-r0
+ FETCH(rINST, 2) @ rINST<- FEDC
+ add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
+ bl common_abort @ (skipped due to ARM prefetch)
+6: b .LOP_EXECUTE_INLINE_load_arg4
+ bl common_abort @ (skipped due to ARM prefetch)
+4: and ip, rINST, #0xf000 @ isolate F
+ ldr r3, [rFP, ip, lsr #10] @ r3<- vF (shift right 12, left 2)
+3: and ip, rINST, #0x0f00 @ isolate E
+ ldr r2, [rFP, ip, lsr #6] @ r2<- vE
+2: and ip, rINST, #0x00f0 @ isolate D
+ ldr r1, [rFP, ip, lsr #2] @ r1<- vD
+1: and ip, rINST, #0x000f @ isolate C
+ ldr r0, [rFP, ip, lsl #2] @ r0<- vC
+0:
+ ldr rINST, .LOP_EXECUTE_INLINE_table @ table of InlineOperation
+5: add rINST, pc
+ ldr pc, [rINST, r10, lsl #4] @ sizeof=16, "func" is first entry
+ @ (not reached)
+
+.LOP_EXECUTE_INLINE_load_arg4:
+ FETCH(r1, 0) @ r1<- original rINST
+ mov r0, r1, lsr #8
+ and ip, r0, #0x000f
+ ldr r0, [rFP, ip, lsl #2] @ r0<- vG
+ str r0, [sp, #4]
+ b 4b
+#else
rsb r0, r0, #4 @ r0<- 4-r0
FETCH(rINST, 2) @ rINST<- FEDC
add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
@@ -9470,6 +9499,7 @@ d2l_doconv:
5: add rINST, pc
ldr pc, [rINST, r10, lsl #4] @ sizeof=16, "func" is first entry
@ (not reached)
+#endif
/*
* We're debugging or profiling.
@@ -9514,10 +9544,17 @@ d2l_doconv:
* lr = return addr, above [DO NOT bl out of here w/o preserving LR]
*/
.LOP_EXECUTE_INLINE_RANGE_continue:
- rsb r0, r0, #4 @ r0<- 4-r0
+#ifdef INLINE_ARG_EXPANDED
+ rsb r0, r0, #7 @ r0<- 7-r0
FETCH(r9, 2) @ r9<- CCCC
add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
bl common_abort @ (skipped due to ARM prefetch)
+8: b .LOP_EXECUTE_INLINE_RANGE_load_arg6
+ bl common_abort @ (skipped due to ARM prefetch)
+7: b .LOP_EXECUTE_INLINE_RANGE_load_arg5
+ bl common_abort @ (skipped due to ARM prefetch)
+6: b .LOP_EXECUTE_INLINE_RANGE_load_arg4
+ bl common_abort @ (skipped due to ARM prefetch)
4: add ip, r9, #3 @ base+3
GET_VREG(r3, ip) @ r3<- vBase[3]
3: add ip, r9, #2 @ base+2
@@ -9532,6 +9569,43 @@ d2l_doconv:
ldr pc, [r9, r10, lsl #4] @ sizeof=16, "func" is first entry
@ (not reached)
+.LOP_EXECUTE_INLINE_RANGE_load_arg6:
+ add ip, r9, #6 @ base+6
+ GET_VREG(r3, ip) @ r3<- vBase[6]
+ str r3, [sp, #12]
+ b 7b
+
+.LOP_EXECUTE_INLINE_RANGE_load_arg5:
+ add ip, r9, #5 @ base+5
+ GET_VREG(r3, ip) @ r3<- vBase[5]
+ str r3, [sp, #8]
+ b 6b
+
+.LOP_EXECUTE_INLINE_RANGE_load_arg4:
+ add ip, r9, #4 @ base+4
+ GET_VREG(r3, ip) @ r3<- vBase[4]
+ str r3, [sp, #4]
+ b 4b
+
+#else
+ rsb r0, r0, #4 @ r0<- 4-r0
+ FETCH(r9, 2) @ r9<- CCCC
+ add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
+ bl common_abort @ (skipped due to ARM prefetch)
+4: add ip, r9, #3 @ base+3
+ GET_VREG(r3, ip) @ r3<- vBase[3]
+3: add ip, r9, #2 @ base+2
+ GET_VREG(r2, ip) @ r2<- vBase[2]
+2: add ip, r9, #1 @ base+1
+ GET_VREG(r1, ip) @ r1<- vBase[1]
+1: add ip, r9, #0 @ (nop)
+ GET_VREG(r0, ip) @ r0<- vBase[0]
+0:
+ ldr r9, .LOP_EXECUTE_INLINE_RANGE_table @ table of InlineOperation
+5: add r9, pc
+ ldr pc, [r9, r10, lsl #4] @ sizeof=16, "func" is first entry
+ @ (not reached)
+#endif
/*
* We're debugging or profiling.
diff --git a/vm/mterp/out/InterpC-allstubs.cpp b/vm/mterp/out/InterpC-allstubs.cpp
index 1ef878370..5258cbcee 100644
--- a/vm/mterp/out/InterpC-allstubs.cpp
+++ b/vm/mterp/out/InterpC-allstubs.cpp
@@ -2810,6 +2810,60 @@ OP_END
/* File: c/OP_EXECUTE_INLINE.cpp */
HANDLE_OPCODE(OP_EXECUTE_INLINE /*vB, {vD, vE, vF, vG}, inline@CCCC*/)
{
+#ifdef INLINE_ARG_EXPANDED
+ u4 arg0, arg1, arg2, arg3, arg4;
+ arg0 = arg1 = arg2 = arg3 = arg4 = 0;
+
+ EXPORT_PC();
+
+ vsrc1 = INST_B(inst); /* #of args */
+ ref = FETCH(1); /* inline call "ref" */
+ vdst = FETCH(2); /* 0-4 register indices */
+ ILOGV("|execute-inline args=%d @%d {regs=0x%04x}",
+ vsrc1, ref, vdst);
+
+ assert((vdst >> 16) == 0); // 16-bit type -or- high 16 bits clear
+ assert(vsrc1 <= 5);
+
+ switch (vsrc1) {
+ case 5:
+ arg4 = GET_REGISTER(INST_A(inst));
+ /* fall through */
+ case 4:
+ arg3 = GET_REGISTER(vdst >> 12);
+ /* fall through */
+ case 3:
+ arg2 = GET_REGISTER((vdst & 0x0f00) >> 8);
+ /* fall through */
+ case 2:
+ arg1 = GET_REGISTER((vdst & 0x00f0) >> 4);
+ /* fall through */
+ case 1:
+ arg0 = GET_REGISTER(vdst & 0x0f);
+ /* fall through */
+ default: // case 0
+ ;
+ }
+
+ if (vsrc1 == 5) {
+ if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) {
+ if (!dvmPerformInlineOp5Dbg(arg0, arg1, arg2, arg3, &retval, ref, arg4))
+ GOTO_exceptionThrown();
+ } else {
+ if (!dvmPerformInlineOp5Std(arg0, arg1, arg2, arg3, &retval, ref, arg4))
+ GOTO_exceptionThrown();
+ }
+ } else {
+ if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) {
+ if (!dvmPerformInlineOp4Dbg(arg0, arg1, arg2, arg3, &retval, ref))
+ GOTO_exceptionThrown();
+ } else {
+ if (!dvmPerformInlineOp4Std(arg0, arg1, arg2, arg3, &retval, ref))
+ GOTO_exceptionThrown();
+ }
+ }
+
+#else //ifdef INLINE_ARG_EXPANDED
/*
* This has the same form as other method calls, but we ignore
* the 5th argument (vA). This is chiefly because the first four
@@ -2864,6 +2918,7 @@ HANDLE_OPCODE(OP_EXECUTE_INLINE /*vB, {vD, vE, vF, vG}, inline@CCCC*/)
if (!dvmPerformInlineOp4Std(arg0, arg1, arg2, arg3, &retval, ref))
GOTO_exceptionThrown();
}
+#endif //ifdef INLINE_ARG_EXPANDED
}
FINISH(3);
OP_END
@@ -2871,6 +2926,57 @@ OP_END
/* File: c/OP_EXECUTE_INLINE_RANGE.cpp */
HANDLE_OPCODE(OP_EXECUTE_INLINE_RANGE /*{vCCCC..v(CCCC+AA-1)}, inline@BBBB*/)
{
+#ifdef INLINE_ARG_EXPANDED
+ u4 arg0, arg1, arg2, arg3, arg4, arg5, arg6;
+ arg0 = arg1 = arg2 = arg3 = arg4 = arg5 = arg6 = 0; /* placate gcc */
+
+ EXPORT_PC();
+
+ vsrc1 = INST_AA(inst); /* #of args */
+ ref = FETCH(1); /* inline call "ref" */
+ vdst = FETCH(2); /* range base */
+ ILOGV("|execute-inline-range args=%d @%d {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+
+ assert((vdst >> 16) == 0); // 16-bit type -or- high 16 bits clear
+ assert(vsrc1 <= 7);
+
+ switch (vsrc1) {
+ case 7:
+ arg6 = GET_REGISTER(vdst+6);
+ /* fall through */
+ case 6:
+ arg5 = GET_REGISTER(vdst+5);
+ /* fall through */
+ case 5:
+ arg4 = GET_REGISTER(vdst+4);
+ /* fall through */
+ case 4:
+ arg3 = GET_REGISTER(vdst+3);
+ /* fall through */
+ case 3:
+ arg2 = GET_REGISTER(vdst+2);
+ /* fall through */
+ case 2:
+ arg1 = GET_REGISTER(vdst+1);
+ /* fall through */
+ case 1:
+ arg0 = GET_REGISTER(vdst+0);
+ /* fall through */
+ default: // case 0
+ ;
+ }
+
+ if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) {
+ if (!dvmPerformInlineOp7Dbg(arg0, arg1, arg2, arg3, &retval, ref, arg4, arg5, arg6))
+ GOTO_exceptionThrown();
+ } else {
+ if (!dvmPerformInlineOp7Std(arg0, arg1, arg2, arg3, &retval, ref, arg4, arg5, arg6))
+ GOTO_exceptionThrown();
+ }
+#else //ifdef INLINE_ARG_EXPANDED
u4 arg0, arg1, arg2, arg3;
arg0 = arg1 = arg2 = arg3 = 0; /* placate gcc */
@@ -2909,6 +3015,7 @@ HANDLE_OPCODE(OP_EXECUTE_INLINE_RANGE /*{vCCCC..v(CCCC+AA-1)}, inline@BBBB*/)
if (!dvmPerformInlineOp4Std(arg0, arg1, arg2, arg3, &retval, ref))
GOTO_exceptionThrown();
}
+#endif //ifdef INLINE_ARG_EXPANDED
}
FINISH(3);
OP_END
diff --git a/vm/mterp/out/InterpC-portable.cpp b/vm/mterp/out/InterpC-portable.cpp
index 0328aa883..ee02aa1e8 100644
--- a/vm/mterp/out/InterpC-portable.cpp
+++ b/vm/mterp/out/InterpC-portable.cpp
@@ -2821,6 +2821,60 @@ OP_END
/* File: c/OP_EXECUTE_INLINE.cpp */
HANDLE_OPCODE(OP_EXECUTE_INLINE /*vB, {vD, vE, vF, vG}, inline@CCCC*/)
{
+#ifdef INLINE_ARG_EXPANDED
+ u4 arg0, arg1, arg2, arg3, arg4;
+ arg0 = arg1 = arg2 = arg3 = arg4 = 0;
+
+ EXPORT_PC();
+
+ vsrc1 = INST_B(inst); /* #of args */
+ ref = FETCH(1); /* inline call "ref" */
+ vdst = FETCH(2); /* 0-4 register indices */
+ ILOGV("|execute-inline args=%d @%d {regs=0x%04x}",
+ vsrc1, ref, vdst);
+
+ assert((vdst >> 16) == 0); // 16-bit type -or- high 16 bits clear
+ assert(vsrc1 <= 5);
+
+ switch (vsrc1) {
+ case 5:
+ arg4 = GET_REGISTER(INST_A(inst));
+ /* fall through */
+ case 4:
+ arg3 = GET_REGISTER(vdst >> 12);
+ /* fall through */
+ case 3:
+ arg2 = GET_REGISTER((vdst & 0x0f00) >> 8);
+ /* fall through */
+ case 2:
+ arg1 = GET_REGISTER((vdst & 0x00f0) >> 4);
+ /* fall through */
+ case 1:
+ arg0 = GET_REGISTER(vdst & 0x0f);
+ /* fall through */
+ default: // case 0
+ ;
+ }
+
+ if (vsrc1 == 5) {
+ if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) {
+ if (!dvmPerformInlineOp5Dbg(arg0, arg1, arg2, arg3, &retval, ref, arg4))
+ GOTO_exceptionThrown();
+ } else {
+ if (!dvmPerformInlineOp5Std(arg0, arg1, arg2, arg3, &retval, ref, arg4))
+ GOTO_exceptionThrown();
+ }
+ } else {
+ if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) {
+ if (!dvmPerformInlineOp4Dbg(arg0, arg1, arg2, arg3, &retval, ref))
+ GOTO_exceptionThrown();
+ } else {
+ if (!dvmPerformInlineOp4Std(arg0, arg1, arg2, arg3, &retval, ref))
+ GOTO_exceptionThrown();
+ }
+ }
+
+#else //ifdef INLINE_ARG_EXPANDED
/*
* This has the same form as other method calls, but we ignore
* the 5th argument (vA). This is chiefly because the first four
@@ -2875,6 +2929,7 @@ HANDLE_OPCODE(OP_EXECUTE_INLINE /*vB, {vD, vE, vF, vG}, inline@CCCC*/)
if (!dvmPerformInlineOp4Std(arg0, arg1, arg2, arg3, &retval, ref))
GOTO_exceptionThrown();
}
+#endif //ifdef INLINE_ARG_EXPANDED
}
FINISH(3);
OP_END
@@ -2882,6 +2937,57 @@ OP_END
/* File: c/OP_EXECUTE_INLINE_RANGE.cpp */
HANDLE_OPCODE(OP_EXECUTE_INLINE_RANGE /*{vCCCC..v(CCCC+AA-1)}, inline@BBBB*/)
{
+#ifdef INLINE_ARG_EXPANDED
+ u4 arg0, arg1, arg2, arg3, arg4, arg5, arg6;
+ arg0 = arg1 = arg2 = arg3 = arg4 = arg5 = arg6 = 0; /* placate gcc */
+
+ EXPORT_PC();
+
+ vsrc1 = INST_AA(inst); /* #of args */
+ ref = FETCH(1); /* inline call "ref" */
+ vdst = FETCH(2); /* range base */
+ ILOGV("|execute-inline-range args=%d @%d {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+
+ assert((vdst >> 16) == 0); // 16-bit type -or- high 16 bits clear
+ assert(vsrc1 <= 7);
+
+ switch (vsrc1) {
+ case 7:
+ arg6 = GET_REGISTER(vdst+6);
+ /* fall through */
+ case 6:
+ arg5 = GET_REGISTER(vdst+5);
+ /* fall through */
+ case 5:
+ arg4 = GET_REGISTER(vdst+4);
+ /* fall through */
+ case 4:
+ arg3 = GET_REGISTER(vdst+3);
+ /* fall through */
+ case 3:
+ arg2 = GET_REGISTER(vdst+2);
+ /* fall through */
+ case 2:
+ arg1 = GET_REGISTER(vdst+1);
+ /* fall through */
+ case 1:
+ arg0 = GET_REGISTER(vdst+0);
+ /* fall through */
+ default: // case 0
+ ;
+ }
+
+ if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) {
+ if (!dvmPerformInlineOp7Dbg(arg0, arg1, arg2, arg3, &retval, ref, arg4, arg5, arg6))
+ GOTO_exceptionThrown();
+ } else {
+ if (!dvmPerformInlineOp7Std(arg0, arg1, arg2, arg3, &retval, ref, arg4, arg5, arg6))
+ GOTO_exceptionThrown();
+ }
+#else //ifdef INLINE_ARG_EXPANDED
u4 arg0, arg1, arg2, arg3;
arg0 = arg1 = arg2 = arg3 = 0; /* placate gcc */
@@ -2920,6 +3026,7 @@ HANDLE_OPCODE(OP_EXECUTE_INLINE_RANGE /*{vCCCC..v(CCCC+AA-1)}, inline@BBBB*/)
if (!dvmPerformInlineOp4Std(arg0, arg1, arg2, arg3, &retval, ref))
GOTO_exceptionThrown();
}
+#endif //ifdef INLINE_ARG_EXPANDED
}
FINISH(3);
OP_END
diff --git a/vm/mterp/out/InterpC-x86.cpp b/vm/mterp/out/InterpC-x86.cpp
index 77dc8885c..eb8a1e9e4 100644
--- a/vm/mterp/out/InterpC-x86.cpp
+++ b/vm/mterp/out/InterpC-x86.cpp
@@ -1181,6 +1181,57 @@ OP_END
/* File: c/OP_EXECUTE_INLINE_RANGE.cpp */
HANDLE_OPCODE(OP_EXECUTE_INLINE_RANGE /*{vCCCC..v(CCCC+AA-1)}, inline@BBBB*/)
{
+#ifdef INLINE_ARG_EXPANDED
+ u4 arg0, arg1, arg2, arg3, arg4, arg5, arg6;
+ arg0 = arg1 = arg2 = arg3 = arg4 = arg5 = 0; /* placate gcc */
+ arg6 = 0;
+
+
+ EXPORT_PC();
+
+ vsrc1 = INST_AA(inst); /* #of args */
+ ref = FETCH(1); /* inline call "ref" */
+ vdst = FETCH(2); /* range base */
+ ILOGV("|execute-inline-range args=%d @%d {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+
+ assert((vdst >> 16) == 0); // 16-bit type -or- high 16 bits clear
+ assert(vsrc1 <= 7);
+
+ switch (vsrc1) {
+ case 7:
+ arg6 = GET_REGISTER(vdst+6);
+ /* fall through */
+ case 6:
+ arg5 = GET_REGISTER(vdst+5);
+ /* fall through */
+ case 5:
+ arg4 = GET_REGISTER(vdst+4);
+ /* fall through */
+ case 4:
+ arg3 = GET_REGISTER(vdst+3);
+ /* fall through */
+ case 3:
+ arg2 = GET_REGISTER(vdst+2);
+ /* fall through */
+ case 2:
+ arg1 = GET_REGISTER(vdst+1);
+ /* fall through */
+ case 1:
+ arg0 = GET_REGISTER(vdst+0);
+ /* fall through */
+ default: // case 0
+ ;
+ }
+
+ if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) {
+ if (!dvmPerformInlineOp7Dbg(arg0, arg1, arg2, arg3, &retval, ref, arg4, arg5, arg6))
+ GOTO_exceptionThrown();
+ } else {
+ if (!dvmPerformInlineOp7Std(arg0, arg1, arg2, arg3, &retval, ref, arg4, arg5, arg6))
+ GOTO_exceptionThrown();
+ }
+#else //ifdef INLINE_ARG_EXPANDED
u4 arg0, arg1, arg2, arg3;
arg0 = arg1 = arg2 = arg3 = 0; /* placate gcc */
@@ -1219,6 +1270,7 @@ HANDLE_OPCODE(OP_EXECUTE_INLINE_RANGE /*{vCCCC..v(CCCC+AA-1)}, inline@BBBB*/)
if (!dvmPerformInlineOp4Std(arg0, arg1, arg2, arg3, &retval, ref))
GOTO_exceptionThrown();
}
+#endif //ifdef INLINE_ARG_EXPANDED
}
FINISH(3);
OP_END