author     Ben Cheng <bccheng@android.com>  2011-02-18 17:12:42 -0800
committer  Ben Cheng <bccheng@android.com>  2011-03-02 13:45:54 -0800
commit     20d7e6c67af128d5bf7cc003564a8122c4101c84 (patch)
tree       2c15767179646dd5c24c87c1a8ebfaedd42686c4 /vm/compiler/codegen
parent     131ec9ff58fcc6a7440297e2cbee23df93d2974d (diff)
Handle OP_THROW in the method JIT.
The current implementation is to reconstruct the leaf Dalvik frame and punt to the interpreter, since the amount of work involved to match each catch block and walk through the stack frames is just not worth JIT'ing.

Additional changes:

- Fixed a control-flow bug where a block that ends with a throw shouldn't have a fall-through block.
- Fixed a code cache lookup bug so that method-based compilation is guaranteed a slot in the profiling table.
- Created separate handler routines based on opcode format for the method-based JIT.
- Renamed a few core registers that also have special meanings to the VM or ARM architecture.

Change-Id: I429b3633f281a0e04d352ae17a1c4f4a41bab156
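
For context, here is a minimal, standalone C sketch of the frame-inflation arithmetic that the new genMethodInflateAndPunt routine (in MethodCodegenDriver.c below) emits before punting; the struct is a stand-in for illustration, not the real StackSaveArea layout.

#include <stdio.h>
#include <stdint.h>

typedef struct {                  /* stand-in; not the real StackSaveArea */
    void *prevFrame;
    const void *savedPc;
    void *returnAddr;
    void *method;
} FakeStackSaveArea;

int main(void)
{
    uint32_t frame[32];
    uint8_t *fp = (uint8_t *) &frame[8];  /* plays the role of r5FP       */
    int registersSize = 8;                /* cUnit->method->registersSize */

    /* oldStackSave = r5FP + sizeof(current frame's registers) */
    uint8_t *oldStackSave = fp + registersSize * 4;
    /* oldFP = oldStackSave + sizeof(StackSaveArea) */
    uint8_t *oldFP = oldStackSave + sizeof(FakeStackSaveArea);
    /* newStackSave = r5FP - sizeof(StackSaveArea) */
    uint8_t *newStackSave = fp - sizeof(FakeStackSaveArea);

    printf("fp=%p oldSave=%p oldFP=%p newSave=%p\n",
           (void *) fp, (void *) oldStackSave, (void *) oldFP,
           (void *) newStackSave);
    return 0;
}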
Diffstat (limited to 'vm/compiler/codegen')
-rw-r--r--  vm/compiler/codegen/arm/ArchUtility.c                      |   4
-rw-r--r--  vm/compiler/codegen/arm/ArmLIR.h                           |  42
-rw-r--r--  vm/compiler/codegen/arm/ArmRallocUtil.c                    |   2
-rw-r--r--  vm/compiler/codegen/arm/Assemble.c                         |   8
-rw-r--r--  vm/compiler/codegen/arm/CodegenCommon.c                    |   2
-rw-r--r--  vm/compiler/codegen/arm/CodegenDriver.c                    | 247
-rw-r--r--  vm/compiler/codegen/arm/FP/ThumbVFP.c                      |   2
-rw-r--r--  vm/compiler/codegen/arm/GlobalOptimizations.c              |   8
-rw-r--r--  vm/compiler/codegen/arm/Thumb/Factory.c                    |  26
-rw-r--r--  vm/compiler/codegen/arm/Thumb/Gen.c                        |  12
-rw-r--r--  vm/compiler/codegen/arm/Thumb2/Factory.c                   |  26
-rw-r--r--  vm/compiler/codegen/arm/Thumb2/Gen.c                       |  14
-rw-r--r--  vm/compiler/codegen/arm/armv7-a-neon/MethodCodegenDriver.c | 279
13 files changed, 470 insertions, 202 deletions
diff --git a/vm/compiler/codegen/arm/ArchUtility.c b/vm/compiler/codegen/arm/ArchUtility.c
index be6d56b94..02a02fa1f 100644
--- a/vm/compiler/codegen/arm/ArchUtility.c
+++ b/vm/compiler/codegen/arm/ArchUtility.c
@@ -34,9 +34,9 @@ static char * decodeRegList(ArmOpcode opcode, int vector, char *buf)
if (vector & 0x1) {
int regId = i;
if (opcode == kThumbPush && i == 8) {
- regId = rlr;
+ regId = r14lr;
} else if (opcode == kThumbPop && i == 8) {
- regId = rpc;
+ regId = r15pc;
}
if (printed) {
sprintf(buf + strlen(buf), ", r%d", regId);
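
The special case above exists because bit 8 in a Thumb push/pop register list does not name r8: it encodes lr for push and pc for pop. A standalone sketch of the same decoding, using raw register numbers rather than the renamed enum:

#include <stdio.h>

int main(void)
{
    int isPush = 1;
    int mask = (1 << 0) | (1 << 5) | (1 << 8);  /* e.g. push {r0, r5, lr} */

    for (int i = 0; i <= 8; i++) {
        if (!(mask & (1 << i)))
            continue;
        /* Bit 8 maps to lr (r14) on push and pc (r15) on pop. */
        int regId = (i == 8) ? (isPush ? 14 : 15) : i;
        printf("r%d ", regId);
    }
    printf("\n");                               /* prints: r0 r5 r14 */
    return 0;
}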
diff --git a/vm/compiler/codegen/arm/ArmLIR.h b/vm/compiler/codegen/arm/ArmLIR.h
index 1f8b5d097..c47c29177 100644
--- a/vm/compiler/codegen/arm/ArmLIR.h
+++ b/vm/compiler/codegen/arm/ArmLIR.h
@@ -203,23 +203,33 @@ typedef enum OpKind {
kOpUncondBr,
} OpKind;
+/*
+ * Annotate special-purpose core registers:
+ * - VM: r4PC, r5FP, and r6SELF
+ * - ARM architecture: r13sp, r14lr, and r15pc
+ *
+ * rPC, rFP, and rSELF are for architecture-independent code to use.
+ */
typedef enum NativeRegisterPool {
- r0 = 0,
- r1 = 1,
- r2 = 2,
- r3 = 3,
- r4PC = 4,
- rFP = 5,
- rSELF = 6,
- r7 = 7,
- r8 = 8,
- r9 = 9,
- r10 = 10,
- r11 = 11,
- r12 = 12,
- r13 = 13,
- rlr = 14,
- rpc = 15,
+ r0 = 0,
+ r1 = 1,
+ r2 = 2,
+ r3 = 3,
+ rPC = 4,
+ r4PC = rPC,
+ rFP = 5,
+ r5FP = rFP,
+ rSELF = 6,
+ r6SELF = rSELF,
+ r7 = 7,
+ r8 = 8,
+ r9 = 9,
+ r10 = 10,
+ r11 = 11,
+ r12 = 12,
+ r13sp = 13,
+ r14lr = 14,
+ r15pc = 15,
fr0 = 0 + FP_REG_OFFSET,
fr1 = 1 + FP_REG_OFFSET,
fr2 = 2 + FP_REG_OFFSET,
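
The aliases introduced above resolve to the same enumerator values, so architecture-independent code can keep writing rPC/rFP/rSELF while ARM-aware code spells out r4PC/r5FP/r6SELF. A trivial standalone check of the scheme (abbreviated enum, same idea):

#include <stdio.h>

typedef enum {
    rPC = 4,   r4PC = rPC,      /* Dalvik PC lives in r4          */
    rFP = 5,   r5FP = rFP,      /* Dalvik frame pointer in r5     */
    rSELF = 6, r6SELF = rSELF,  /* Thread pointer in r6           */
} CoreReg;

int main(void)
{
    /* Both spellings are the same value, so mixed generic and
       ARM-specific codegen emits identical register fields. */
    printf("%d %d %d\n", r4PC, r5FP, r6SELF);   /* prints: 4 5 6 */
    return 0;
}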
diff --git a/vm/compiler/codegen/arm/ArmRallocUtil.c b/vm/compiler/codegen/arm/ArmRallocUtil.c
index d6e73a07a..3a5afa28c 100644
--- a/vm/compiler/codegen/arm/ArmRallocUtil.c
+++ b/vm/compiler/codegen/arm/ArmRallocUtil.c
@@ -58,7 +58,7 @@ extern void dvmCompilerClobberCallRegs(CompilationUnit *cUnit)
dvmCompilerClobber(cUnit, r9); // Need to do this?, be conservative
dvmCompilerClobber(cUnit, r11);
dvmCompilerClobber(cUnit, r12);
- dvmCompilerClobber(cUnit, rlr);
+ dvmCompilerClobber(cUnit, r14lr);
}
/* Clobber all of the temps that might be used by a handler. */
diff --git a/vm/compiler/codegen/arm/Assemble.c b/vm/compiler/codegen/arm/Assemble.c
index 79f5ec758..8fc1add35 100644
--- a/vm/compiler/codegen/arm/Assemble.c
+++ b/vm/compiler/codegen/arm/Assemble.c
@@ -788,7 +788,7 @@ ArmEncodingMap EncodingMap[kArmLast] = {
kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 0, kFmtUnused, -1, -1,
kFmtUnused, -1, -1,
IS_TERTIARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD,
- "ldr", "r!0d, [rpc, #!1d]", 2),
+ "ldr", "r!0d, [r15pc, #!1d]", 2),
ENCODING_MAP(kThumb2BCond, 0xf0008000,
kFmtBrOffset, -1, -1, kFmtBitBlt, 25, 22, kFmtUnused, -1, -1,
kFmtUnused, -1, -1,
@@ -880,12 +880,12 @@ ArmEncodingMap EncodingMap[kArmLast] = {
kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 0, kFmtUnused, -1, -1,
kFmtUnused, -1, -1,
IS_BINARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD,
- "ldr", "r!0d, [rpc, -#!1d]", 2),
+ "ldr", "r!0d, [r15pc, -#!1d]", 2),
};
/*
* The fake NOP of moving r0 to r0 actually will incur data stalls if r0 is
- * not ready. Since r5 (rFP) is not updated often, it is less likely to
+ * not ready. Since r5FP is not updated often, it is less likely to
* generate unnecessary stall cycles.
*/
#define PADDING_MOV_R5_R5 0x1C2D
@@ -950,7 +950,7 @@ static AssemblerStatus assembleInstructions(CompilationUnit *cUnit,
if (lir->opcode == kThumbLdrPcRel ||
lir->opcode == kThumb2LdrPcRel12 ||
lir->opcode == kThumbAddPcRel ||
- ((lir->opcode == kThumb2Vldrs) && (lir->operands[1] == rpc))) {
+ ((lir->opcode == kThumb2Vldrs) && (lir->operands[1] == r15pc))) {
ArmLIR *lirTarget = (ArmLIR *) lir->generic.target;
intptr_t pc = (lir->generic.offset + 4) & ~3;
intptr_t target = lirTarget->generic.offset;
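
The fixup above relies on the ARM rule that a Thumb PC-relative load sees PC as the current instruction's offset plus 4, rounded down to a word boundary. A standalone check of that computation, with made-up offsets:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    intptr_t offset = 0x26;    /* lir->generic.offset (the load)      */
    intptr_t target = 0x110;   /* lirTarget->generic.offset (literal) */

    /* pc = (lir->generic.offset + 4) & ~3, as in assembleInstructions */
    intptr_t pc = (offset + 4) & ~(intptr_t) 3;

    printf("encoded displacement = %ld\n", (long) (target - pc)); /* 232 */
    return 0;
}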
diff --git a/vm/compiler/codegen/arm/CodegenCommon.c b/vm/compiler/codegen/arm/CodegenCommon.c
index 75134bf88..2555a8dbc 100644
--- a/vm/compiler/codegen/arm/CodegenCommon.c
+++ b/vm/compiler/codegen/arm/CodegenCommon.c
@@ -69,7 +69,7 @@ static void setMemRefType(ArmLIR *lir, bool isLoad, int memType)
}
/*
- * Mark load/store instructions that access Dalvik registers through rFP +
+ * Mark load/store instructions that access Dalvik registers through r5FP +
* offset.
*/
static void annotateDalvikRegAccess(ArmLIR *lir, int regId, bool isLoad)
diff --git a/vm/compiler/codegen/arm/CodegenDriver.c b/vm/compiler/codegen/arm/CodegenDriver.c
index 02e6f877e..eda0bec55 100644
--- a/vm/compiler/codegen/arm/CodegenDriver.c
+++ b/vm/compiler/codegen/arm/CodegenDriver.c
@@ -32,7 +32,7 @@ static void markCard(CompilationUnit *cUnit, int valReg, int tgtAddrReg)
int regCardBase = dvmCompilerAllocTemp(cUnit);
int regCardNo = dvmCompilerAllocTemp(cUnit);
ArmLIR *branchOver = genCmpImmBranch(cUnit, kArmCondEq, valReg, 0);
- loadWordDisp(cUnit, rSELF, offsetof(Thread, cardTable),
+ loadWordDisp(cUnit, r6SELF, offsetof(Thread, cardTable),
regCardBase);
opRegRegImm(cUnit, kOpLsr, regCardNo, tgtAddrReg, GC_CARD_SHIFT);
storeBaseIndexed(cUnit, regCardBase, regCardNo, regCardBase, 0,
@@ -160,10 +160,10 @@ static bool genArithOpDoublePortable(CompilationUnit *cUnit, MIR *mir,
return true;
}
dvmCompilerFlushAllRegs(cUnit); /* Send everything to home location */
- LOAD_FUNC_ADDR(cUnit, rlr, (int)funct);
+ LOAD_FUNC_ADDR(cUnit, r14lr, (int)funct);
loadValueDirectWideFixed(cUnit, rlSrc1, r0, r1);
loadValueDirectWideFixed(cUnit, rlSrc2, r2, r3);
- opReg(cUnit, kOpBlx, rlr);
+ opReg(cUnit, kOpBlx, r14lr);
dvmCompilerClobberCallRegs(cUnit);
rlResult = dvmCompilerGetReturnWide(cUnit);
storeValueWide(cUnit, rlDest, rlResult);
@@ -220,7 +220,7 @@ static void selfVerificationBranchInsert(LIR *currentLIR, ArmOpcode opcode,
* Example where r14 (LR) is preserved around a heap access under
* self-verification mode in Thumb2:
*
- * D/dalvikvm( 1538): 0x59414c5e (0026): ldr r14, [rpc, #220] <-hoisted
+ * D/dalvikvm( 1538): 0x59414c5e (0026): ldr r14, [r15pc, #220] <-hoisted
* D/dalvikvm( 1538): 0x59414c62 (002a): mla r4, r0, r8, r4
* D/dalvikvm( 1538): 0x59414c66 (002e): adds r3, r4, r3
* D/dalvikvm( 1538): 0x59414c6a (0032): push <r5, r14> ---+
@@ -744,9 +744,9 @@ static bool genArithOpLong(CompilationUnit *cUnit, MIR *mir,
// Adjust return regs in to handle case of rem returning r2/r3
dvmCompilerFlushAllRegs(cUnit); /* Send everything to home location */
loadValueDirectWideFixed(cUnit, rlSrc1, r0, r1);
- LOAD_FUNC_ADDR(cUnit, rlr, (int) callTgt);
+ LOAD_FUNC_ADDR(cUnit, r14lr, (int) callTgt);
loadValueDirectWideFixed(cUnit, rlSrc2, r2, r3);
- opReg(cUnit, kOpBlx, rlr);
+ opReg(cUnit, kOpBlx, r14lr);
dvmCompilerClobberCallRegs(cUnit);
if (retReg == r0)
rlResult = dvmCompilerGetReturnWide(cUnit);
@@ -952,28 +952,23 @@ static ArmLIR *genUnconditionalBranch(CompilationUnit *cUnit, ArmLIR *target)
/* Perform the actual operation for OP_RETURN_* */
static void genReturnCommon(CompilationUnit *cUnit, MIR *mir)
{
- if (!cUnit->methodJitMode) {
- genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ?
- TEMPLATE_RETURN_PROF :
- TEMPLATE_RETURN);
+ genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ?
+ TEMPLATE_RETURN_PROF : TEMPLATE_RETURN);
#if defined(WITH_JIT_TUNING)
- gDvmJit.returnOp++;
+ gDvmJit.returnOp++;
#endif
- int dPC = (int) (cUnit->method->insns + mir->offset);
- /* Insert branch, but defer setting of target */
- ArmLIR *branch = genUnconditionalBranch(cUnit, NULL);
- /* Set up the place holder to reconstruct this Dalvik PC */
- ArmLIR *pcrLabel = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
- pcrLabel->opcode = kArmPseudoPCReconstructionCell;
- pcrLabel->operands[0] = dPC;
- pcrLabel->operands[1] = mir->offset;
- /* Insert the place holder to the growable list */
- dvmInsertGrowableList(&cUnit->pcReconstructionList,
- (intptr_t) pcrLabel);
- /* Branch to the PC reconstruction code */
- branch->generic.target = (LIR *) pcrLabel;
- }
- /* TODO: Move result to Thread for non-void returns */
+ int dPC = (int) (cUnit->method->insns + mir->offset);
+ /* Insert branch, but defer setting of target */
+ ArmLIR *branch = genUnconditionalBranch(cUnit, NULL);
+ /* Set up the place holder to reconstruct this Dalvik PC */
+ ArmLIR *pcrLabel = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
+ pcrLabel->opcode = kArmPseudoPCReconstructionCell;
+ pcrLabel->operands[0] = dPC;
+ pcrLabel->operands[1] = mir->offset;
+ /* Insert the place holder to the growable list */
+ dvmInsertGrowableList(&cUnit->pcReconstructionList, (intptr_t) pcrLabel);
+ /* Branch to the PC reconstruction code */
+ branch->generic.target = (LIR *) pcrLabel;
}
static void genProcessArgsNoRange(CompilationUnit *cUnit, MIR *mir,
@@ -998,7 +993,7 @@ static void genProcessArgsNoRange(CompilationUnit *cUnit, MIR *mir,
}
if (regMask) {
/* Up to 5 args are pushed on top of FP - sizeofStackSaveArea */
- opRegRegImm(cUnit, kOpSub, r7, rFP,
+ opRegRegImm(cUnit, kOpSub, r7, r5FP,
sizeof(StackSaveArea) + (dInsn->vA << 2));
/* generate null check */
if (pcrLabel) {
@@ -1027,10 +1022,10 @@ static void genProcessArgsRange(CompilationUnit *cUnit, MIR *mir,
dvmCompilerLockAllTemps(cUnit);
/*
- * r4PC : &rFP[vC]
+ * r4PC : &r5FP[vC]
* r7: &newFP[0]
*/
- opRegRegImm(cUnit, kOpAdd, r4PC, rFP, srcOffset);
+ opRegRegImm(cUnit, kOpAdd, r4PC, r5FP, srcOffset);
/* load [r0 .. min(numArgs,4)] */
regMask = (1 << ((numArgs < 4) ? numArgs : 4)) - 1;
/*
@@ -1042,7 +1037,7 @@ static void genProcessArgsRange(CompilationUnit *cUnit, MIR *mir,
*/
if (numArgs != 0) loadMultiple(cUnit, r4PC, regMask);
- opRegRegImm(cUnit, kOpSub, r7, rFP,
+ opRegRegImm(cUnit, kOpSub, r7, r5FP,
sizeof(StackSaveArea) + (numArgs << 2));
/* generate null check */
if (pcrLabel) {
@@ -1058,9 +1053,9 @@ static void genProcessArgsRange(CompilationUnit *cUnit, MIR *mir,
ArmLIR *loopLabel = NULL;
/*
* r0 contains "this" and it will be used later, so push it to the stack
- * first. Pushing r5 (rFP) is just for stack alignment purposes.
+ * first. Pushing r5FP is just for stack alignment purposes.
*/
- opImm(cUnit, kOpPush, (1 << r0 | 1 << rFP));
+ opImm(cUnit, kOpPush, (1 << r0 | 1 << r5FP));
/* No need to generate the loop structure if numArgs <= 11 */
if (numArgs > 11) {
loadConstant(cUnit, 5, ((numArgs - 4) >> 2) << 2);
@@ -1075,7 +1070,7 @@ static void genProcessArgsRange(CompilationUnit *cUnit, MIR *mir,
loadMultiple(cUnit, r4PC, regMask);
/* No need to generate the loop structure if numArgs <= 11 */
if (numArgs > 11) {
- opRegImm(cUnit, kOpSub, rFP, 4);
+ opRegImm(cUnit, kOpSub, r5FP, 4);
genConditionalBranch(cUnit, kArmCondNe, loopLabel);
}
}
@@ -1093,7 +1088,7 @@ static void genProcessArgsRange(CompilationUnit *cUnit, MIR *mir,
loadMultiple(cUnit, r4PC, regMask);
}
if (numArgs >= 8)
- opImm(cUnit, kOpPop, (1 << r0 | 1 << rFP));
+ opImm(cUnit, kOpPop, (1 << r0 | 1 << r5FP));
/* Save the modulo 4 arguments */
if ((numArgs > 4) && (numArgs % 4)) {
@@ -1119,7 +1114,7 @@ static void genInvokeSingletonCommon(CompilationUnit *cUnit, MIR *mir,
ArmLIR *retChainingCell = &labelList[bb->fallThrough->id];
/* r1 = &retChainingCell */
- ArmLIR *addrRetChain = opRegRegImm(cUnit, kOpAdd, r1, rpc, 0);
+ ArmLIR *addrRetChain = opRegRegImm(cUnit, kOpAdd, r1, r15pc, 0);
/* r4PC = dalvikCallsite */
loadConstant(cUnit, r4PC,
@@ -1199,11 +1194,11 @@ static void genInvokeVirtualCommon(CompilationUnit *cUnit, MIR *mir,
(int) (cUnit->method->insns + mir->offset));
/* r1 = &retChainingCell */
- ArmLIR *addrRetChain = opRegRegImm(cUnit, kOpAdd, r1, rpc, 0);
+ ArmLIR *addrRetChain = opRegRegImm(cUnit, kOpAdd, r1, r15pc, 0);
addrRetChain->generic.target = (LIR *) retChainingCell;
/* r2 = &predictedChainingCell */
- ArmLIR *predictedChainingCell = opRegRegImm(cUnit, kOpAdd, r2, rpc, 0);
+ ArmLIR *predictedChainingCell = opRegRegImm(cUnit, kOpAdd, r2, r15pc, 0);
predictedChainingCell->generic.target = (LIR *) predChainingCell;
genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ?
@@ -1248,7 +1243,7 @@ static void genInvokeVirtualCommon(CompilationUnit *cUnit, MIR *mir,
LOAD_FUNC_ADDR(cUnit, r7, (int) dvmJitToPatchPredictedChain);
- genRegCopy(cUnit, r1, rSELF);
+ genRegCopy(cUnit, r1, r6SELF);
/*
* r0 = calleeMethod
@@ -1262,7 +1257,7 @@ static void genInvokeVirtualCommon(CompilationUnit *cUnit, MIR *mir,
opReg(cUnit, kOpBlx, r7);
/* r1 = &retChainingCell */
- addrRetChain = opRegRegImm(cUnit, kOpAdd, r1, rpc, 0);
+ addrRetChain = opRegRegImm(cUnit, kOpAdd, r1, r15pc, 0);
addrRetChain->generic.target = (LIR *) retChainingCell;
bypassRechaining->generic.target = (LIR *) addrRetChain;
@@ -1281,13 +1276,66 @@ static void genInvokeVirtualCommon(CompilationUnit *cUnit, MIR *mir,
genTrap(cUnit, mir->offset, pcrLabel);
}
+/* "this" pointer is already in r0 */
+static void genInvokeVirtualWholeMethod(CompilationUnit *cUnit,
+ MIR *mir,
+ void *calleeAddr,
+ ArmLIR *retChainingCell)
+{
+ CallsiteInfo *callsiteInfo = mir->meta.callsiteInfo;
+ dvmCompilerLockAllTemps(cUnit);
+
+ loadConstant(cUnit, r1, (int) callsiteInfo->clazz);
+
+ loadWordDisp(cUnit, r0, offsetof(Object, clazz), r2);
+ /* Branch to the slow path if classes are not equal */
+ opRegReg(cUnit, kOpCmp, r1, r2);
+ /*
+ * Set the misPredBranchOver target so that it will be generated when the
+ * code for the non-optimized invoke is generated.
+ */
+ ArmLIR *classCheck = opCondBranch(cUnit, kArmCondNe);
+
+ /* r0 = the Dalvik PC of the callsite */
+ loadConstant(cUnit, r0, (int) (cUnit->method->insns + mir->offset));
+
+ newLIR2(cUnit, kThumbBl1, (int) calleeAddr, (int) calleeAddr);
+ newLIR2(cUnit, kThumbBl2, (int) calleeAddr, (int) calleeAddr);
+ genUnconditionalBranch(cUnit, retChainingCell);
+
+ /* Target of slow path */
+ ArmLIR *slowPathLabel = newLIR0(cUnit, kArmPseudoTargetLabel);
+
+ slowPathLabel->defMask = ENCODE_ALL;
+ classCheck->generic.target = (LIR *) slowPathLabel;
+
+ // FIXME
+ cUnit->printMe = true;
+}
+
+static void genInvokeSingletonWholeMethod(CompilationUnit *cUnit,
+ MIR *mir,
+ void *calleeAddr,
+ ArmLIR *retChainingCell)
+{
+ /* r0 = the Dalvik PC of the callsite */
+ loadConstant(cUnit, r0, (int) (cUnit->method->insns + mir->offset));
+
+ newLIR2(cUnit, kThumbBl1, (int) calleeAddr, (int) calleeAddr);
+ newLIR2(cUnit, kThumbBl2, (int) calleeAddr, (int) calleeAddr);
+ genUnconditionalBranch(cUnit, retChainingCell);
+
+ // FIXME
+ cUnit->printMe = true;
+}
+
/* Generate a branch to go back to the interpreter */
static void genPuntToInterp(CompilationUnit *cUnit, unsigned int offset)
{
/* r0 = dalvik pc */
dvmCompilerFlushAllRegs(cUnit);
loadConstant(cUnit, r0, (int) (cUnit->method->insns + offset));
- loadWordDisp(cUnit, rSELF, offsetof(Thread,
+ loadWordDisp(cUnit, r6SELF, offsetof(Thread,
jitToInterpEntries.dvmJitToInterpPunt), r1);
opReg(cUnit, kOpBlx, r1);
}
@@ -1315,7 +1363,7 @@ static void genInterpSingleStep(CompilationUnit *cUnit, MIR *mir)
}
int entryAddr = offsetof(Thread,
jitToInterpEntries.dvmJitToInterpSingleStep);
- loadWordDisp(cUnit, rSELF, entryAddr, r2);
+ loadWordDisp(cUnit, r6SELF, entryAddr, r2);
/* r0 = dalvik pc */
loadConstant(cUnit, r0, (int) (cUnit->method->insns + mir->offset));
/* r1 = dalvik pc of following instruction */
@@ -1342,7 +1390,7 @@ static void genMonitorPortable(CompilationUnit *cUnit, MIR *mir)
dvmCompilerFlushAllRegs(cUnit); /* Send everything to home location */
RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
loadValueDirectFixed(cUnit, rlSrc, r1);
- genRegCopy(cUnit, r0, rSELF);
+ genRegCopy(cUnit, r0, r6SELF);
genNullCheck(cUnit, rlSrc.sRegLow, r1, mir->offset, NULL);
if (isEnter) {
/* Get dPC of next insn */
@@ -1375,7 +1423,7 @@ static void genSuspendPoll(CompilationUnit *cUnit, MIR *mir)
{
int rTemp = dvmCompilerAllocTemp(cUnit);
ArmLIR *ld;
- ld = loadWordDisp(cUnit, rSELF, offsetof(Thread, suspendCount),
+ ld = loadWordDisp(cUnit, r6SELF, offsetof(Thread, suspendCount),
rTemp);
setMemRefType(ld, true /* isLoad */, kMustNotAlias);
genRegImmCheck(cUnit, kArmCondNe, rTemp, 0, mir->offset, NULL);
@@ -1403,8 +1451,7 @@ static bool handleFmt10t_Fmt20t_Fmt30t(CompilationUnit *cUnit, MIR *mir,
* make sure it is dominated by the predecessor.
*/
if (numPredecessors == 1 && bb->taken->visited == false &&
- bb->taken->blockType == kDalvikByteCode &&
- cUnit->methodJitMode == false ) {
+ bb->taken->blockType == kDalvikByteCode) {
cUnit->nextCodegenBlock = bb->taken;
} else {
/* For OP_GOTO, OP_GOTO_16, and OP_GOTO_32 */
@@ -1836,9 +1883,9 @@ static bool handleFmt11x(CompilationUnit *cUnit, MIR *mir)
int resetReg = dvmCompilerAllocTemp(cUnit);
RegLocation rlDest = dvmCompilerGetDest(cUnit, mir, 0);
rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
- loadWordDisp(cUnit, rSELF, exOffset, rlResult.lowReg);
+ loadWordDisp(cUnit, r6SELF, exOffset, rlResult.lowReg);
loadConstant(cUnit, resetReg, 0);
- storeWordDisp(cUnit, rSELF, exOffset, resetReg);
+ storeWordDisp(cUnit, r6SELF, exOffset, resetReg);
storeValue(cUnit, rlDest, rlResult);
break;
}
@@ -1877,17 +1924,16 @@ static bool handleFmt11x(CompilationUnit *cUnit, MIR *mir)
RegLocation rlDest = LOC_DALVIK_RETURN_VAL;
rlDest.fp = rlSrc.fp;
storeValue(cUnit, rlDest, rlSrc);
- genReturnCommon(cUnit,mir);
+ genReturnCommon(cUnit, mir);
break;
}
case OP_MONITOR_EXIT:
case OP_MONITOR_ENTER:
genMonitor(cUnit, mir);
break;
- case OP_THROW: {
+ case OP_THROW:
genInterpSingleStep(cUnit, mir);
break;
- }
default:
return true;
}
@@ -2884,11 +2930,11 @@ static bool handleFmt31t(CompilationUnit *cUnit, MIR *mir)
loadConstant(cUnit, r0,
(int) (cUnit->method->insns + mir->offset + mir->dalvikInsn.vB));
/* r2 <- pc of the instruction following the blx */
- opRegReg(cUnit, kOpMov, r2, rpc);
+ opRegReg(cUnit, kOpMov, r2, r15pc);
opReg(cUnit, kOpBlx, r4PC);
dvmCompilerClobberCallRegs(cUnit);
/* pc <- computed goto target */
- opRegReg(cUnit, kOpMov, rpc, r0);
+ opRegReg(cUnit, kOpMov, r15pc, r0);
break;
}
default:
@@ -2994,11 +3040,19 @@ static bool handleFmt35c_3rc_5rc(CompilationUnit *cUnit, MIR *mir,
else
genProcessArgsRange(cUnit, mir, dInsn, &pcrLabel);
- /* r0 = calleeMethod */
- loadConstant(cUnit, r0, (int) calleeMethod);
+ if (mir->OptimizationFlags & MIR_INVOKE_METHOD_JIT) {
+ const Method *calleeMethod = mir->meta.callsiteInfo->method;
+ void *calleeAddr = dvmJitGetMethodAddr(calleeMethod->insns);
+ assert(calleeAddr);
+ genInvokeSingletonWholeMethod(cUnit, mir, calleeAddr,
+ retChainingCell);
+ } else {
+ /* r0 = calleeMethod */
+ loadConstant(cUnit, r0, (int) calleeMethod);
- genInvokeSingletonCommon(cUnit, mir, bb, labelList, pcrLabel,
- calleeMethod);
+ genInvokeSingletonCommon(cUnit, mir, bb, labelList, pcrLabel,
+ calleeMethod);
+ }
break;
}
/* calleeMethod = method->clazz->pDvmDex->pResMethods[BBBB] */
@@ -3038,11 +3092,19 @@ static bool handleFmt35c_3rc_5rc(CompilationUnit *cUnit, MIR *mir,
genProcessArgsRange(cUnit, mir, dInsn,
NULL /* no null check */);
- /* r0 = calleeMethod */
- loadConstant(cUnit, r0, (int) calleeMethod);
+ if (mir->OptimizationFlags & MIR_INVOKE_METHOD_JIT) {
+ const Method *calleeMethod = mir->meta.callsiteInfo->method;
+ void *calleeAddr = dvmJitGetMethodAddr(calleeMethod->insns);
+ assert(calleeAddr);
+ genInvokeSingletonWholeMethod(cUnit, mir, calleeAddr,
+ retChainingCell);
+ } else {
+ /* r0 = calleeMethod */
+ loadConstant(cUnit, r0, (int) calleeMethod);
- genInvokeSingletonCommon(cUnit, mir, bb, labelList, pcrLabel,
- calleeMethod);
+ genInvokeSingletonCommon(cUnit, mir, bb, labelList, pcrLabel,
+ calleeMethod);
+ }
break;
}
/*
@@ -3143,12 +3205,12 @@ static bool handleFmt35c_3rc_5rc(CompilationUnit *cUnit, MIR *mir,
/* r1 = &retChainingCell */
ArmLIR *addrRetChain =
- opRegRegImm(cUnit, kOpAdd, r1, rpc, 0);
+ opRegRegImm(cUnit, kOpAdd, r1, r15pc, 0);
addrRetChain->generic.target = (LIR *) retChainingCell;
/* r2 = &predictedChainingCell */
ArmLIR *predictedChainingCell =
- opRegRegImm(cUnit, kOpAdd, r2, rpc, 0);
+ opRegRegImm(cUnit, kOpAdd, r2, r15pc, 0);
predictedChainingCell->generic.target = (LIR *) predChainingCell;
genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ?
@@ -3230,7 +3292,7 @@ static bool handleFmt35c_3rc_5rc(CompilationUnit *cUnit, MIR *mir,
LOAD_FUNC_ADDR(cUnit, r7, (int) dvmJitToPatchPredictedChain);
- genRegCopy(cUnit, r1, rSELF);
+ genRegCopy(cUnit, r1, r6SELF);
genRegCopy(cUnit, r2, r9);
genRegCopy(cUnit, r3, r10);
@@ -3246,7 +3308,7 @@ static bool handleFmt35c_3rc_5rc(CompilationUnit *cUnit, MIR *mir,
opReg(cUnit, kOpBlx, r7);
/* r1 = &retChainingCell */
- addrRetChain = opRegRegImm(cUnit, kOpAdd, r1, rpc, 0);
+ addrRetChain = opRegRegImm(cUnit, kOpAdd, r1, r15pc, 0);
addrRetChain->generic.target = (LIR *) retChainingCell;
bypassRechaining->generic.target = (LIR *) addrRetChain;
@@ -3283,25 +3345,6 @@ static bool handleFmt35c_3rc_5rc(CompilationUnit *cUnit, MIR *mir,
return false;
}
-/* "this" pointer is already in r0 */
-static void genValidationForMethodCallee(CompilationUnit *cUnit, MIR *mir,
- ArmLIR **classCheck)
-{
- CallsiteInfo *callsiteInfo = mir->meta.callsiteInfo;
- dvmCompilerLockAllTemps(cUnit);
-
- loadConstant(cUnit, r1, (int) callsiteInfo->clazz);
-
- loadWordDisp(cUnit, r0, offsetof(Object, clazz), r2);
- /* Branch to the slow path if classes are not equal */
- opRegReg(cUnit, kOpCmp, r1, r2);
- /*
- * Set the misPredBranchOver target so that it will be generated when the
- * code for the non-optimized invoke is generated.
- */
- *classCheck = opCondBranch(cUnit, kArmCondNe);
-}
-
static bool handleFmt35ms_3rms(CompilationUnit *cUnit, MIR *mir,
BasicBlock *bb, ArmLIR *labelList)
{
@@ -3338,23 +3381,9 @@ static bool handleFmt35ms_3rms(CompilationUnit *cUnit, MIR *mir,
if (mir->OptimizationFlags & MIR_INVOKE_METHOD_JIT) {
const Method *calleeMethod = mir->meta.callsiteInfo->method;
void *calleeAddr = dvmJitGetMethodAddr(calleeMethod->insns);
- if (calleeAddr) {
- ArmLIR *classCheck;
- cUnit->printMe = true;
- genValidationForMethodCallee(cUnit, mir, &classCheck);
- newLIR2(cUnit, kThumbBl1, (int) calleeAddr,
- (int) calleeAddr);
- newLIR2(cUnit, kThumbBl2, (int) calleeAddr,
- (int) calleeAddr);
- genUnconditionalBranch(cUnit, retChainingCell);
-
- /* Target of slow path */
- ArmLIR *slowPathLabel = newLIR0(cUnit,
- kArmPseudoTargetLabel);
-
- slowPathLabel->defMask = ENCODE_ALL;
- classCheck->generic.target = (LIR *) slowPathLabel;
- }
+ assert(calleeAddr);
+ genInvokeVirtualWholeMethod(cUnit, mir, calleeAddr,
+ retChainingCell);
}
genInvokeVirtualCommon(cUnit, mir, methodIndex,
@@ -3580,7 +3609,7 @@ static bool handleExecuteInlineC(CompilationUnit *cUnit, MIR *mir)
dvmCompilerClobber(cUnit, r4PC);
dvmCompilerClobber(cUnit, r7);
int offset = offsetof(Thread, retval);
- opRegRegImm(cUnit, kOpAdd, r4PC, rSELF, offset);
+ opRegRegImm(cUnit, kOpAdd, r4PC, r6SELF, offset);
opImm(cUnit, kOpPush, (1<<r4PC) | (1<<r7));
LOAD_FUNC_ADDR(cUnit, r4PC, fn);
genExportPC(cUnit, mir);
@@ -3588,7 +3617,7 @@ static bool handleExecuteInlineC(CompilationUnit *cUnit, MIR *mir)
loadValueDirect(cUnit, dvmCompilerGetSrc(cUnit, mir, i), i);
}
opReg(cUnit, kOpBlx, r4PC);
- opRegImm(cUnit, kOpAdd, r13, 8);
+ opRegImm(cUnit, kOpAdd, r13sp, 8);
/* NULL? */
ArmLIR *branchOver = genCmpImmBranch(cUnit, kArmCondNe, r0, 0);
loadConstant(cUnit, r0, (int) (cUnit->method->insns + mir->offset));
@@ -3708,7 +3737,7 @@ static void handleNormalChainingCell(CompilationUnit *cUnit,
* instructions fit the predefined cell size.
*/
insertChainingSwitch(cUnit);
- newLIR3(cUnit, kThumbLdrRRI5, r0, rSELF,
+ newLIR3(cUnit, kThumbLdrRRI5, r0, r6SELF,
offsetof(Thread,
jitToInterpEntries.dvmJitToInterpNormal) >> 2);
newLIR1(cUnit, kThumbBlxR, r0);
@@ -3727,7 +3756,7 @@ static void handleHotChainingCell(CompilationUnit *cUnit,
* instructions fit the predefined cell size.
*/
insertChainingSwitch(cUnit);
- newLIR3(cUnit, kThumbLdrRRI5, r0, rSELF,
+ newLIR3(cUnit, kThumbLdrRRI5, r0, r6SELF,
offsetof(Thread,
jitToInterpEntries.dvmJitToInterpTraceSelect) >> 2);
newLIR1(cUnit, kThumbBlxR, r0);
@@ -3744,11 +3773,11 @@ static void handleBackwardBranchChainingCell(CompilationUnit *cUnit,
*/
insertChainingSwitch(cUnit);
#if defined(WITH_SELF_VERIFICATION)
- newLIR3(cUnit, kThumbLdrRRI5, r0, rSELF,
+ newLIR3(cUnit, kThumbLdrRRI5, r0, r6SELF,
offsetof(Thread,
jitToInterpEntries.dvmJitToInterpBackwardBranch) >> 2);
#else
- newLIR3(cUnit, kThumbLdrRRI5, r0, rSELF,
+ newLIR3(cUnit, kThumbLdrRRI5, r0, r6SELF,
offsetof(Thread, jitToInterpEntries.dvmJitToInterpNormal) >> 2);
#endif
newLIR1(cUnit, kThumbBlxR, r0);
@@ -3764,7 +3793,7 @@ static void handleInvokeSingletonChainingCell(CompilationUnit *cUnit,
* instructions fit the predefined cell size.
*/
insertChainingSwitch(cUnit);
- newLIR3(cUnit, kThumbLdrRRI5, r0, rSELF,
+ newLIR3(cUnit, kThumbLdrRRI5, r0, r6SELF,
offsetof(Thread,
jitToInterpEntries.dvmJitToInterpTraceSelect) >> 2);
newLIR1(cUnit, kThumbBlxR, r0);
@@ -4226,7 +4255,7 @@ void dvmCompilerMIR2LIR(CompilationUnit *cUnit)
case kExceptionHandling:
labelList[i].opcode = kArmPseudoEHBlockLabel;
if (cUnit->pcReconstructionList.numUsed) {
- loadWordDisp(cUnit, rSELF, offsetof(Thread,
+ loadWordDisp(cUnit, r6SELF, offsetof(Thread,
jitToInterpEntries.dvmJitToInterpPunt),
r1);
opReg(cUnit, kOpBlx, r1);
@@ -4520,7 +4549,7 @@ gen_fallthrough:
*/
if (cUnit->switchOverflowPad) {
loadConstant(cUnit, r0, (int) cUnit->switchOverflowPad);
- loadWordDisp(cUnit, rSELF, offsetof(Thread,
+ loadWordDisp(cUnit, r6SELF, offsetof(Thread,
jitToInterpEntries.dvmJitToInterpNoChain), r2);
opRegReg(cUnit, kOpAdd, r1, r1);
opRegRegReg(cUnit, kOpAdd, r4PC, r0, r1);
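
genInvokeVirtualWholeMethod above is essentially a monomorphic guard: compare the receiver's class against the class profiled at the callsite, call the compiled callee on a match, and fall back to the normal invoke path otherwise. A hedged, standalone model of that control flow (every name here is a stand-in):

#include <stdio.h>

typedef struct { const void *clazz; } Object;       /* stand-in */

static void compiledCallee(void) { printf("fast path: JIT'ed body\n"); }
static void slowPathInvoke(void) { printf("slow path: full dispatch\n"); }

/* Mirrors the cmp r1, r2 / conditional-branch sequence in the LIR above. */
static void invoke(Object *self, const void *expectedClazz)
{
    if (self->clazz == expectedClazz)
        compiledCallee();
    else
        slowPathInvoke();
}

int main(void)
{
    int klassA, klassB;
    Object o = { &klassA };
    invoke(&o, &klassA);    /* class check passes    */
    invoke(&o, &klassB);    /* mispredicted callsite */
    return 0;
}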
diff --git a/vm/compiler/codegen/arm/FP/ThumbVFP.c b/vm/compiler/codegen/arm/FP/ThumbVFP.c
index bec54ca77..f685f2469 100644
--- a/vm/compiler/codegen/arm/FP/ThumbVFP.c
+++ b/vm/compiler/codegen/arm/FP/ThumbVFP.c
@@ -38,7 +38,7 @@ static void loadValueAddressDirect(CompilationUnit *cUnit, RegLocation rlSrc,
}
dvmCompilerClobber(cUnit, rDest);
dvmCompilerLockTemp(cUnit, rDest);
- opRegRegImm(cUnit, kOpAdd, rDest, rFP,
+ opRegRegImm(cUnit, kOpAdd, rDest, r5FP,
dvmCompilerS2VReg(cUnit, rlSrc.sRegLow) << 2);
}
diff --git a/vm/compiler/codegen/arm/GlobalOptimizations.c b/vm/compiler/codegen/arm/GlobalOptimizations.c
index 872bddfc7..e52bd8a49 100644
--- a/vm/compiler/codegen/arm/GlobalOptimizations.c
+++ b/vm/compiler/codegen/arm/GlobalOptimizations.c
@@ -46,9 +46,13 @@ static void applyRedundantBranchElimination(CompilationUnit *cUnit)
}
/*
- * Found real useful stuff between the branch and the target
+ * Found real useful stuff between the branch and the target.
+ * Need to explicitly check the lastLIRInsn here since with
+ * method-based JIT the branch might be the last real
+ * instruction.
*/
- if (!isPseudoOpcode(nextLIR->opcode))
+ if (!isPseudoOpcode(nextLIR->opcode) ||
+ (nextLIR = (ArmLIR *) cUnit->lastLIRInsn))
break;
}
}
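
The new lastLIRInsn check matters because, with the method-based JIT, an unconditional branch can be the last real instruction, so the scan for useful work between a branch and its target must not run off the end of the list. A simplified standalone model of the elimination test (the real pass walks ArmLIR nodes and pseudo opcodes):

#include <stdbool.h>
#include <stdio.h>

typedef struct Insn { struct Insn *next; bool pseudo; bool isTarget; } Insn;

/* True when only pseudo ops (labels) separate the branch from its target. */
static bool branchIsRedundant(const Insn *branch)
{
    for (const Insn *p = branch->next; p != NULL; p = p->next) {
        if (p->isTarget)
            return true;    /* fell through to the target: branch useless */
        if (!p->pseudo)
            return false;   /* real work in between: branch is needed     */
    }
    return false;           /* ran off the list: keep the branch          */
}

int main(void)
{
    Insn target = { NULL, true, true };
    Insn label  = { &target, true, false };
    Insn branch = { &label, false, false };
    printf("%s\n", branchIsRedundant(&branch) ? "redundant" : "needed");
    return 0;
}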
diff --git a/vm/compiler/codegen/arm/Thumb/Factory.c b/vm/compiler/codegen/arm/Thumb/Factory.c
index c0a8c32a9..bd84a2896 100644
--- a/vm/compiler/codegen/arm/Thumb/Factory.c
+++ b/vm/compiler/codegen/arm/Thumb/Factory.c
@@ -170,7 +170,7 @@ static ArmLIR *opRegImm(CompilationUnit *cUnit, OpKind op, int rDestSrc1,
ArmOpcode opcode = kThumbBkpt;
switch (op) {
case kOpAdd:
- if ( !neg && (rDestSrc1 == 13) && (value <= 508)) { /* sp */
+ if ( !neg && (rDestSrc1 == r13sp) && (value <= 508)) { /* sp */
assert((value & 0x3) == 0);
return newLIR1(cUnit, kThumbAddSpI7, value >> 2);
} else if (shortForm) {
@@ -179,7 +179,7 @@ static ArmLIR *opRegImm(CompilationUnit *cUnit, OpKind op, int rDestSrc1,
opcode = kThumbAddRRR;
break;
case kOpSub:
- if (!neg && (rDestSrc1 == 13) && (value <= 508)) { /* sp */
+ if (!neg && (rDestSrc1 == r13sp) && (value <= 508)) { /* sp */
assert((value & 0x3) == 0);
return newLIR1(cUnit, kThumbSubSpI7, value >> 2);
} else if (shortForm) {
@@ -257,12 +257,12 @@ static ArmLIR *opRegRegImm(CompilationUnit *cUnit, OpKind op, int rDest,
case kOpAdd:
if (rDest == rSrc1)
return opRegImm(cUnit, op, rDest, value);
- if ((rSrc1 == 13) && (value <= 1020)) { /* sp */
+ if ((rSrc1 == r13sp) && (value <= 1020)) { /* sp */
assert((value & 0x3) == 0);
shortForm = true;
opcode = kThumbAddSpRel;
value >>= 2;
- } else if ((rSrc1 == 15) && (value <= 1020)) { /* pc */
+ } else if ((rSrc1 == r15pc) && (value <= 1020)) { /* pc */
assert((value & 0x3) == 0);
shortForm = true;
opcode = kThumbAddPcRel;
@@ -576,12 +576,12 @@ static ArmLIR *loadBaseDispBody(CompilationUnit *cUnit, MIR *mir, int rBase,
}
break;
case kWord:
- if (LOWREG(rDest) && (rBase == rpc) &&
+ if (LOWREG(rDest) && (rBase == r15pc) &&
(displacement <= 1020) && (displacement >= 0)) {
shortForm = true;
encodedDisp >>= 2;
opcode = kThumbLdrPcRel;
- } else if (LOWREG(rDest) && (rBase == r13) &&
+ } else if (LOWREG(rDest) && (rBase == r13sp) &&
(displacement <= 1020) && (displacement >= 0)) {
shortForm = true;
encodedDisp >>= 2;
@@ -640,14 +640,14 @@ static ArmLIR *loadBaseDispBody(CompilationUnit *cUnit, MIR *mir, int rBase,
: rDest;
res = loadConstant(cUnit, rTmp, displacement);
load = newLIR3(cUnit, opcode, rDest, rBase, rTmp);
- if (rBase == rFP)
+ if (rBase == r5FP)
annotateDalvikRegAccess(load, displacement >> 2,
true /* isLoad */);
if (rTmp != rDest)
dvmCompilerFreeTemp(cUnit, rTmp);
}
}
- if (rBase == rFP) {
+ if (rBase == r5FP) {
if (load != NULL)
annotateDalvikRegAccess(load, displacement >> 2,
true /* isLoad */);
@@ -757,7 +757,7 @@ static ArmLIR *storeBaseDispBody(CompilationUnit *cUnit, int rBase,
}
dvmCompilerFreeTemp(cUnit, rScratch);
}
- if (rBase == rFP) {
+ if (rBase == r5FP) {
if (store != NULL)
annotateDalvikRegAccess(store, displacement >> 2,
false /* isLoad */);
@@ -875,7 +875,7 @@ static void genSelfVerificationPreBranch(CompilationUnit *cUnit,
*/
ArmLIR *pushFP = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
pushFP->opcode = kThumbPush;
- pushFP->operands[0] = 1 << rFP;
+ pushFP->operands[0] = 1 << r5FP;
setupResourceMasks(pushFP);
dvmCompilerInsertLIRBefore((LIR *) origLIR, (LIR *) pushFP);
@@ -897,17 +897,17 @@ static void genSelfVerificationPostBranch(CompilationUnit *cUnit,
/* Pop memory content (LR) into r5 first */
ArmLIR *popForLR = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
popForLR->opcode = kThumbPop;
- popForLR->operands[0] = 1 << rFP;
+ popForLR->operands[0] = 1 << r5FP;
setupResourceMasks(popForLR);
dvmCompilerInsertLIRAfter((LIR *) origLIR, (LIR *) popForLR);
- ArmLIR *copy = genRegCopyNoInsert(cUnit, rlr, rFP);
+ ArmLIR *copy = genRegCopyNoInsert(cUnit, r14lr, r5FP);
dvmCompilerInsertLIRAfter((LIR *) popForLR, (LIR *) copy);
/* Now restore the original r5 */
ArmLIR *popFP = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
popFP->opcode = kThumbPop;
- popFP->operands[0] = 1 << rFP;
+ popFP->operands[0] = 1 << r5FP;
setupResourceMasks(popFP);
dvmCompilerInsertLIRAfter((LIR *) copy, (LIR *) popFP);
}
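
The r13sp short forms above follow from the Thumb encoding: ADD/SUB-sp immediates are 7-bit, word-scaled fields, hence the value <= 508 (127 * 4) and value >> 2 checks. A one-line standalone sanity check:

#include <stdio.h>

int main(void)
{
    int value = 136;                         /* frame adjustment in bytes */
    if ((value & 0x3) == 0 && value <= 508)
        printf("imm7 = %d\n", value >> 2);   /* 34: fits kThumbAddSpI7 */
    return 0;
}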
diff --git a/vm/compiler/codegen/arm/Thumb/Gen.c b/vm/compiler/codegen/arm/Thumb/Gen.c
index 720553094..18ef76286 100644
--- a/vm/compiler/codegen/arm/Thumb/Gen.c
+++ b/vm/compiler/codegen/arm/Thumb/Gen.c
@@ -59,7 +59,7 @@ static int genTraceProfileEntry(CompilationUnit *cUnit)
if ((gDvmJit.profileMode == kTraceProfilingContinuous) ||
(gDvmJit.profileMode == kTraceProfilingDisabled)) {
/* Thumb instruction used directly here to ensure correct size */
- newLIR2(cUnit, kThumbMovRR_H2L, r0, rpc);
+ newLIR2(cUnit, kThumbMovRR_H2L, r0, r15pc);
newLIR2(cUnit, kThumbSubRI8, r0, 10);
newLIR3(cUnit, kThumbLdrRRI5, r0, r0, 0);
newLIR3(cUnit, kThumbLdrRRI5, r1, r0, 0);
@@ -190,7 +190,7 @@ static ArmLIR *genExportPC(CompilationUnit *cUnit, MIR *mir)
int rAddr = dvmCompilerAllocTemp(cUnit);
int offset = offsetof(StackSaveArea, xtra.currentPc);
res = loadConstant(cUnit, rDPC, (int) (cUnit->method->insns + mir->offset));
- newLIR2(cUnit, kThumbMovRR, rAddr, rFP);
+ newLIR2(cUnit, kThumbMovRR, rAddr, r5FP);
newLIR2(cUnit, kThumbSubRI8, rAddr, sizeof(StackSaveArea) - offset);
storeWordDisp( cUnit, rAddr, 0, rDPC);
return res;
@@ -221,7 +221,7 @@ static bool genInlinedAbsFloat(CompilationUnit *cUnit, MIR *mir)
loadConstant(cUnit, signMask, 0x7fffffff);
newLIR2(cUnit, kThumbAndRR, reg0, signMask);
dvmCompilerFreeTemp(cUnit, signMask);
- storeWordDisp(cUnit, rSELF, offset, reg0);
+ storeWordDisp(cUnit, r6SELF, offset, reg0);
//TUNING: rewrite this to not clobber
dvmCompilerClobber(cUnit, reg0);
return false;
@@ -236,10 +236,10 @@ static bool genInlinedAbsDouble(CompilationUnit *cUnit, MIR *mir)
int reghi = regSrc.highReg;
int signMask = dvmCompilerAllocTemp(cUnit);
loadConstant(cUnit, signMask, 0x7fffffff);
- storeWordDisp(cUnit, rSELF, offset, reglo);
+ storeWordDisp(cUnit, r6SELF, offset, reglo);
newLIR2(cUnit, kThumbAndRR, reghi, signMask);
dvmCompilerFreeTemp(cUnit, signMask);
- storeWordDisp(cUnit, rSELF, offset + 4, reghi);
+ storeWordDisp(cUnit, r6SELF, offset + 4, reghi);
//TUNING: rewrite this to not clobber
dvmCompilerClobber(cUnit, reghi);
return false;
@@ -259,7 +259,7 @@ static bool genInlinedMinMaxInt(CompilationUnit *cUnit, MIR *mir, bool isMin)
newLIR2(cUnit, kThumbMovRR, reg0, reg1);
ArmLIR *target = newLIR0(cUnit, kArmPseudoTargetLabel);
target->defMask = ENCODE_ALL;
- newLIR3(cUnit, kThumbStrRRI5, reg0, rSELF, offset >> 2);
+ newLIR3(cUnit, kThumbStrRRI5, reg0, r6SELF, offset >> 2);
branch1->generic.target = (LIR *)target;
//TUNING: rewrite this to not clobber
dvmCompilerClobber(cUnit,reg0);
diff --git a/vm/compiler/codegen/arm/Thumb2/Factory.c b/vm/compiler/codegen/arm/Thumb2/Factory.c
index f68ef943e..e5adcadaa 100644
--- a/vm/compiler/codegen/arm/Thumb2/Factory.c
+++ b/vm/compiler/codegen/arm/Thumb2/Factory.c
@@ -64,7 +64,7 @@ static ArmLIR *loadFPConstantValue(CompilationUnit *cUnit, int rDest,
loadPcRel->opcode = kThumb2Vldrs;
loadPcRel->generic.target = (LIR *) dataTarget;
loadPcRel->operands[0] = rDest;
- loadPcRel->operands[1] = rpc;
+ loadPcRel->operands[1] = r15pc;
setupResourceMasks(loadPcRel);
setMemRefType(loadPcRel, true, kLiteral);
loadPcRel->aliasInfo = dataTarget->operands[0];
@@ -226,7 +226,7 @@ static ArmLIR *opImm(CompilationUnit *cUnit, OpKind op, int value)
case kOpPush: {
if ((value & 0xff00) == 0) {
opcode = kThumbPush;
- } else if ((value & 0xff00) == (1 << rlr)) {
+ } else if ((value & 0xff00) == (1 << r14lr)) {
/* Thumb push can handle lr, which is encoded by bit 8 */
opcode = kThumbPush;
value = (value & 0xff) | (1<<8);
@@ -238,7 +238,7 @@ static ArmLIR *opImm(CompilationUnit *cUnit, OpKind op, int value)
case kOpPop: {
if ((value & 0xff00) == 0) {
opcode = kThumbPop;
- } else if ((value & 0xff00) == (1 << rpc)) {
+ } else if ((value & 0xff00) == (1 << r15pc)) {
/* Thumb pop can handle pc, which is encoded by bit 8 */
opcode = kThumbPop;
value = (value & 0xff) | (1<<8);
@@ -489,11 +489,11 @@ static ArmLIR *opRegRegImm(CompilationUnit *cUnit, OpKind op, int rDest,
case kOpRor:
return newLIR3(cUnit, kThumb2RorRRI5, rDest, rSrc1, value);
case kOpAdd:
- if (LOWREG(rDest) && (rSrc1 == 13) &&
+ if (LOWREG(rDest) && (rSrc1 == r13sp) &&
(value <= 1020) && ((value & 0x3)==0)) {
return newLIR3(cUnit, kThumbAddSpRel, rDest, rSrc1,
value >> 2);
- } else if (LOWREG(rDest) && (rSrc1 == rpc) &&
+ } else if (LOWREG(rDest) && (rSrc1 == r15pc) &&
(value <= 1020) && ((value & 0x3)==0)) {
return newLIR3(cUnit, kThumbAddPcRel, rDest, rSrc1,
value >> 2);
@@ -590,7 +590,7 @@ static ArmLIR *opRegImm(CompilationUnit *cUnit, OpKind op, int rDestSrc1,
ArmOpcode opcode = kThumbBkpt;
switch (op) {
case kOpAdd:
- if ( !neg && (rDestSrc1 == 13) && (value <= 508)) { /* sp */
+ if ( !neg && (rDestSrc1 == r13sp) && (value <= 508)) { /* sp */
assert((value & 0x3) == 0);
return newLIR1(cUnit, kThumbAddSpI7, value >> 2);
} else if (shortForm) {
@@ -598,7 +598,7 @@ static ArmLIR *opRegImm(CompilationUnit *cUnit, OpKind op, int rDestSrc1,
}
break;
case kOpSub:
- if (!neg && (rDestSrc1 == 13) && (value <= 508)) { /* sp */
+ if (!neg && (rDestSrc1 == r13sp) && (value <= 508)) { /* sp */
assert((value & 0x3) == 0);
return newLIR1(cUnit, kThumbSubSpI7, value >> 2);
} else if (shortForm) {
@@ -851,12 +851,12 @@ static ArmLIR *loadBaseDispBody(CompilationUnit *cUnit, MIR *mir, int rBase,
}
break;
}
- if (LOWREG(rDest) && (rBase == rpc) &&
+ if (LOWREG(rDest) && (rBase == r15pc) &&
(displacement <= 1020) && (displacement >= 0)) {
shortForm = true;
encodedDisp >>= 2;
opcode = kThumbLdrPcRel;
- } else if (LOWREG(rDest) && (rBase == r13) &&
+ } else if (LOWREG(rDest) && (rBase == r13sp) &&
(displacement <= 1020) && (displacement >= 0)) {
shortForm = true;
encodedDisp >>= 2;
@@ -916,7 +916,7 @@ static ArmLIR *loadBaseDispBody(CompilationUnit *cUnit, MIR *mir, int rBase,
dvmCompilerFreeTemp(cUnit, regOffset);
}
- if (rBase == rFP) {
+ if (rBase == r5FP) {
annotateDalvikRegAccess(load, displacement >> 2, true /* isLoad */);
}
#if defined(WITH_SELF_VERIFICATION)
@@ -1029,7 +1029,7 @@ static ArmLIR *storeBaseDispBody(CompilationUnit *cUnit, int rBase,
dvmCompilerFreeTemp(cUnit, rScratch);
}
- if (rBase == rFP) {
+ if (rBase == r5FP) {
annotateDalvikRegAccess(store, displacement >> 2, false /* isLoad */);
}
#if defined(WITH_SELF_VERIFICATION)
@@ -1220,7 +1220,7 @@ static void genSelfVerificationPreBranch(CompilationUnit *cUnit,
ArmLIR *push = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
push->opcode = kThumbPush;
/* Thumb push can handle LR (encoded at bit 8) */
- push->operands[0] = (1 << rFP | 1 << 8);
+ push->operands[0] = (1 << r5FP | 1 << 8);
setupResourceMasks(push);
dvmCompilerInsertLIRBefore((LIR *) origLIR, (LIR *) push);
}
@@ -1230,7 +1230,7 @@ static void genSelfVerificationPostBranch(CompilationUnit *cUnit,
ArmLIR *pop = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
/* Thumb pop cannot store into LR - use Thumb2 here */
pop->opcode = kThumb2Pop;
- pop->operands[0] = (1 << rFP | 1 << rlr);
+ pop->operands[0] = (1 << r5FP | 1 << r14lr);
setupResourceMasks(pop);
dvmCompilerInsertLIRAfter((LIR *) origLIR, (LIR *) pop);
}
diff --git a/vm/compiler/codegen/arm/Thumb2/Gen.c b/vm/compiler/codegen/arm/Thumb2/Gen.c
index 864b0b1c6..f54b7eb49 100644
--- a/vm/compiler/codegen/arm/Thumb2/Gen.c
+++ b/vm/compiler/codegen/arm/Thumb2/Gen.c
@@ -201,7 +201,7 @@ static ArmLIR *genExportPC(CompilationUnit *cUnit, MIR *mir)
int offset = offsetof(StackSaveArea, xtra.currentPc);
int rDPC = dvmCompilerAllocTemp(cUnit);
res = loadConstant(cUnit, rDPC, (int) (cUnit->method->insns + mir->offset));
- newLIR3(cUnit, kThumb2StrRRI8Predec, rDPC, rFP,
+ newLIR3(cUnit, kThumb2StrRRI8Predec, rDPC, r5FP,
sizeof(StackSaveArea) - offset);
dvmCompilerFreeTemp(cUnit, rDPC);
return res;
@@ -247,7 +247,7 @@ static void genMonitorEnter(CompilationUnit *cUnit, MIR *mir)
dvmCompilerLockAllTemps(cUnit); // Prepare for explicit register usage
dvmCompilerFreeTemp(cUnit, r4PC); // Free up r4 for general use
genNullCheck(cUnit, rlSrc.sRegLow, r1, mir->offset, NULL);
- loadWordDisp(cUnit, rSELF, offsetof(Thread, threadId), r3); // Get threadId
+ loadWordDisp(cUnit, r6SELF, offsetof(Thread, threadId), r3); // Get threadId
newLIR3(cUnit, kThumb2Ldrex, r2, r1,
offsetof(Object, lock) >> 2); // Get object->lock
opRegImm(cUnit, kOpLsl, r3, LW_LOCK_OWNER_SHIFT); // Align owner
@@ -271,11 +271,11 @@ static void genMonitorEnter(CompilationUnit *cUnit, MIR *mir)
loadConstant(cUnit, r4PC, (int)(cUnit->method->insns + mir->offset +
dexGetWidthFromOpcode(OP_MONITOR_ENTER)));
// Export PC (part 2)
- newLIR3(cUnit, kThumb2StrRRI8Predec, r3, rFP,
+ newLIR3(cUnit, kThumb2StrRRI8Predec, r3, r5FP,
sizeof(StackSaveArea) -
offsetof(StackSaveArea, xtra.currentPc));
/* Call template, and don't return */
- genRegCopy(cUnit, r0, rSELF);
+ genRegCopy(cUnit, r0, r6SELF);
genDispatchToHandler(cUnit, TEMPLATE_MONITOR_ENTER);
// Resume here
target = newLIR0(cUnit, kArmPseudoTargetLabel);
@@ -303,7 +303,7 @@ static void genMonitorExit(CompilationUnit *cUnit, MIR *mir)
dvmCompilerFreeTemp(cUnit, r4PC); // Free up r4 for general use
genNullCheck(cUnit, rlSrc.sRegLow, r1, mir->offset, NULL);
loadWordDisp(cUnit, r1, offsetof(Object, lock), r2); // Get object->lock
- loadWordDisp(cUnit, rSELF, offsetof(Thread, threadId), r3); // Get threadId
+ loadWordDisp(cUnit, r6SELF, offsetof(Thread, threadId), r3); // Get threadId
// Is lock unheld on lock or held by us (==threadId) on unlock?
opRegRegImm(cUnit, kOpAnd, r7, r2,
(LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT));
@@ -324,9 +324,9 @@ static void genMonitorExit(CompilationUnit *cUnit, MIR *mir)
loadConstant(cUnit, r3, (int) (cUnit->method->insns + mir->offset));
LOAD_FUNC_ADDR(cUnit, r7, (int)dvmUnlockObject);
- genRegCopy(cUnit, r0, rSELF);
+ genRegCopy(cUnit, r0, r6SELF);
// Export PC (part 2)
- newLIR3(cUnit, kThumb2StrRRI8Predec, r3, rFP,
+ newLIR3(cUnit, kThumb2StrRRI8Predec, r3, r5FP,
sizeof(StackSaveArea) -
offsetof(StackSaveArea, xtra.currentPc));
opReg(cUnit, kOpBlx, r7);
diff --git a/vm/compiler/codegen/arm/armv7-a-neon/MethodCodegenDriver.c b/vm/compiler/codegen/arm/armv7-a-neon/MethodCodegenDriver.c
index c25ab8346..5a08b60ac 100644
--- a/vm/compiler/codegen/arm/armv7-a-neon/MethodCodegenDriver.c
+++ b/vm/compiler/codegen/arm/armv7-a-neon/MethodCodegenDriver.c
@@ -14,6 +14,228 @@
* limitations under the License.
*/
+/*
+ * Rebuild the interpreter frame then punt to the interpreter to execute
+ * the instruction at the specified PC.
+ *
+ * Currently parameters are passed to the current frame, so we just need to
+ * grow the stack save area above it, fill certain fields in StackSaveArea and
+ * Thread that are skipped during whole-method invocation (specified below),
+ * then return to the interpreter.
+ *
+ * StackSaveArea:
+ * - prevSave
+ * - prevFrame
+ * - savedPc
+ * - returnAddr
+ * - method
+ *
+ * Thread:
+ * - method
+ * - methodClassDex
+ * - curFrame
+ */
+static void genMethodInflateAndPunt(CompilationUnit *cUnit, MIR *mir,
+ BasicBlock *bb)
+{
+ int oldStackSave = r0;
+ int newStackSave = r1;
+ int oldFP = r2;
+ int savedPC = r3;
+ int currentPC = r4PC;
+ int returnAddr = r7;
+ int method = r8;
+ int pDvmDex = r9;
+
+ /*
+ * TODO: check whether to raise the stack overflow exception when growing
+ * the stack save area.
+ */
+
+ /* Send everything to home location */
+ dvmCompilerFlushAllRegs(cUnit);
+
+ /* oldStackSave = r5FP + sizeof(current frame) */
+ opRegRegImm(cUnit, kOpAdd, oldStackSave, r5FP,
+ cUnit->method->registersSize * 4);
+ /* oldFP = oldStackSave + sizeof(stackSaveArea) */
+ opRegRegImm(cUnit, kOpAdd, oldFP, oldStackSave, sizeof(StackSaveArea));
+ /* newStackSave = r5FP - sizeof(StackSaveArea) */
+ opRegRegImm(cUnit, kOpSub, newStackSave, r5FP, sizeof(StackSaveArea));
+
+ loadWordDisp(cUnit, r13sp, 0, savedPC);
+ loadConstant(cUnit, currentPC, (int) (cUnit->method->insns + mir->offset));
+ loadConstant(cUnit, method, (int) cUnit->method);
+ loadConstant(cUnit, pDvmDex, (int) cUnit->method->clazz->pDvmDex);
+#ifdef EASY_GDB
+ /* newStackSave->prevSave = oldStackSave */
+ storeWordDisp(cUnit, newStackSave, offsetof(StackSaveArea, prevSave),
+ oldStackSave);
+#endif
+ /* newStackSave->prevFrame = oldFP */
+ storeWordDisp(cUnit, newStackSave, offsetof(StackSaveArea, prevFrame),
+ oldFP);
+ /* newStackSave->savedPc = savedPC */
+ storeWordDisp(cUnit, newStackSave, offsetof(StackSaveArea, savedPc),
+ savedPC);
+ /* return address */
+ loadConstant(cUnit, returnAddr, 0);
+ storeWordDisp(cUnit, newStackSave, offsetof(StackSaveArea, returnAddr),
+ returnAddr);
+ /* newStackSave->method = method */
+ storeWordDisp(cUnit, newStackSave, offsetof(StackSaveArea, method), method);
+ /* thread->method = method */
+ storeWordDisp(cUnit, r6SELF, offsetof(InterpSaveState, method), method);
+ /* thread->curFrame = current FP */
+ storeWordDisp(cUnit, r6SELF, offsetof(Thread, curFrame), r5FP);
+ /* thread->methodClassDex = pDvmDex */
+ storeWordDisp(cUnit, r6SELF, offsetof(InterpSaveState, methodClassDex),
+ pDvmDex);
+ /* Restore the stack pointer */
+ opRegImm(cUnit, kOpAdd, r13sp, 16);
+ genPuntToInterp(cUnit, mir->offset);
+}
+
+/*
+ * The following are the first-level codegen routines that analyze the format
+ * of each bytecode then either dispatch special purpose codegen routines
+ * or produce corresponding Thumb instructions directly.
+ *
+ * TODO - most of them are just pass-throughs to the trace-based versions for now
+ */
+static bool handleMethodFmt10t_Fmt20t_Fmt30t(CompilationUnit *cUnit, MIR *mir,
+ BasicBlock *bb, ArmLIR *labelList)
+{
+ /* backward branch? */
+ bool backwardBranch = (bb->taken->startOffset <= mir->offset);
+
+ if (backwardBranch && gDvmJit.genSuspendPoll) {
+ genSuspendPoll(cUnit, mir);
+ }
+
+ /* For OP_GOTO, OP_GOTO_16, and OP_GOTO_32 */
+ genUnconditionalBranch(cUnit, &labelList[bb->taken->id]);
+ return false;
+}
+
+static bool handleMethodFmt10x(CompilationUnit *cUnit, MIR *mir)
+{
+ Opcode dalvikOpcode = mir->dalvikInsn.opcode;
+ switch (dalvikOpcode) {
+ case OP_RETURN_VOID:
+ return false;
+ default:
+ return handleFmt10x(cUnit, mir);
+ }
+}
+
+static bool handleMethodFmt11n_Fmt31i(CompilationUnit *cUnit, MIR *mir)
+{
+ return handleFmt11n_Fmt31i(cUnit, mir);
+}
+
+static bool handleMethodFmt11x(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb,
+ ArmLIR *labelList)
+{
+ Opcode dalvikOpcode = mir->dalvikInsn.opcode;
+ switch (dalvikOpcode) {
+ case OP_THROW:
+ genMethodInflateAndPunt(cUnit, mir, bb);
+ return false;
+ default:
+ return handleFmt11x(cUnit, mir);
+ }
+}
+
+static bool handleMethodFmt12x(CompilationUnit *cUnit, MIR *mir)
+{
+ return handleFmt12x(cUnit, mir);
+}
+
+static bool handleMethodFmt20bc_Fmt40sc(CompilationUnit *cUnit, MIR *mir)
+{
+ return handleFmt20bc_Fmt40sc(cUnit, mir);
+}
+
+static bool handleMethodFmt21c_Fmt31c_Fmt41c(CompilationUnit *cUnit, MIR *mir)
+{
+ return handleFmt21c_Fmt31c_Fmt41c(cUnit, mir);
+}
+
+static bool handleMethodFmt21h(CompilationUnit *cUnit, MIR *mir)
+{
+ return handleFmt21h(cUnit, mir);
+}
+
+static bool handleMethodFmt21s(CompilationUnit *cUnit, MIR *mir)
+{
+ return handleFmt21s(cUnit, mir);
+}
+
+static bool handleMethodFmt21t(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb,
+ ArmLIR *labelList)
+{
+ return handleFmt21t(cUnit, mir, bb, labelList);
+}
+
+static bool handleMethodFmt22b_Fmt22s(CompilationUnit *cUnit, MIR *mir)
+{
+ return handleFmt22b_Fmt22s(cUnit, mir);
+}
+
+static bool handleMethodFmt22c_Fmt52c(CompilationUnit *cUnit, MIR *mir)
+{
+ return handleFmt22c_Fmt52c(cUnit, mir);
+}
+
+static bool handleMethodFmt22cs(CompilationUnit *cUnit, MIR *mir)
+{
+ return handleFmt22cs(cUnit, mir);
+}
+
+static bool handleMethodFmt22t(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb,
+ ArmLIR *labelList)
+{
+ return handleFmt22t(cUnit, mir, bb, labelList);
+}
+
+static bool handleMethodFmt22x_Fmt32x(CompilationUnit *cUnit, MIR *mir)
+{
+ return handleFmt22x_Fmt32x(cUnit, mir);
+}
+
+static bool handleMethodFmt23x(CompilationUnit *cUnit, MIR *mir)
+{
+ return handleFmt23x(cUnit, mir);
+}
+
+static bool handleMethodFmt31t(CompilationUnit *cUnit, MIR *mir)
+{
+ return handleFmt31t(cUnit, mir);
+}
+
+static bool handleMethodFmt35c_3rc_5rc(CompilationUnit *cUnit, MIR *mir,
+ BasicBlock *bb, ArmLIR *labelList)
+{
+ return handleFmt35c_3rc_5rc(cUnit, mir, bb, labelList);
+}
+
+static bool handleMethodFmt35ms_3rms(CompilationUnit *cUnit, MIR *mir,
+ BasicBlock *bb, ArmLIR *labelList)
+{
+ return handleFmt35ms_3rms(cUnit, mir, bb, labelList);
+}
+
+static bool handleMethodExecuteInline(CompilationUnit *cUnit, MIR *mir)
+{
+ return handleExecuteInline(cUnit, mir);
+}
+
+static bool handleMethodFmt51l(CompilationUnit *cUnit, MIR *mir)
+{
+ return handleFmt51l(cUnit, mir);
+}
+
/* Handle the content in each basic block */
static bool methodBlockCodeGen(CompilationUnit *cUnit, BasicBlock *bb)
{
@@ -34,12 +256,15 @@ static bool methodBlockCodeGen(CompilationUnit *cUnit, BasicBlock *bb)
ArmLIR *headLIR = NULL;
if (bb->blockType == kMethodEntryBlock) {
- opImm(cUnit, kOpPush, (1 << rlr | 1 << rFP));
- opRegImm(cUnit, kOpSub, rFP,
+ /* r0 = callsitePC */
+ opImm(cUnit, kOpPush, (1 << r0 | 1 << r1 | 1 << r5FP | 1 << r14lr));
+ opRegImm(cUnit, kOpSub, r5FP,
sizeof(StackSaveArea) + cUnit->method->registersSize * 4);
} else if (bb->blockType == kMethodExitBlock) {
- opImm(cUnit, kOpPop, (1 << rpc | 1 << rFP));
+ /* No need to pop r0 and r1 */
+ opRegImm(cUnit, kOpAdd, r13sp, 8);
+ opImm(cUnit, kOpPop, (1 << r5FP | 1 << r15pc));
}
for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
@@ -88,81 +313,81 @@ static bool methodBlockCodeGen(CompilationUnit *cUnit, BasicBlock *bb)
case kFmt10t:
case kFmt20t:
case kFmt30t:
- notHandled = handleFmt10t_Fmt20t_Fmt30t(cUnit,
- mir, bb, labelList);
+ notHandled = handleMethodFmt10t_Fmt20t_Fmt30t(cUnit, mir, bb,
+ labelList);
break;
case kFmt10x:
- notHandled = handleFmt10x(cUnit, mir);
+ notHandled = handleMethodFmt10x(cUnit, mir);
break;
case kFmt11n:
case kFmt31i:
- notHandled = handleFmt11n_Fmt31i(cUnit, mir);
+ notHandled = handleMethodFmt11n_Fmt31i(cUnit, mir);
break;
case kFmt11x:
- notHandled = handleFmt11x(cUnit, mir);
+ notHandled = handleMethodFmt11x(cUnit, mir, bb, labelList);
break;
case kFmt12x:
- notHandled = handleFmt12x(cUnit, mir);
+ notHandled = handleMethodFmt12x(cUnit, mir);
break;
case kFmt20bc:
case kFmt40sc:
- notHandled = handleFmt20bc_Fmt40sc(cUnit, mir);
+ notHandled = handleMethodFmt20bc_Fmt40sc(cUnit, mir);
break;
case kFmt21c:
case kFmt31c:
case kFmt41c:
- notHandled = handleFmt21c_Fmt31c_Fmt41c(cUnit, mir);
+ notHandled = handleMethodFmt21c_Fmt31c_Fmt41c(cUnit, mir);
break;
case kFmt21h:
- notHandled = handleFmt21h(cUnit, mir);
+ notHandled = handleMethodFmt21h(cUnit, mir);
break;
case kFmt21s:
- notHandled = handleFmt21s(cUnit, mir);
+ notHandled = handleMethodFmt21s(cUnit, mir);
break;
case kFmt21t:
- notHandled = handleFmt21t(cUnit, mir, bb, labelList);
+ notHandled = handleMethodFmt21t(cUnit, mir, bb, labelList);
break;
case kFmt22b:
case kFmt22s:
- notHandled = handleFmt22b_Fmt22s(cUnit, mir);
+ notHandled = handleMethodFmt22b_Fmt22s(cUnit, mir);
break;
case kFmt22c:
case kFmt52c:
- notHandled = handleFmt22c_Fmt52c(cUnit, mir);
+ notHandled = handleMethodFmt22c_Fmt52c(cUnit, mir);
break;
case kFmt22cs:
- notHandled = handleFmt22cs(cUnit, mir);
+ notHandled = handleMethodFmt22cs(cUnit, mir);
break;
case kFmt22t:
- notHandled = handleFmt22t(cUnit, mir, bb, labelList);
+ notHandled = handleMethodFmt22t(cUnit, mir, bb, labelList);
break;
case kFmt22x:
case kFmt32x:
- notHandled = handleFmt22x_Fmt32x(cUnit, mir);
+ notHandled = handleMethodFmt22x_Fmt32x(cUnit, mir);
break;
case kFmt23x:
- notHandled = handleFmt23x(cUnit, mir);
+ notHandled = handleMethodFmt23x(cUnit, mir);
break;
case kFmt31t:
- notHandled = handleFmt31t(cUnit, mir);
+ notHandled = handleMethodFmt31t(cUnit, mir);
break;
case kFmt3rc:
case kFmt35c:
case kFmt5rc:
- notHandled = handleFmt35c_3rc_5rc(cUnit, mir, bb,
- labelList);
+ notHandled = handleMethodFmt35c_3rc_5rc(cUnit, mir, bb,
+ labelList);
break;
case kFmt3rms:
case kFmt35ms:
- notHandled = handleFmt35ms_3rms(cUnit, mir, bb,
- labelList);
+ notHandled = handleMethodFmt35ms_3rms(cUnit, mir, bb,
+ labelList);
break;
case kFmt35mi:
case kFmt3rmi:
- notHandled = handleExecuteInline(cUnit, mir);
+ notHandled = handleMethodExecuteInline(cUnit, mir);
break;
case kFmt51l:
- notHandled = handleFmt51l(cUnit, mir);
+ notHandled = handleMethodFmt51l(cUnit, mir);
break;
default:
notHandled = true;
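
Finally, the method entry/exit blocks in methodBlockCodeGen above keep the native stack balanced: entry pushes {r0, r1, r5FP, r14lr} (r0 carries the callsite PC, per the comment in the hunk), and exit skips the two saved scratch words before popping {r5FP, r15pc}. A standalone sanity check of that arithmetic:

#include <stdio.h>

int main(void)
{
    unsigned sp = 0x1000;
    sp -= 4 * 4;    /* entry: push {r0, r1, r5FP, r14lr}          */
    sp += 8;        /* exit: discard saved r0/r1, no need to pop  */
    sp += 2 * 4;    /* pop {r5FP, r15pc} returns to the caller    */
    printf("sp restored to %#x\n", sp);      /* prints: 0x1000 */
    return 0;
}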