| author | Ben Cheng <bccheng@android.com> | 2011-01-14 11:36:46 -0800 |
|---|---|---|
| committer | Ben Cheng <bccheng@android.com> | 2011-01-26 12:51:49 -0800 |
| commit | cfdeca37fcaa27c37bad5077223e4d1e87f1182e (patch) | |
| tree | aa556c91c315994df440244de024ebb15f0e9757 | |
| parent | 73bfc612a47588c2e657acac44c23bd5668dadf4 (diff) | |
Add runtime support for method based compilation.
Enhanced code cache management to accommodate both trace and method
compilations. Also implemented a hacky dispatch routine for virtual
leaf methods.
A microbenchmark showed a 3x speedup in leaf-method invocation.
Change-Id: I79d95b7300ba993667b3aa221c1df9c7b0583521
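The 3x leaf-invocation speedup claimed above comes from the dispatch shape emitted in the handleFmt35ms_3rms hunk of CodegenDriver.c further down: compare the receiver's class against the class observed when the trace was built, then branch-and-link directly into the callee's whole-method translation, keeping the existing predicted-chaining virtual dispatch as the slow path. Below is a rough, runnable C model of that control flow; everything here (dispatch, CompiledBody, the toy class objects) is an illustrative stand-in, not VM source.

```c
#include <stdio.h>

typedef struct ClassObject { int id; } ClassObject;   /* toy stand-in */
typedef struct Object { const ClassObject *clazz; } Object;
typedef void (*CompiledBody)(void);

static void compiledLeafBody(void)        { puts("fast path: direct call"); }
static void slowPathVirtualDispatch(void) { puts("slow path: predicted chaining"); }

/*
 * Mirrors the emitted control flow: genValidationForMethodCallee compares
 * the receiver's clazz with the callsite's recorded clazz, and on a match
 * the kThumbBl1/kThumbBl2 pair calls straight into the method translation.
 * methodBody == NULL models dvmJitGetMethodAddr() finding no translation.
 */
static void dispatch(const Object *thisPtr,
                     const ClassObject *recordedClazz,
                     CompiledBody methodBody)
{
    if (methodBody != NULL && thisPtr->clazz == recordedClazz)
        methodBody();
    else
        slowPathVirtualDispatch();
}

int main(void)
{
    ClassObject a = {1}, b = {2};
    Object o = { &a };
    dispatch(&o, &a, compiledLeafBody);   /* monomorphic receiver: fast */
    dispatch(&o, &b, compiledLeafBody);   /* class check fails: fall back */
    return 0;
}
```

The class check is what keeps the direct call safe: a polymorphic receiver simply misses the fast path and takes the unmodified virtual dispatch.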
33 files changed, 682 insertions(+), 183 deletions(-)
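Before diving into the diff: the data-structure change everything else hangs off is that the JIT lookup table is now keyed on the pair (dPC, isMethodEntry) rather than on the Dalvik PC alone, so a trace translation and a whole-method translation of the same code can coexist. A minimal sketch of that lookup, distilled from the Jit.c hunks below (JitEntry and the table globals are simplified stand-ins for the real Dalvik structures):

```c
#include <stdbool.h>
#include <stdint.h>

/*
 * Simplified stand-ins for Dalvik's JitEntry / gDvmJit table; only the
 * (dPC, isMethodEntry) matching logic mirrors the Jit.c hunks below.
 */
typedef struct JitEntry {
    const uint16_t *dPC;    /* Dalvik PC the translation starts at */
    void *codeAddress;      /* translated code, or NULL */
    bool isMethodEntry;     /* false = trace entry, true = whole method */
    uint32_t chain;         /* next slot in bucket; == table size at end */
} JitEntry;

static JitEntry *jitTable;
static uint32_t jitTableSize;   /* assumed a power of two */

static uint32_t jitHash(const uint16_t *dPC)
{
    /* placeholder hash; the real dvmJitHash also masks to table size */
    return (uint32_t) ((uintptr_t) dPC >> 1) & (jitTableSize - 1);
}

/*
 * A trace translation and a method translation may exist for the same
 * dPC, so lookups must match on the pair, not the PC alone.
 */
static void *getCodeAddr(const uint16_t *dPC, bool isMethodEntry)
{
    uint32_t idx = jitHash(dPC);
    for (;;) {
        if (jitTable[idx].dPC == dPC &&
            jitTable[idx].isMethodEntry == isMethodEntry)
            return jitTable[idx].codeAddress;
        if (jitTable[idx].chain == jitTableSize)   /* end of chain */
            return NULL;
        idx = jitTable[idx].chain;
    }
}

int main(void)
{
    static JitEntry table[4];
    static const uint16_t insns[2];    /* pretend Dalvik bytecode */
    static char stub[1];               /* pretend translated code */

    jitTable = table;
    jitTableSize = 4;
    for (uint32_t i = 0; i < 4; i++)
        table[i].chain = jitTableSize;

    /* install a whole-method translation for insns */
    table[jitHash(insns)] = (JitEntry) { insns, stub, true, jitTableSize };

    /* the method-entry lookup hits; a trace lookup at the same dPC misses */
    return (getCodeAddr(insns, true) == stub &&
            getCodeAddr(insns, false) == NULL) ? 0 : 1;
}
```

In the diff itself, dvmJitGetTraceAddr() and dvmJitGetMethodAddr() are thin wrappers over the shared lookup (getCodeAddrCommon) with the flag fixed to false and true respectively.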
diff --git a/vm/compiler/Compiler.c b/vm/compiler/Compiler.c index a1e3d0e8d..6cd7eb583 100644 --- a/vm/compiler/Compiler.c +++ b/vm/compiler/Compiler.c @@ -644,6 +644,7 @@ static void *compilerThreadStart(void *arg) work.result.codeAddress) { dvmJitSetCodeAddr(work.pc, work.result.codeAddress, work.result.instructionSet, + false, /* not method entry */ work.result.profileCodeSize); } } diff --git a/vm/compiler/Compiler.h b/vm/compiler/Compiler.h index d29520d2e..b0ad7b4a5 100644 --- a/vm/compiler/Compiler.h +++ b/vm/compiler/Compiler.h @@ -215,6 +215,7 @@ typedef enum JitMethodAttributes { kIsThrowFree, /* Method doesn't throw */ kIsGetter, /* Method fits the getter pattern */ kIsSetter, /* Method fits the setter pattern */ + kCannotCompile, /* Method cannot be compiled */ } JitMethodAttributes; #define METHOD_IS_CALLEE (1 << kIsCallee) @@ -224,6 +225,7 @@ typedef enum JitMethodAttributes { #define METHOD_IS_THROW_FREE (1 << kIsThrowFree) #define METHOD_IS_GETTER (1 << kIsGetter) #define METHOD_IS_SETTER (1 << kIsSetter) +#define METHOD_CANNOT_COMPILE (1 << kCannotCompile) /* Vectors to provide optimization hints */ typedef enum JitOptimizationHints { @@ -267,7 +269,7 @@ CompilerMethodStats *dvmCompilerAnalyzeMethodBody(const Method *method, bool isCallee); bool dvmCompilerCanIncludeThisInstruction(const Method *method, const DecodedInstruction *insn); -bool dvmCompileMethod(const Method *method); +bool dvmCompileMethod(const Method *method, JitTranslationInfo *info); bool dvmCompileTrace(JitTraceDescription *trace, int numMaxInsts, JitTranslationInfo *info, jmp_buf *bailPtr, int optHints); void dvmCompilerDumpStats(void); @@ -275,7 +277,8 @@ void dvmCompilerDrainQueue(void); void dvmJitUnchainAll(void); void dvmCompilerSortAndPrintTraceProfiles(void); void dvmCompilerPerformSafePointChecks(void); -void dvmCompilerInlineMIR(struct CompilationUnit *cUnit); +void dvmCompilerInlineMIR(struct CompilationUnit *cUnit, + JitTranslationInfo *info); void dvmInitializeSSAConversion(struct CompilationUnit *cUnit); int dvmConvertSSARegToDalvik(const struct CompilationUnit *cUnit, int ssaReg); bool dvmCompilerLoopOpt(struct CompilationUnit *cUnit); diff --git a/vm/compiler/CompilerIR.h b/vm/compiler/CompilerIR.h index 54d41a5c5..dd1d441bc 100644 --- a/vm/compiler/CompilerIR.h +++ b/vm/compiler/CompilerIR.h @@ -99,6 +99,7 @@ typedef enum { kMIRInlined, // Invoke is inlined (ie dead) kMIRInlinedPred, // Invoke is inlined via prediction kMIRCallee, // Instruction is inlined from callee + kMIRInvokeMethodJIT, // Callee is JIT'ed as a whole method } MIROptimizationFlagPositons; #define MIR_IGNORE_NULL_CHECK (1 << kMIRIgnoreNullCheck) @@ -108,6 +109,7 @@ typedef enum { #define MIR_INLINED (1 << kMIRInlined) #define MIR_INLINED_PRED (1 << kMIRInlinedPred) #define MIR_CALLEE (1 << kMIRCallee) +#define MIR_INVOKE_METHOD_JIT (1 << kMIRInvokeMethodJIT) typedef struct CallsiteInfo { const ClassObject *clazz; @@ -209,7 +211,6 @@ typedef struct CompilationUnit { bool hasLoop; // Contains a loop bool hasInvoke; // Contains an invoke instruction bool heapMemOp; // Mark mem ops for self verification - bool wholeMethod; int profileCodeSize; // Size of the profile prefix in bytes int numChainingCells[kChainingCellGap]; LIR *firstChainingLIR[kChainingCellGap]; @@ -243,10 +244,12 @@ typedef struct CompilationUnit { const u2 *switchOverflowPad; /* New fields only for method-based compilation */ + bool methodJitMode; int numReachableBlocks; int numDalvikRegisters; // method->registersSize + inlined BasicBlock *entryBlock; 
BasicBlock *exitBlock; + BasicBlock *curBlock; GrowableList dfsOrder; GrowableList domPostOrderTraversal; BitVector *tryBlockAddr; @@ -255,6 +258,7 @@ typedef struct CompilationUnit { BitVector *tempDalvikRegisterV; BitVector *tempSSARegisterV; // numSSARegs bool printSSANames; + void *blockLabelList; } CompilationUnit; #if defined(WITH_SELF_VERIFICATION) diff --git a/vm/compiler/Dataflow.c b/vm/compiler/Dataflow.c index ba1b8fd59..d935fd5f2 100644 --- a/vm/compiler/Dataflow.c +++ b/vm/compiler/Dataflow.c @@ -1592,7 +1592,7 @@ char *dvmCompilerGetDalvikDisassembly(const DecodedInstruction *insn, char buffer[256]; int opcode = insn->opcode; int dfAttributes = dvmCompilerDataFlowAttributes[opcode]; - int flags = dexGetFlagsFromOpcode(insn->opcode); + int flags; char *ret; buffer[0] = 0; @@ -1603,8 +1603,10 @@ char *dvmCompilerGetDalvikDisassembly(const DecodedInstruction *insn, else { sprintf(buffer, "Opcode 0x%x", opcode); } + flags = 0; } else { strcpy(buffer, dexGetOpcodeName(opcode)); + flags = dexGetFlagsFromOpcode(insn->opcode); } if (note) @@ -1630,7 +1632,8 @@ char *dvmCompilerGetDalvikDisassembly(const DecodedInstruction *insn, offset = (int) insn->vA; break; default: - LOGE("Unexpected branch format: %d", dalvikFormat); + LOGE("Unexpected branch format %d / opcode %#x", dalvikFormat, + opcode); dvmAbort(); break; } diff --git a/vm/compiler/Frontend.c b/vm/compiler/Frontend.c index 1095225b3..c4dbf2794 100644 --- a/vm/compiler/Frontend.c +++ b/vm/compiler/Frontend.c @@ -288,11 +288,12 @@ CompilerMethodStats *dvmCompilerAnalyzeMethodBody(const Method *method, /* For lookup only */ dummyMethodEntry.method = method; - realMethodEntry = - (CompilerMethodStats *) dvmHashTableLookup(gDvmJit.methodStatsTable, hashValue, - &dummyMethodEntry, - (HashCompareFunc) compareMethod, - false); + realMethodEntry = (CompilerMethodStats *) + dvmHashTableLookup(gDvmJit.methodStatsTable, + hashValue, + &dummyMethodEntry, + (HashCompareFunc) compareMethod, + false); /* This method has never been analyzed before - create an entry */ if (realMethodEntry == NULL) { @@ -440,7 +441,7 @@ bool dvmCompileTrace(JitTraceDescription *desc, int numMaxInsts, #endif /* If we've already compiled this trace, just return success */ - if (dvmJitGetCodeAddr(startCodePtr) && !info->discardResult) { + if (dvmJitGetTraceAddr(startCodePtr) && !info->discardResult) { /* * Make sure the codeAddress is NULL so that it won't clobber the * existing entry. 
@@ -588,11 +589,12 @@ bool dvmCompileTrace(JitTraceDescription *desc, int numMaxInsts, int flags = dexGetFlagsFromOpcode(insn->dalvikInsn.opcode); if (flags & kInstrInvoke) { + const Method *calleeMethod = (const Method *) currRun[2].meta; assert(numInsts == 1); CallsiteInfo *callsiteInfo = (CallsiteInfo *)dvmCompilerNew(sizeof(CallsiteInfo), true); callsiteInfo->clazz = (ClassObject *)currRun[1].meta; - callsiteInfo->method = (Method *)currRun[2].meta; + callsiteInfo->method = calleeMethod; insn->meta.callsiteInfo = callsiteInfo; } @@ -870,7 +872,7 @@ bool dvmCompileTrace(JitTraceDescription *desc, int numMaxInsts, /* Inline transformation @ the MIR level */ if (cUnit.hasInvoke && !(gDvmJit.disableOpt & (1 << kMethodInlining))) { - dvmCompilerInlineMIR(&cUnit); + dvmCompilerInlineMIR(&cUnit, info); } cUnit.numDalvikRegisters = cUnit.method->registersSize; @@ -921,6 +923,7 @@ bool dvmCompileTrace(JitTraceDescription *desc, int numMaxInsts, } while (cUnit.assemblerStatus == kRetryAll); if (cUnit.printMe) { + LOGD("Trace Dalvik PC: %p", startCodePtr); dvmCompilerCodegenDump(&cUnit); LOGD("End %s%s, %d Dalvik instructions", desc->method->clazz->descriptor, desc->method->name, @@ -941,14 +944,6 @@ bool dvmCompileTrace(JitTraceDescription *desc, int numMaxInsts, methodStats->nativeSize += cUnit.totalSize; #endif - /* FIXME - to exercise the method parser, uncomment the following code */ -#if 0 - bool dvmCompileMethod(const Method *method); - if (desc->trace[0].frag.startOffset == 0) { - dvmCompileMethod(desc->method); - } -#endif - return info->codeAddress != NULL; } @@ -1671,7 +1666,7 @@ static void processCanThrow(CompilationUnit *cUnit, BasicBlock *curBlock, * TODO: implementation will be revisited when the trace builder can provide * whole-method traces. */ -bool dvmCompileMethod(const Method *method) +bool dvmCompileMethod(const Method *method, JitTranslationInfo *info) { CompilationUnit cUnit; const DexCode *dexCode = dvmGetMethodCode(method); @@ -1680,12 +1675,27 @@ bool dvmCompileMethod(const Method *method) int numBlocks = 0; unsigned int curOffset = 0; + /* Method already compiled */ + if (dvmJitGetMethodAddr(codePtr)) { + info->codeAddress = NULL; + return false; + } + memset(&cUnit, 0, sizeof(cUnit)); cUnit.method = method; + cUnit.methodJitMode = true; + /* Initialize the block list */ dvmInitGrowableList(&cUnit.blockList, 4); + /* + * FIXME - PC reconstruction list won't be needed after the codegen routines + * are enhanced to true method mode. + */ + /* Initialize the PC reconstruction list */ + dvmInitGrowableList(&cUnit.pcReconstructionList, 8); + /* Allocate the bit-vector to track the beginning of basic blocks */ BitVector *tryBlockAddr = dvmCompilerAllocBitVector(dexCode->insnsSize, true /* expandable */); @@ -1789,6 +1799,10 @@ bool dvmCompileMethod(const Method *method) } } + if (cUnit.printMe) { + dvmCompilerDumpCompilationUnit(&cUnit); + } + /* Adjust this value accordingly once inlining is performed */ cUnit.numDalvikRegisters = cUnit.method->registersSize; @@ -1802,10 +1816,41 @@ bool dvmCompileMethod(const Method *method) /* Perform SSA transformation for the whole method */ dvmCompilerMethodSSATransformation(&cUnit); - if (cUnit.printMe) dumpCFG(&cUnit, "/data/tombstones/"); + dvmCompilerInitializeRegAlloc(&cUnit); // Needs to happen after SSA naming - /* Reset the compiler resource pool */ - dvmCompilerArenaReset(); + /* Allocate Registers using simple local allocation scheme */ + dvmCompilerLocalRegAlloc(&cUnit); + + /* Convert MIR to LIR, etc. 
*/ + dvmCompilerMethodMIR2LIR(&cUnit); + + // Debugging only + //dumpCFG(&cUnit, "/data/tombstones/"); + + /* Method is not empty */ + if (cUnit.firstLIRInsn) { + /* Convert LIR into machine code. Loop for recoverable retries */ + do { + dvmCompilerAssembleLIR(&cUnit, info); + cUnit.assemblerRetries++; + if (cUnit.printMe && cUnit.assemblerStatus != kSuccess) + LOGD("Assembler abort #%d on %d",cUnit.assemblerRetries, + cUnit.assemblerStatus); + } while (cUnit.assemblerStatus == kRetryAll); + + if (cUnit.printMe) { + dvmCompilerCodegenDump(&cUnit); + } + + if (info->codeAddress) { + dvmJitSetCodeAddr(dexCode->insns, info->codeAddress, + info->instructionSet, true, 0); + /* + * Clear the codeAddress for the enclosing trace to reuse the info + */ + info->codeAddress = NULL; + } + } return false; } diff --git a/vm/compiler/InlineTransformation.c b/vm/compiler/InlineTransformation.c index cab790cac..6cf2d43be 100644 --- a/vm/compiler/InlineTransformation.c +++ b/vm/compiler/InlineTransformation.c @@ -34,7 +34,7 @@ static inline u4 convertRegId(const DecodedInstruction *invoke, } } -static void inlineGetter(CompilationUnit *cUnit, +static bool inlineGetter(CompilationUnit *cUnit, const Method *calleeMethod, MIR *invokeMIR, BasicBlock *invokeBB, @@ -49,7 +49,7 @@ static void inlineGetter(CompilationUnit *cUnit, dexDecodeInstruction(calleeMethod->insns, &getterInsn); if (!dvmCompilerCanIncludeThisInstruction(calleeMethod, &getterInsn)) - return; + return false; /* * Some getters (especially invoked through interface) are not followed @@ -59,7 +59,7 @@ static void inlineGetter(CompilationUnit *cUnit, (moveResultMIR->dalvikInsn.opcode != OP_MOVE_RESULT && moveResultMIR->dalvikInsn.opcode != OP_MOVE_RESULT_OBJECT && moveResultMIR->dalvikInsn.opcode != OP_MOVE_RESULT_WIDE)) { - return; + return false; } int dfFlags = dvmCompilerDataFlowAttributes[getterInsn.opcode]; @@ -124,10 +124,10 @@ static void inlineGetter(CompilationUnit *cUnit, #endif } - return; + return true; } -static void inlineSetter(CompilationUnit *cUnit, +static bool inlineSetter(CompilationUnit *cUnit, const Method *calleeMethod, MIR *invokeMIR, BasicBlock *invokeBB, @@ -140,7 +140,7 @@ static void inlineSetter(CompilationUnit *cUnit, dexDecodeInstruction(calleeMethod->insns, &setterInsn); if (!dvmCompilerCanIncludeThisInstruction(calleeMethod, &setterInsn)) - return; + return false; int dfFlags = dvmCompilerDataFlowAttributes[setterInsn.opcode]; @@ -205,17 +205,17 @@ static void inlineSetter(CompilationUnit *cUnit, #endif } - return; + return true; } -static void tryInlineSingletonCallsite(CompilationUnit *cUnit, +static bool tryInlineSingletonCallsite(CompilationUnit *cUnit, const Method *calleeMethod, MIR *invokeMIR, BasicBlock *invokeBB, bool isRange) { /* Not a Java method */ - if (dvmIsNativeMethod(calleeMethod)) return; + if (dvmIsNativeMethod(calleeMethod)) return false; CompilerMethodStats *methodStats = dvmCompilerAnalyzeMethodBody(calleeMethod, true); @@ -229,19 +229,20 @@ static void tryInlineSingletonCallsite(CompilationUnit *cUnit, * the PC reconstruction or chaining cell). 
*/ invokeBB->needFallThroughBranch = true; - return; + return true; } if (methodStats->attributes & METHOD_IS_GETTER) { - inlineGetter(cUnit, calleeMethod, invokeMIR, invokeBB, false, isRange); - return; + return inlineGetter(cUnit, calleeMethod, invokeMIR, invokeBB, false, + isRange); } else if (methodStats->attributes & METHOD_IS_SETTER) { - inlineSetter(cUnit, calleeMethod, invokeMIR, invokeBB, false, isRange); - return; + return inlineSetter(cUnit, calleeMethod, invokeMIR, invokeBB, false, + isRange); } + return false; } -static void inlineEmptyVirtualCallee(CompilationUnit *cUnit, +static bool inlineEmptyVirtualCallee(CompilationUnit *cUnit, const Method *calleeMethod, MIR *invokeMIR, BasicBlock *invokeBB) @@ -252,37 +253,39 @@ static void inlineEmptyVirtualCallee(CompilationUnit *cUnit, dvmCompilerInsertMIRAfter(invokeBB, invokeMIR, invokeMIRSlow); invokeMIRSlow->OptimizationFlags |= MIR_INLINED_PRED; + return true; } -static void tryInlineVirtualCallsite(CompilationUnit *cUnit, +static bool tryInlineVirtualCallsite(CompilationUnit *cUnit, const Method *calleeMethod, MIR *invokeMIR, BasicBlock *invokeBB, bool isRange) { /* Not a Java method */ - if (dvmIsNativeMethod(calleeMethod)) return; + if (dvmIsNativeMethod(calleeMethod)) return false; CompilerMethodStats *methodStats = dvmCompilerAnalyzeMethodBody(calleeMethod, true); /* Empty callee - do nothing by checking the clazz pointer */ if (methodStats->attributes & METHOD_IS_EMPTY) { - inlineEmptyVirtualCallee(cUnit, calleeMethod, invokeMIR, invokeBB); - return; + return inlineEmptyVirtualCallee(cUnit, calleeMethod, invokeMIR, + invokeBB); } if (methodStats->attributes & METHOD_IS_GETTER) { - inlineGetter(cUnit, calleeMethod, invokeMIR, invokeBB, true, isRange); - return; + return inlineGetter(cUnit, calleeMethod, invokeMIR, invokeBB, true, + isRange); } else if (methodStats->attributes & METHOD_IS_SETTER) { - inlineSetter(cUnit, calleeMethod, invokeMIR, invokeBB, true, isRange); - return; + return inlineSetter(cUnit, calleeMethod, invokeMIR, invokeBB, true, + isRange); } + return false; } -void dvmCompilerInlineMIR(CompilationUnit *cUnit) +void dvmCompilerInlineMIR(CompilationUnit *cUnit, JitTranslationInfo *info) { bool isRange = false; GrowableListIterator iterator; @@ -337,8 +340,30 @@ void dvmCompilerInlineMIR(CompilationUnit *cUnit) } if (calleeMethod) { - tryInlineSingletonCallsite(cUnit, calleeMethod, lastMIRInsn, bb, - isRange); + bool inlined = tryInlineSingletonCallsite(cUnit, calleeMethod, + lastMIRInsn, bb, isRange); + if (!inlined && + !(gDvmJit.disableOpt & (1 << kMethodJit)) && + !dvmIsNativeMethod(calleeMethod)) { + CompilerMethodStats *methodStats = + dvmCompilerAnalyzeMethodBody(calleeMethod, true); + if ((methodStats->attributes & METHOD_IS_LEAF) && + !(methodStats->attributes & METHOD_CANNOT_COMPILE)) { + /* Callee has been previously compiled */ + if (dvmJitGetMethodAddr(calleeMethod->insns)) { + lastMIRInsn->OptimizationFlags |= MIR_INVOKE_METHOD_JIT; + } else { + /* Compile the callee first */ + dvmCompileMethod(calleeMethod, info); + if (dvmJitGetMethodAddr(calleeMethod->insns)) { + lastMIRInsn->OptimizationFlags |= + MIR_INVOKE_METHOD_JIT; + } else { + methodStats->attributes |= METHOD_CANNOT_COMPILE; + } + } + } + } return; } @@ -360,8 +385,30 @@ void dvmCompilerInlineMIR(CompilationUnit *cUnit) } if (calleeMethod) { - tryInlineVirtualCallsite(cUnit, calleeMethod, lastMIRInsn, bb, - isRange); + bool inlined = tryInlineVirtualCallsite(cUnit, calleeMethod, + lastMIRInsn, bb, isRange); + if (!inlined && + 
!(gDvmJit.disableOpt & (1 << kMethodJit)) && + !dvmIsNativeMethod(calleeMethod)) { + CompilerMethodStats *methodStats = + dvmCompilerAnalyzeMethodBody(calleeMethod, true); + if ((methodStats->attributes & METHOD_IS_LEAF) && + !(methodStats->attributes & METHOD_CANNOT_COMPILE)) { + /* Callee has been previously compiled */ + if (dvmJitGetMethodAddr(calleeMethod->insns)) { + lastMIRInsn->OptimizationFlags |= MIR_INVOKE_METHOD_JIT; + } else { + /* Compile the callee first */ + dvmCompileMethod(calleeMethod, info); + if (dvmJitGetMethodAddr(calleeMethod->insns)) { + lastMIRInsn->OptimizationFlags |= + MIR_INVOKE_METHOD_JIT; + } else { + methodStats->attributes |= METHOD_CANNOT_COMPILE; + } + } + } + } return; } } diff --git a/vm/compiler/MethodSSATransformation.c b/vm/compiler/MethodSSATransformation.c index 60eea334a..eaee24ae4 100644 --- a/vm/compiler/MethodSSATransformation.c +++ b/vm/compiler/MethodSSATransformation.c @@ -29,8 +29,8 @@ static void recordDFSPreOrder(CompilationUnit *cUnit, BasicBlock *block) /* Enqueue the block id */ dvmInsertGrowableList(&cUnit->dfsOrder, block->id); - if (block->taken) recordDFSPreOrder(cUnit, block->taken); if (block->fallThrough) recordDFSPreOrder(cUnit, block->fallThrough); + if (block->taken) recordDFSPreOrder(cUnit, block->taken); if (block->successorBlockList.blockListType != kNotUsed) { GrowableListIterator iterator; dvmGrowableListIteratorInit(&block->successorBlockList.blocks, @@ -185,13 +185,6 @@ static bool computeDominanceFrontier(CompilationUnit *cUnit, BasicBlock *bb) } } } - if (cUnit->printMe) { - char blockName[BLOCK_NAME_LEN]; - dvmGetBlockName(bb, blockName); - dvmDumpBlockBitVector(blockList, blockName, bb->domFrontier, - cUnit->numBlocks); - } - return true; } @@ -399,11 +392,11 @@ static void insertPhiNodes(CompilationUnit *cUnit) int dalvikReg; const GrowableList *blockList = &cUnit->blockList; BitVector *phiBlocks = - dvmCompilerAllocBitVector(cUnit->numDalvikRegisters, false); + dvmCompilerAllocBitVector(cUnit->numBlocks, false); BitVector *tmpBlocks = - dvmCompilerAllocBitVector(cUnit->numDalvikRegisters, false); + dvmCompilerAllocBitVector(cUnit->numBlocks, false); BitVector *inputBlocks = - dvmCompilerAllocBitVector(cUnit->numDalvikRegisters, false); + dvmCompilerAllocBitVector(cUnit->numBlocks, false); cUnit->tempDalvikRegisterV = dvmCompilerAllocBitVector(cUnit->numDalvikRegisters, false); diff --git a/vm/compiler/codegen/CompilerCodegen.h b/vm/compiler/codegen/CompilerCodegen.h index 70a2bbd64..9cd4847c0 100644 --- a/vm/compiler/codegen/CompilerCodegen.h +++ b/vm/compiler/codegen/CompilerCodegen.h @@ -28,6 +28,9 @@ bool dvmCompilerDoWork(CompilerWorkOrder *work); /* Lower middle-level IR to low-level IR */ void dvmCompilerMIR2LIR(CompilationUnit *cUnit); +/* Lower middle-level IR to low-level IR for the whole method */ +void dvmCompilerMethodMIR2LIR(CompilationUnit *cUnit); + /* Assemble LIR into machine code */ void dvmCompilerAssembleLIR(CompilationUnit *cUnit, JitTranslationInfo *info); diff --git a/vm/compiler/codegen/Optimizer.h b/vm/compiler/codegen/Optimizer.h index d42fe8738..2b05476c0 100644 --- a/vm/compiler/codegen/Optimizer.h +++ b/vm/compiler/codegen/Optimizer.h @@ -29,6 +29,7 @@ typedef enum optControlVector { kTrackLiveTemps, kSuppressLoads, kMethodInlining, + kMethodJit, } optControlVector; /* Forward declarations */ diff --git a/vm/compiler/codegen/arm/ArchFactory.c b/vm/compiler/codegen/arm/ArchFactory.c index 581ba395c..805a6fc48 100644 --- a/vm/compiler/codegen/arm/ArchFactory.c +++ 
b/vm/compiler/codegen/arm/ArchFactory.c @@ -32,7 +32,21 @@ static TGT_LIR *genRegImmCheck(CompilationUnit *cUnit, TGT_LIR *pcrLabel) { TGT_LIR *branch = genCmpImmBranch(cUnit, cond, reg, checkValue); - return genCheckCommon(cUnit, dOffset, branch, pcrLabel); + if (cUnit->methodJitMode) { + BasicBlock *bb = cUnit->curBlock; + if (bb->taken) { + ArmLIR *exceptionLabel = (ArmLIR *) cUnit->blockLabelList; + exceptionLabel += bb->taken->id; + branch->generic.target = (LIR *) exceptionLabel; + return exceptionLabel; + } else { + LOGE("Catch blocks not handled yet"); + dvmAbort(); + return NULL; + } + } else { + return genCheckCommon(cUnit, dOffset, branch, pcrLabel); + } } /* diff --git a/vm/compiler/codegen/arm/ArchUtility.c b/vm/compiler/codegen/arm/ArchUtility.c index 7a4d307b5..5af4f3b5c 100644 --- a/vm/compiler/codegen/arm/ArchUtility.c +++ b/vm/compiler/codegen/arm/ArchUtility.c @@ -195,9 +195,10 @@ static void buildInsnString(char *fmt, ArmLIR *lir, char* buf, } break; case 't': - sprintf(tbuf,"0x%08x", + sprintf(tbuf,"0x%08x (L%p)", (int) baseAddr + lir->generic.offset + 4 + - (operand << 1)); + (operand << 1), + lir->generic.target); break; case 'u': { int offset_1 = lir->operands[0]; @@ -302,8 +303,6 @@ void dvmDumpLIRInsn(LIR *arg, unsigned char *baseAddr) case kArmPseudoSSARep: DUMP_SSA_REP(LOGD("-------- %s\n", (char *) dest)); break; - case kArmPseudoTargetLabel: - break; case kArmPseudoChainingCellBackwardBranch: LOGD("-------- chaining cell (backward branch): 0x%04x\n", dest); break; @@ -344,8 +343,9 @@ void dvmDumpLIRInsn(LIR *arg, unsigned char *baseAddr) case kArmPseudoEHBlockLabel: LOGD("Exception_Handling:\n"); break; + case kArmPseudoTargetLabel: case kArmPseudoNormalBlockLabel: - LOGD("L%#06x:\n", dest); + LOGD("L%p:\n", lir); break; default: if (lir->isNop && !dumpNop) { diff --git a/vm/compiler/codegen/arm/Assemble.c b/vm/compiler/codegen/arm/Assemble.c index 577f68320..efae8fd37 100644 --- a/vm/compiler/codegen/arm/Assemble.c +++ b/vm/compiler/codegen/arm/Assemble.c @@ -1032,6 +1032,17 @@ static AssemblerStatus assembleInstructions(CompilationUnit *cUnit, lir->operands[0] = (delta >> 12) & 0x7ff; NEXT_LIR(lir)->operands[0] = (delta>> 1) & 0x7ff; + } else if (lir->opcode == kThumbBl1) { + assert(NEXT_LIR(lir)->opcode == kThumbBl2); + /* Both curPC and target are Thumb */ + intptr_t curPC = startAddr + lir->generic.offset + 4; + intptr_t target = lir->operands[1]; + + int delta = target - curPC; + assert((delta >= -(1<<22)) && (delta <= ((1<<22)-2))); + + lir->operands[0] = (delta >> 12) & 0x7ff; + NEXT_LIR(lir)->operands[0] = (delta>> 1) & 0x7ff; } ArmEncodingMap *encoder = &EncodingMap[lir->opcode]; @@ -1213,8 +1224,8 @@ void dvmCompilerAssembleLIR(CompilationUnit *cUnit, JitTranslationInfo *info) int i; ChainCellCounts chainCellCounts; int descSize = - cUnit->wholeMethod ? 0 : jitTraceDescriptionSize(cUnit->traceDesc); - int chainingCellGap; + cUnit->methodJitMode ? 0 : jitTraceDescriptionSize(cUnit->traceDesc); + int chainingCellGap = 0; info->instructionSet = cUnit->instructionSet; @@ -1240,30 +1251,34 @@ void dvmCompilerAssembleLIR(CompilationUnit *cUnit, JitTranslationInfo *info) /* Const values have to be word aligned */ offset = (offset + 3) & ~3; - /* - * Get the gap (# of u4) between the offset of chaining cell count and - * the bottom of real chaining cells. If the translation has chaining - * cells, the gap is guaranteed to be multiples of 4. 
- */ - chainingCellGap = (offset - cUnit->chainingCellBottom->offset) >> 2; - - /* Add space for chain cell counts & trace description */ u4 chainCellOffset = offset; - ArmLIR *chainCellOffsetLIR = (ArmLIR *) cUnit->chainCellOffsetLIR; - assert(chainCellOffsetLIR); - assert(chainCellOffset < 0x10000); - assert(chainCellOffsetLIR->opcode == kArm16BitData && - chainCellOffsetLIR->operands[0] == CHAIN_CELL_OFFSET_TAG); + ArmLIR *chainCellOffsetLIR = NULL; - /* - * Adjust the CHAIN_CELL_OFFSET_TAG LIR's offset to remove the - * space occupied by the pointer to the trace profiling counter. - */ - chainCellOffsetLIR->operands[0] = chainCellOffset - 4; + if (!cUnit->methodJitMode) { + /* + * Get the gap (# of u4) between the offset of chaining cell count and + * the bottom of real chaining cells. If the translation has chaining + * cells, the gap is guaranteed to be multiples of 4. + */ + chainingCellGap = (offset - cUnit->chainingCellBottom->offset) >> 2; + + /* Add space for chain cell counts & trace description */ + chainCellOffsetLIR = (ArmLIR *) cUnit->chainCellOffsetLIR; + assert(chainCellOffsetLIR); + assert(chainCellOffset < 0x10000); + assert(chainCellOffsetLIR->opcode == kArm16BitData && + chainCellOffsetLIR->operands[0] == CHAIN_CELL_OFFSET_TAG); - offset += sizeof(chainCellCounts) + descSize; + /* + * Adjust the CHAIN_CELL_OFFSET_TAG LIR's offset to remove the + * space occupied by the pointer to the trace profiling counter. + */ + chainCellOffsetLIR->operands[0] = chainCellOffset - 4; - assert((offset & 0x3) == 0); /* Should still be word aligned */ + offset += sizeof(chainCellCounts) + descSize; + + assert((offset & 0x3) == 0); /* Should still be word aligned */ + } /* Set up offsets for literals */ cUnit->dataOffset = offset; @@ -1301,8 +1316,10 @@ void dvmCompilerAssembleLIR(CompilationUnit *cUnit, JitTranslationInfo *info) break; case kRetryAll: if (cUnit->assemblerRetries < MAX_ASSEMBLER_RETRIES) { - /* Restore pristine chain cell marker on retry */ - chainCellOffsetLIR->operands[0] = CHAIN_CELL_OFFSET_TAG; + if (!cUnit->methodJitMode) { + /* Restore pristine chain cell marker on retry */ + chainCellOffsetLIR->operands[0] = CHAIN_CELL_OFFSET_TAG; + } return; } /* Too many retries - reset and try cutting the trace in half */ @@ -1351,20 +1368,23 @@ void dvmCompilerAssembleLIR(CompilationUnit *cUnit, JitTranslationInfo *info) memcpy((char*)cUnit->baseAddr, cUnit->codeBuffer, chainCellOffset); gDvmJit.numCompilations++; - /* Install the chaining cell counts */ - for (i=0; i< kChainingCellGap; i++) { - chainCellCounts.u.count[i] = cUnit->numChainingCells[i]; - } + if (!cUnit->methodJitMode) { + /* Install the chaining cell counts */ + for (i=0; i< kChainingCellGap; i++) { + chainCellCounts.u.count[i] = cUnit->numChainingCells[i]; + } - /* Set the gap number in the chaining cell count structure */ - chainCellCounts.u.count[kChainingCellGap] = chainingCellGap; + /* Set the gap number in the chaining cell count structure */ + chainCellCounts.u.count[kChainingCellGap] = chainingCellGap; - memcpy((char*)cUnit->baseAddr + chainCellOffset, &chainCellCounts, - sizeof(chainCellCounts)); + memcpy((char*)cUnit->baseAddr + chainCellOffset, &chainCellCounts, + sizeof(chainCellCounts)); - /* Install the trace description */ - memcpy((char*)cUnit->baseAddr + chainCellOffset + sizeof(chainCellCounts), - cUnit->traceDesc, descSize); + /* Install the trace description */ + memcpy((char*) cUnit->baseAddr + chainCellOffset + + sizeof(chainCellCounts), + cUnit->traceDesc, descSize); + } /* Write the 
literals directly into the code cache */ installDataContent(cUnit); @@ -1609,7 +1629,7 @@ const Method *dvmJitToPatchPredictedChain(const Method *method, PROTECT_CODE_CACHE(cell, sizeof(*cell)); goto done; } - int tgtAddr = (int) dvmJitGetCodeAddr(method->insns); + int tgtAddr = (int) dvmJitGetTraceAddr(method->insns); /* * Compilation not made yet for the callee. Reset the counter to a small @@ -1808,14 +1828,16 @@ void dvmJitUnchainAll() for (i = 0; i < gDvmJit.jitTableSize; i++) { if (gDvmJit.pJitEntryTable[i].dPC && - gDvmJit.pJitEntryTable[i].codeAddress && - (gDvmJit.pJitEntryTable[i].codeAddress != - dvmCompilerGetInterpretTemplate())) { + !gDvmJit.pJitEntryTable[i].u.info.isMethodEntry && + gDvmJit.pJitEntryTable[i].codeAddress && + (gDvmJit.pJitEntryTable[i].codeAddress != + dvmCompilerGetInterpretTemplate())) { u4* lastAddress; lastAddress = dvmJitUnchain(gDvmJit.pJitEntryTable[i].codeAddress); if (lowAddress == NULL || - (u4*)gDvmJit.pJitEntryTable[i].codeAddress < lowAddress) + (u4*)gDvmJit.pJitEntryTable[i].codeAddress < + lowAddress) lowAddress = lastAddress; if (lastAddress > highAddress) highAddress = lastAddress; diff --git a/vm/compiler/codegen/arm/CodegenDriver.c b/vm/compiler/codegen/arm/CodegenDriver.c index f017b3168..8e1b09c60 100644 --- a/vm/compiler/codegen/arm/CodegenDriver.c +++ b/vm/compiler/codegen/arm/CodegenDriver.c @@ -904,24 +904,28 @@ static ArmLIR *genUnconditionalBranch(CompilationUnit *cUnit, ArmLIR *target) /* Perform the actual operation for OP_RETURN_* */ static void genReturnCommon(CompilationUnit *cUnit, MIR *mir) { - genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ? - TEMPLATE_RETURN_PROF : - TEMPLATE_RETURN); + if (!cUnit->methodJitMode) { + genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ? + TEMPLATE_RETURN_PROF : + TEMPLATE_RETURN); #if defined(WITH_JIT_TUNING) - gDvmJit.returnOp++; + gDvmJit.returnOp++; #endif - int dPC = (int) (cUnit->method->insns + mir->offset); - /* Insert branch, but defer setting of target */ - ArmLIR *branch = genUnconditionalBranch(cUnit, NULL); - /* Set up the place holder to reconstruct this Dalvik PC */ - ArmLIR *pcrLabel = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true); - pcrLabel->opcode = kArmPseudoPCReconstructionCell; - pcrLabel->operands[0] = dPC; - pcrLabel->operands[1] = mir->offset; - /* Insert the place holder to the growable list */ - dvmInsertGrowableList(&cUnit->pcReconstructionList, (intptr_t) pcrLabel); - /* Branch to the PC reconstruction code */ - branch->generic.target = (LIR *) pcrLabel; + int dPC = (int) (cUnit->method->insns + mir->offset); + /* Insert branch, but defer setting of target */ + ArmLIR *branch = genUnconditionalBranch(cUnit, NULL); + /* Set up the place holder to reconstruct this Dalvik PC */ + ArmLIR *pcrLabel = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true); + pcrLabel->opcode = kArmPseudoPCReconstructionCell; + pcrLabel->operands[0] = dPC; + pcrLabel->operands[1] = mir->offset; + /* Insert the place holder to the growable list */ + dvmInsertGrowableList(&cUnit->pcReconstructionList, + (intptr_t) pcrLabel); + /* Branch to the PC reconstruction code */ + branch->generic.target = (LIR *) pcrLabel; + } + /* TODO: Move result to InterpState for non-void returns */ } static void genProcessArgsNoRange(CompilationUnit *cUnit, MIR *mir, @@ -3197,6 +3201,25 @@ static bool handleFmt35c_3rc_5rc(CompilationUnit *cUnit, MIR *mir, return false; } +/* "this" pointer is already in r0 */ +static void genValidationForMethodCallee(CompilationUnit *cUnit, MIR *mir, + ArmLIR 
**classCheck) +{ + CallsiteInfo *callsiteInfo = mir->meta.callsiteInfo; + dvmCompilerLockAllTemps(cUnit); + + loadConstant(cUnit, r1, (int) callsiteInfo->clazz); + + loadWordDisp(cUnit, r0, offsetof(Object, clazz), r2); + /* Branch to the slow path if classes are not equal */ + opRegReg(cUnit, kOpCmp, r1, r2); + /* + * Set the misPredBranchOver target so that it will be generated when the + * code for the non-optimized invoke is generated. + */ + *classCheck = opCondBranch(cUnit, kArmCondNe); +} + static bool handleFmt35ms_3rms(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb, ArmLIR *labelList) { @@ -3229,6 +3252,29 @@ static bool handleFmt35ms_3rms(CompilationUnit *cUnit, MIR *mir, else genProcessArgsRange(cUnit, mir, dInsn, &pcrLabel); + + if (mir->OptimizationFlags & MIR_INVOKE_METHOD_JIT) { + const Method *calleeMethod = mir->meta.callsiteInfo->method; + void *calleeAddr = dvmJitGetMethodAddr(calleeMethod->insns); + if (calleeAddr) { + ArmLIR *classCheck; + cUnit->printMe = true; + genValidationForMethodCallee(cUnit, mir, &classCheck); + newLIR2(cUnit, kThumbBl1, (int) calleeAddr, + (int) calleeAddr); + newLIR2(cUnit, kThumbBl2, (int) calleeAddr, + (int) calleeAddr); + genUnconditionalBranch(cUnit, retChainingCell); + + /* Target of slow path */ + ArmLIR *slowPathLabel = newLIR0(cUnit, + kArmPseudoTargetLabel); + + slowPathLabel->defMask = ENCODE_ALL; + classCheck->generic.target = (LIR *) slowPathLabel; + } + } + genInvokeVirtualCommon(cUnit, mir, methodIndex, retChainingCell, predChainingCell, diff --git a/vm/compiler/codegen/arm/armv5te-vfp/ArchVariant.c b/vm/compiler/codegen/arm/armv5te-vfp/ArchVariant.c index f584ce72c..c857fa53b 100644 --- a/vm/compiler/codegen/arm/armv5te-vfp/ArchVariant.c +++ b/vm/compiler/codegen/arm/armv5te-vfp/ArchVariant.c @@ -79,6 +79,10 @@ bool dvmCompilerArchVariantInit(void) LOGE("InterpState.jitToInterpEntries size overflow"); dvmAbort(); } + + /* No method JIT for Thumb backend */ + gDvmJit.disableOpt |= (1 << kMethodJit); + return true; } diff --git a/vm/compiler/codegen/arm/armv5te-vfp/Codegen.c b/vm/compiler/codegen/arm/armv5te-vfp/Codegen.c index d17965d81..a2d77eadb 100644 --- a/vm/compiler/codegen/arm/armv5te-vfp/Codegen.c +++ b/vm/compiler/codegen/arm/armv5te-vfp/Codegen.c @@ -49,5 +49,8 @@ /* MIR2LIR dispatcher and architectural independent codegen routines */ #include "../CodegenDriver.c" +/* Dummy driver for method-based JIT */ +#include "../armv5te/MethodCodegenDriver.c" + /* Architecture manifest */ #include "ArchVariant.c" diff --git a/vm/compiler/codegen/arm/armv5te/ArchVariant.c b/vm/compiler/codegen/arm/armv5te/ArchVariant.c index cf48d4ef8..0f16839ba 100644 --- a/vm/compiler/codegen/arm/armv5te/ArchVariant.c +++ b/vm/compiler/codegen/arm/armv5te/ArchVariant.c @@ -79,6 +79,10 @@ bool dvmCompilerArchVariantInit(void) LOGE("InterpState.jitToInterpEntries size overflow"); dvmAbort(); } + + /* No method JIT for Thumb backend */ + gDvmJit.disableOpt |= (1 << kMethodJit); + return true; } diff --git a/vm/compiler/codegen/arm/armv5te/Codegen.c b/vm/compiler/codegen/arm/armv5te/Codegen.c index 03c1435fe..f74d968ca 100644 --- a/vm/compiler/codegen/arm/armv5te/Codegen.c +++ b/vm/compiler/codegen/arm/armv5te/Codegen.c @@ -49,5 +49,8 @@ /* MIR2LIR dispatcher and architectural independent codegen routines */ #include "../CodegenDriver.c" +/* Dummy driver for method-based JIT */ +#include "MethodCodegenDriver.c" + /* Architecture manifest */ #include "ArchVariant.c" diff --git a/vm/compiler/codegen/arm/armv5te/MethodCodegenDriver.c 
b/vm/compiler/codegen/arm/armv5te/MethodCodegenDriver.c new file mode 100644 index 000000000..20779f3e9 --- /dev/null +++ b/vm/compiler/codegen/arm/armv5te/MethodCodegenDriver.c @@ -0,0 +1,21 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +void dvmCompilerMethodMIR2LIR(CompilationUnit *cUnit) +{ + LOGE("Method-based JIT not supported for the v5te target"); + dvmAbort(); +} diff --git a/vm/compiler/codegen/arm/armv7-a-neon/ArchVariant.c b/vm/compiler/codegen/arm/armv7-a-neon/ArchVariant.c index 7fcf031cb..3df1095c6 100644 --- a/vm/compiler/codegen/arm/armv7-a-neon/ArchVariant.c +++ b/vm/compiler/codegen/arm/armv7-a-neon/ArchVariant.c @@ -74,6 +74,10 @@ bool dvmCompilerArchVariantInit(void) LOGE("InterpState.jitToInterpEntries size overflow"); dvmAbort(); } + + /* FIXME - comment out the following to enable method-based JIT */ + gDvmJit.disableOpt |= (1 << kMethodJit); + return true; } diff --git a/vm/compiler/codegen/arm/armv7-a-neon/Codegen.c b/vm/compiler/codegen/arm/armv7-a-neon/Codegen.c index f0b772296..439add5ef 100644 --- a/vm/compiler/codegen/arm/armv7-a-neon/Codegen.c +++ b/vm/compiler/codegen/arm/armv7-a-neon/Codegen.c @@ -49,5 +49,8 @@ /* MIR2LIR dispatcher and architectural independent codegen routines */ #include "../CodegenDriver.c" +/* Driver for method-based JIT */ +#include "MethodCodegenDriver.c" + /* Architecture manifest */ #include "ArchVariant.c" diff --git a/vm/compiler/codegen/arm/armv7-a-neon/MethodCodegenDriver.c b/vm/compiler/codegen/arm/armv7-a-neon/MethodCodegenDriver.c new file mode 100644 index 000000000..c25ab8346 --- /dev/null +++ b/vm/compiler/codegen/arm/armv7-a-neon/MethodCodegenDriver.c @@ -0,0 +1,227 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* Handle the content in each basic block */ +static bool methodBlockCodeGen(CompilationUnit *cUnit, BasicBlock *bb) +{ + MIR *mir; + ArmLIR *labelList = (ArmLIR *) cUnit->blockLabelList; + int blockId = bb->id; + + cUnit->curBlock = bb; + labelList[blockId].operands[0] = bb->startOffset; + + /* Insert the block label */ + labelList[blockId].opcode = kArmPseudoNormalBlockLabel; + dvmCompilerAppendLIR(cUnit, (LIR *) &labelList[blockId]); + + dvmCompilerClobberAllRegs(cUnit); + dvmCompilerResetNullCheck(cUnit); + + ArmLIR *headLIR = NULL; + + if (bb->blockType == kMethodEntryBlock) { + opImm(cUnit, kOpPush, (1 << rlr | 1 << rFP)); + opRegImm(cUnit, kOpSub, rFP, + sizeof(StackSaveArea) + cUnit->method->registersSize * 4); + + } else if (bb->blockType == kMethodExitBlock) { + opImm(cUnit, kOpPop, (1 << rpc | 1 << rFP)); + } + + for (mir = bb->firstMIRInsn; mir; mir = mir->next) { + + dvmCompilerResetRegPool(cUnit); + if (gDvmJit.disableOpt & (1 << kTrackLiveTemps)) { + dvmCompilerClobberAllRegs(cUnit); + } + + if (gDvmJit.disableOpt & (1 << kSuppressLoads)) { + dvmCompilerResetDefTracking(cUnit); + } + + Opcode dalvikOpcode = mir->dalvikInsn.opcode; + InstructionFormat dalvikFormat = + dexGetFormatFromOpcode(dalvikOpcode); + + ArmLIR *boundaryLIR; + + /* + * Don't generate the boundary LIR unless we are debugging this + * trace or we need a scheduling barrier. + */ + if (headLIR == NULL || cUnit->printMe == true) { + boundaryLIR = + newLIR2(cUnit, kArmPseudoDalvikByteCodeBoundary, + mir->offset, + (int) dvmCompilerGetDalvikDisassembly( + &mir->dalvikInsn, "")); + /* Remember the first LIR for this block */ + if (headLIR == NULL) { + headLIR = boundaryLIR; + /* Set the first boundaryLIR as a scheduling barrier */ + headLIR->defMask = ENCODE_ALL; + } + } + + /* Don't generate the SSA annotation unless verbose mode is on */ + if (cUnit->printMe && mir->ssaRep) { + char *ssaString = dvmCompilerGetSSAString(cUnit, mir->ssaRep); + newLIR1(cUnit, kArmPseudoSSARep, (int) ssaString); + } + + bool notHandled; + switch (dalvikFormat) { + case kFmt10t: + case kFmt20t: + case kFmt30t: + notHandled = handleFmt10t_Fmt20t_Fmt30t(cUnit, + mir, bb, labelList); + break; + case kFmt10x: + notHandled = handleFmt10x(cUnit, mir); + break; + case kFmt11n: + case kFmt31i: + notHandled = handleFmt11n_Fmt31i(cUnit, mir); + break; + case kFmt11x: + notHandled = handleFmt11x(cUnit, mir); + break; + case kFmt12x: + notHandled = handleFmt12x(cUnit, mir); + break; + case kFmt20bc: + case kFmt40sc: + notHandled = handleFmt20bc_Fmt40sc(cUnit, mir); + break; + case kFmt21c: + case kFmt31c: + case kFmt41c: + notHandled = handleFmt21c_Fmt31c_Fmt41c(cUnit, mir); + break; + case kFmt21h: + notHandled = handleFmt21h(cUnit, mir); + break; + case kFmt21s: + notHandled = handleFmt21s(cUnit, mir); + break; + case kFmt21t: + notHandled = handleFmt21t(cUnit, mir, bb, labelList); + break; + case kFmt22b: + case kFmt22s: + notHandled = handleFmt22b_Fmt22s(cUnit, mir); + break; + case kFmt22c: + case kFmt52c: + notHandled = handleFmt22c_Fmt52c(cUnit, mir); + break; + case kFmt22cs: + notHandled = handleFmt22cs(cUnit, mir); + break; + case kFmt22t: + notHandled = handleFmt22t(cUnit, mir, bb, labelList); + break; + case kFmt22x: + case kFmt32x: + notHandled = handleFmt22x_Fmt32x(cUnit, mir); + break; + case kFmt23x: + notHandled = handleFmt23x(cUnit, mir); + break; + case kFmt31t: + notHandled = handleFmt31t(cUnit, mir); + break; + case kFmt3rc: + case kFmt35c: + case kFmt5rc: + notHandled = handleFmt35c_3rc_5rc(cUnit, mir, bb, + 
labelList); + break; + case kFmt3rms: + case kFmt35ms: + notHandled = handleFmt35ms_3rms(cUnit, mir, bb, + labelList); + break; + case kFmt35mi: + case kFmt3rmi: + notHandled = handleExecuteInline(cUnit, mir); + break; + case kFmt51l: + notHandled = handleFmt51l(cUnit, mir); + break; + default: + notHandled = true; + break; + } + + /* FIXME - to be implemented */ + if (notHandled == true && dalvikOpcode >= kNumPackedOpcodes) { + notHandled = false; + } + + if (notHandled) { + LOGE("%#06x: Opcode 0x%x (%s) / Fmt %d not handled\n", + mir->offset, + dalvikOpcode, dexGetOpcodeName(dalvikOpcode), + dalvikFormat); + dvmCompilerAbort(cUnit); + break; + } + } + + if (headLIR) { + /* + * Eliminate redundant loads/stores and delay stores into later + * slots + */ + dvmCompilerApplyLocalOptimizations(cUnit, (LIR *) headLIR, + cUnit->lastLIRInsn); + + /* + * Generate an unconditional branch to the fallthrough block. + */ + if (bb->fallThrough) { + genUnconditionalBranch(cUnit, + &labelList[bb->fallThrough->id]); + } + } + return false; +} + +void dvmCompilerMethodMIR2LIR(CompilationUnit *cUnit) +{ + // FIXME - enable method compilation for selected routines here + if (strcmp(cUnit->method->name, "add")) return; + + /* Used to hold the labels of each block */ + cUnit->blockLabelList = + (void *) dvmCompilerNew(sizeof(ArmLIR) * cUnit->numBlocks, true); + + dvmCompilerDataFlowAnalysisDispatcher(cUnit, methodBlockCodeGen, + kPreOrderDFSTraversal, + false /* isIterative */); + + dvmCompilerApplyGlobalOptimizations(cUnit); + + // FIXME - temporarily enable verbose printing for all methods + cUnit->printMe = true; + +#if defined(WITH_SELF_VERIFICATION) + selfVerificationBranchInsertPass(cUnit); +#endif +} diff --git a/vm/compiler/codegen/arm/armv7-a/ArchVariant.c b/vm/compiler/codegen/arm/armv7-a/ArchVariant.c index 7fcf031cb..3df1095c6 100644 --- a/vm/compiler/codegen/arm/armv7-a/ArchVariant.c +++ b/vm/compiler/codegen/arm/armv7-a/ArchVariant.c @@ -74,6 +74,10 @@ bool dvmCompilerArchVariantInit(void) LOGE("InterpState.jitToInterpEntries size overflow"); dvmAbort(); } + + /* FIXME - comment out the following to enable method-based JIT */ + gDvmJit.disableOpt |= (1 << kMethodJit); + return true; } diff --git a/vm/compiler/codegen/arm/armv7-a/Codegen.c b/vm/compiler/codegen/arm/armv7-a/Codegen.c index 05dda0c4d..36771ef56 100644 --- a/vm/compiler/codegen/arm/armv7-a/Codegen.c +++ b/vm/compiler/codegen/arm/armv7-a/Codegen.c @@ -49,5 +49,8 @@ /* MIR2LIR dispatcher and architectural independent codegen routines */ #include "../CodegenDriver.c" +/* Driver for method-based JIT */ +#include "../armv7-a-neon/MethodCodegenDriver.c" + /* Architecture manifest */ #include "ArchVariant.c" diff --git a/vm/interp/Jit.c b/vm/interp/Jit.c index 0e5531443..a2f217597 100644 --- a/vm/interp/Jit.c +++ b/vm/interp/Jit.c @@ -548,18 +548,25 @@ void dvmJitAbortTraceSelect(InterpState* interpState) * Find an entry in the JitTable, creating if necessary. * Returns null if table is full. 
*/ -static JitEntry *lookupAndAdd(const u2* dPC, bool callerLocked) +static JitEntry *lookupAndAdd(const u2* dPC, bool callerLocked, + bool isMethodEntry) { u4 chainEndMarker = gDvmJit.jitTableSize; u4 idx = dvmJitHash(dPC); - /* Walk the bucket chain to find an exact match for our PC */ + /* + * Walk the bucket chain to find an exact match for our PC and trace/method + * type + */ while ((gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) && - (gDvmJit.pJitEntryTable[idx].dPC != dPC)) { + ((gDvmJit.pJitEntryTable[idx].dPC != dPC) || + (gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry != + isMethodEntry))) { idx = gDvmJit.pJitEntryTable[idx].u.info.chain; } - if (gDvmJit.pJitEntryTable[idx].dPC != dPC) { + if (gDvmJit.pJitEntryTable[idx].dPC != dPC || + gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry != isMethodEntry) { /* * No match. Aquire jitTableLock and find the last * slot in the chain. Possibly continue the chain walk in case @@ -578,7 +585,9 @@ static JitEntry *lookupAndAdd(const u2* dPC, bool callerLocked) if (gDvmJit.pJitEntryTable[idx].dPC != NULL) { u4 prev; while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) { - if (gDvmJit.pJitEntryTable[idx].dPC == dPC) { + if (gDvmJit.pJitEntryTable[idx].dPC == dPC && + gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry == + isMethodEntry) { /* Another thread got there first for this dPC */ if (!callerLocked) dvmUnlockMutex(&gDvmJit.tableLock); @@ -617,10 +626,13 @@ static JitEntry *lookupAndAdd(const u2* dPC, bool callerLocked) } } if (gDvmJit.pJitEntryTable[idx].dPC == NULL) { + gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry = isMethodEntry; /* * Initialize codeAddress and allocate the slot. Must * happen in this order (since dPC is set, the entry is live. */ + android_atomic_release_store((int32_t)dPC, + (volatile int32_t *)(void *)&gDvmJit.pJitEntryTable[idx].dPC); gDvmJit.pJitEntryTable[idx].dPC = dPC; gDvmJit.jitTableEntriesUsed++; } else { @@ -895,7 +907,9 @@ int dvmCheckJit(const u2* pc, Thread* self, InterpState* interpState, * successfully entered a work order. */ JitEntry *jitEntry = - lookupAndAdd(interpState->currTraceHead, false); + lookupAndAdd(interpState->currTraceHead, + false /* lock */, + false /* method entry */); if (jitEntry) { setTraceConstruction(jitEntry, false); } @@ -978,16 +992,17 @@ JitEntry *dvmFindJitEntry(const u2* pc) } /* - * If a translated code address exists for the davik byte code - * pointer return it. This routine needs to be fast. + * Walk through the JIT profile table and find the corresponding JIT code, in + * the specified format (ie trace vs method). This routine needs to be fast. */ -void* dvmJitGetCodeAddr(const u2* dPC) +void* getCodeAddrCommon(const u2* dPC, bool methodEntry) { int idx = dvmJitHash(dPC); - const u2* npc = gDvmJit.pJitEntryTable[idx].dPC; - if (npc != NULL) { + const u2* pc = gDvmJit.pJitEntryTable[idx].dPC; + if (pc != NULL) { bool hideTranslation = dvmJitHideTranslation(); - if (npc == dPC) { + if (pc == dPC && + gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry == methodEntry) { int offset = (gDvmJit.profileMode >= kTraceProfilingContinuous) ? 
0 : gDvmJit.pJitEntryTable[idx].u.info.profileOffset; intptr_t codeAddress = @@ -1000,7 +1015,9 @@ void* dvmJitGetCodeAddr(const u2* dPC) int chainEndMarker = gDvmJit.jitTableSize; while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) { idx = gDvmJit.pJitEntryTable[idx].u.info.chain; - if (gDvmJit.pJitEntryTable[idx].dPC == dPC) { + if (gDvmJit.pJitEntryTable[idx].dPC == dPC && + gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry == + methodEntry) { int offset = (gDvmJit.profileMode >= kTraceProfilingContinuous) ? 0 : gDvmJit.pJitEntryTable[idx].u.info.profileOffset; @@ -1022,6 +1039,24 @@ void* dvmJitGetCodeAddr(const u2* dPC) } /* + * If a translated code address, in trace format, exists for the davik byte code + * pointer return it. + */ +void* dvmJitGetTraceAddr(const u2* dPC) +{ + return getCodeAddrCommon(dPC, false /* method entry */); +} + +/* + * If a translated code address, in whole-method format, exists for the davik + * byte code pointer return it. + */ +void* dvmJitGetMethodAddr(const u2* dPC) +{ + return getCodeAddrCommon(dPC, true /* method entry */); +} + +/* * Register the translated code pointer into the JitTable. * NOTE: Once a codeAddress field transitions from initial state to * JIT'd code, it must not be altered without first halting all @@ -1034,16 +1069,17 @@ void* dvmJitGetCodeAddr(const u2* dPC) * template cannot handle a non-zero prefix. */ void dvmJitSetCodeAddr(const u2* dPC, void *nPC, JitInstructionSetType set, - int profilePrefixSize) + bool isMethodEntry, int profilePrefixSize) { JitEntryInfoUnion oldValue; JitEntryInfoUnion newValue; - JitEntry *jitEntry = lookupAndAdd(dPC, false); + JitEntry *jitEntry = lookupAndAdd(dPC, false, isMethodEntry); assert(jitEntry); /* Note: order of update is important */ do { oldValue = jitEntry->u; newValue = oldValue; + newValue.info.isMethodEntry = isMethodEntry; newValue.info.instructionSet = set; } while (android_atomic_release_cas( oldValue.infoWord, newValue.infoWord, @@ -1152,7 +1188,9 @@ bool dvmJitCheckTraceRequest(Thread* self, InterpState* interpState) */ if (interpState->jitState == kJitTSelectRequest || interpState->jitState == kJitTSelectRequestHot) { - JitEntry *slot = lookupAndAdd(interpState->pc, false); + JitEntry *slot = lookupAndAdd(interpState->pc, + false /* lock */, + false /* method entry */); if (slot == NULL) { /* * Table is full. 
This should have been @@ -1294,7 +1332,8 @@ bool dvmJitResizeJitTable( unsigned int size ) if (pOldTable[i].dPC) { JitEntry *p; u2 chain; - p = lookupAndAdd(pOldTable[i].dPC, true /* holds tableLock*/ ); + p = lookupAndAdd(pOldTable[i].dPC, true /* holds tableLock*/, + pOldTable[i].u.info.isMethodEntry); p->codeAddress = pOldTable[i].codeAddress; /* We need to preserve the new chain field, but copy the rest */ chain = p->u.info.chain; diff --git a/vm/interp/Jit.h b/vm/interp/Jit.h index dda14d2bf..e86d548c6 100644 --- a/vm/interp/Jit.h +++ b/vm/interp/Jit.h @@ -134,7 +134,8 @@ typedef struct JitEntry { int dvmCheckJit(const u2* pc, Thread* self, InterpState* interpState, const ClassObject *callsiteClass, const Method* curMethod); -void* dvmJitGetCodeAddr(const u2* dPC); +void* dvmJitGetTraceAddr(const u2* dPC); +void* dvmJitGetMethodAddr(const u2* dPC); bool dvmJitCheckTraceRequest(Thread* self, InterpState* interpState); void dvmJitStopTranslationRequests(void); void dvmJitStats(void); @@ -144,7 +145,7 @@ struct JitEntry *dvmFindJitEntry(const u2* pc); s8 dvmJitd2l(double d); s8 dvmJitf2l(float f); void dvmJitSetCodeAddr(const u2* dPC, void *nPC, JitInstructionSetType set, - int profilePrefixSize); + bool isMethodEntry, int profilePrefixSize); void dvmJitAbortTraceSelect(InterpState* interpState); JitTraceCounter_t *dvmJitNextTraceCounter(void); void dvmJitTraceProfilingOff(void); diff --git a/vm/mterp/armv5te/footer.S b/vm/mterp/armv5te/footer.S index c13cfc850..d704c7791 100644 --- a/vm/mterp/armv5te/footer.S +++ b/vm/mterp/armv5te/footer.S @@ -138,7 +138,7 @@ dvmJitToInterpTraceSelectNoChain: #endif ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self mov r0,rPC - bl dvmJitGetCodeAddr @ Is there a translation? + bl dvmJitGetTraceAddr @ Is there a translation? str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag mov r1, rPC @ arg1 of translation may need this mov lr, #0 @ in case target is HANDLER_INTERPRET @@ -158,7 +158,7 @@ dvmJitToInterpTraceSelect: add rINST,lr,#-5 @ save start of chain branch add rINST, #-4 @ .. which is 9 bytes back mov r0,rPC - bl dvmJitGetCodeAddr @ Is there a translation? + bl dvmJitGetTraceAddr @ Is there a translation? str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag cmp r0,#0 beq 2f @@ -205,7 +205,7 @@ dvmJitToInterpNormal: bl dvmBumpNormal #endif mov r0,rPC - bl dvmJitGetCodeAddr @ Is there a translation? + bl dvmJitGetTraceAddr @ Is there a translation? str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag cmp r0,#0 beq toInterpreter @ go if not, otherwise do chain @@ -228,7 +228,7 @@ dvmJitToInterpNoChainNoProfile: #endif ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self mov r0,rPC - bl dvmJitGetCodeAddr @ Is there a translation? + bl dvmJitGetTraceAddr @ Is there a translation? str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag mov r1, rPC @ arg1 of translation may need this mov lr, #0 @ in case target is HANDLER_INTERPRET @@ -251,7 +251,7 @@ dvmJitToInterpNoChain: #endif ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self mov r0,rPC - bl dvmJitGetCodeAddr @ Is there a translation? + bl dvmJitGetTraceAddr @ Is there a translation? 
    str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov r1, rPC @ arg1 of translation may need this
    mov lr, #0 @ in case target is HANDLER_INTERPRET
@@ -304,7 +304,7 @@ common_updateProfile:
    strb r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ reset counter
    EXPORT_PC()
    mov r0,rPC
-   bl dvmJitGetCodeAddr @ r0<- dvmJitGetCodeAddr(rPC)
+   bl dvmJitGetTraceAddr @ r0<- dvmJitGetTraceAddr(rPC)
    str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov r1, rPC @ arg1 of translation may need this
    mov lr, #0 @ in case target is HANDLER_INTERPRET
diff --git a/vm/mterp/out/InterpAsm-armv5te-vfp.S b/vm/mterp/out/InterpAsm-armv5te-vfp.S
index 665f501fd..6cb562060 100644
--- a/vm/mterp/out/InterpAsm-armv5te-vfp.S
+++ b/vm/mterp/out/InterpAsm-armv5te-vfp.S
@@ -13294,7 +13294,7 @@ dvmJitToInterpTraceSelectNoChain:
#endif
    ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    mov r0,rPC
-   bl dvmJitGetCodeAddr @ Is there a translation?
+   bl dvmJitGetTraceAddr @ Is there a translation?
    str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov r1, rPC @ arg1 of translation may need this
    mov lr, #0 @ in case target is HANDLER_INTERPRET
@@ -13314,7 +13314,7 @@ dvmJitToInterpTraceSelect:
    add rINST,lr,#-5 @ save start of chain branch
    add rINST, #-4 @ .. which is 9 bytes back
    mov r0,rPC
-   bl dvmJitGetCodeAddr @ Is there a translation?
+   bl dvmJitGetTraceAddr @ Is there a translation?
    str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    cmp r0,#0
    beq 2f
@@ -13361,7 +13361,7 @@ dvmJitToInterpNormal:
    bl dvmBumpNormal
#endif
    mov r0,rPC
-   bl dvmJitGetCodeAddr @ Is there a translation?
+   bl dvmJitGetTraceAddr @ Is there a translation?
    str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    cmp r0,#0
    beq toInterpreter @ go if not, otherwise do chain
@@ -13384,7 +13384,7 @@ dvmJitToInterpNoChainNoProfile:
#endif
    ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    mov r0,rPC
-   bl dvmJitGetCodeAddr @ Is there a translation?
+   bl dvmJitGetTraceAddr @ Is there a translation?
    str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov r1, rPC @ arg1 of translation may need this
    mov lr, #0 @ in case target is HANDLER_INTERPRET
@@ -13407,7 +13407,7 @@ dvmJitToInterpNoChain:
#endif
    ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    mov r0,rPC
-   bl dvmJitGetCodeAddr @ Is there a translation?
+   bl dvmJitGetTraceAddr @ Is there a translation?
    str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov r1, rPC @ arg1 of translation may need this
    mov lr, #0 @ in case target is HANDLER_INTERPRET
@@ -13460,7 +13460,7 @@ common_updateProfile:
    strb r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ reset counter
    EXPORT_PC()
    mov r0,rPC
-   bl dvmJitGetCodeAddr @ r0<- dvmJitGetCodeAddr(rPC)
+   bl dvmJitGetTraceAddr @ r0<- dvmJitGetTraceAddr(rPC)
    str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov r1, rPC @ arg1 of translation may need this
    mov lr, #0 @ in case target is HANDLER_INTERPRET
diff --git a/vm/mterp/out/InterpAsm-armv5te.S b/vm/mterp/out/InterpAsm-armv5te.S
index 5fef4bd1f..bf719ad20 100644
--- a/vm/mterp/out/InterpAsm-armv5te.S
+++ b/vm/mterp/out/InterpAsm-armv5te.S
@@ -13752,7 +13752,7 @@ dvmJitToInterpTraceSelectNoChain:
#endif
    ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    mov r0,rPC
-   bl dvmJitGetCodeAddr @ Is there a translation?
+   bl dvmJitGetTraceAddr @ Is there a translation?
    str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov r1, rPC @ arg1 of translation may need this
    mov lr, #0 @ in case target is HANDLER_INTERPRET
@@ -13772,7 +13772,7 @@ dvmJitToInterpTraceSelect:
    add rINST,lr,#-5 @ save start of chain branch
    add rINST, #-4 @ .. which is 9 bytes back
    mov r0,rPC
-   bl dvmJitGetCodeAddr @ Is there a translation?
+   bl dvmJitGetTraceAddr @ Is there a translation?
    str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    cmp r0,#0
    beq 2f
@@ -13819,7 +13819,7 @@ dvmJitToInterpNormal:
    bl dvmBumpNormal
#endif
    mov r0,rPC
-   bl dvmJitGetCodeAddr @ Is there a translation?
+   bl dvmJitGetTraceAddr @ Is there a translation?
    str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    cmp r0,#0
    beq toInterpreter @ go if not, otherwise do chain
@@ -13842,7 +13842,7 @@ dvmJitToInterpNoChainNoProfile:
#endif
    ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    mov r0,rPC
-   bl dvmJitGetCodeAddr @ Is there a translation?
+   bl dvmJitGetTraceAddr @ Is there a translation?
    str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov r1, rPC @ arg1 of translation may need this
    mov lr, #0 @ in case target is HANDLER_INTERPRET
@@ -13865,7 +13865,7 @@ dvmJitToInterpNoChain:
#endif
    ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    mov r0,rPC
-   bl dvmJitGetCodeAddr @ Is there a translation?
+   bl dvmJitGetTraceAddr @ Is there a translation?
    str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov r1, rPC @ arg1 of translation may need this
    mov lr, #0 @ in case target is HANDLER_INTERPRET
@@ -13918,7 +13918,7 @@ common_updateProfile:
    strb r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ reset counter
    EXPORT_PC()
    mov r0,rPC
-   bl dvmJitGetCodeAddr @ r0<- dvmJitGetCodeAddr(rPC)
+   bl dvmJitGetTraceAddr @ r0<- dvmJitGetTraceAddr(rPC)
    str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov r1, rPC @ arg1 of translation may need this
    mov lr, #0 @ in case target is HANDLER_INTERPRET
diff --git a/vm/mterp/out/InterpAsm-armv7-a-neon.S b/vm/mterp/out/InterpAsm-armv7-a-neon.S
index 5423a7a8a..0cacb5d5f 100644
--- a/vm/mterp/out/InterpAsm-armv7-a-neon.S
+++ b/vm/mterp/out/InterpAsm-armv7-a-neon.S
@@ -13232,7 +13232,7 @@ dvmJitToInterpTraceSelectNoChain:
#endif
    ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    mov r0,rPC
-   bl dvmJitGetCodeAddr @ Is there a translation?
+   bl dvmJitGetTraceAddr @ Is there a translation?
    str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov r1, rPC @ arg1 of translation may need this
    mov lr, #0 @ in case target is HANDLER_INTERPRET
@@ -13252,7 +13252,7 @@ dvmJitToInterpTraceSelect:
    add rINST,lr,#-5 @ save start of chain branch
    add rINST, #-4 @ .. which is 9 bytes back
    mov r0,rPC
-   bl dvmJitGetCodeAddr @ Is there a translation?
+   bl dvmJitGetTraceAddr @ Is there a translation?
    str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    cmp r0,#0
    beq 2f
@@ -13299,7 +13299,7 @@ dvmJitToInterpNormal:
    bl dvmBumpNormal
#endif
    mov r0,rPC
-   bl dvmJitGetCodeAddr @ Is there a translation?
+   bl dvmJitGetTraceAddr @ Is there a translation?
    str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    cmp r0,#0
    beq toInterpreter @ go if not, otherwise do chain
@@ -13322,7 +13322,7 @@ dvmJitToInterpNoChainNoProfile:
#endif
    ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    mov r0,rPC
-   bl dvmJitGetCodeAddr @ Is there a translation?
+   bl dvmJitGetTraceAddr @ Is there a translation?
    str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov r1, rPC @ arg1 of translation may need this
    mov lr, #0 @ in case target is HANDLER_INTERPRET
@@ -13345,7 +13345,7 @@ dvmJitToInterpNoChain:
#endif
    ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    mov r0,rPC
-   bl dvmJitGetCodeAddr @ Is there a translation?
+   bl dvmJitGetTraceAddr @ Is there a translation?
    str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov r1, rPC @ arg1 of translation may need this
    mov lr, #0 @ in case target is HANDLER_INTERPRET
@@ -13398,7 +13398,7 @@ common_updateProfile:
    strb r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ reset counter
    EXPORT_PC()
    mov r0,rPC
-   bl dvmJitGetCodeAddr @ r0<- dvmJitGetCodeAddr(rPC)
+   bl dvmJitGetTraceAddr @ r0<- dvmJitGetTraceAddr(rPC)
    str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov r1, rPC @ arg1 of translation may need this
    mov lr, #0 @ in case target is HANDLER_INTERPRET
diff --git a/vm/mterp/out/InterpAsm-armv7-a.S b/vm/mterp/out/InterpAsm-armv7-a.S
index 7a1c6eb33..2ab215e4f 100644
--- a/vm/mterp/out/InterpAsm-armv7-a.S
+++ b/vm/mterp/out/InterpAsm-armv7-a.S
@@ -13232,7 +13232,7 @@ dvmJitToInterpTraceSelectNoChain:
#endif
    ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    mov r0,rPC
-   bl dvmJitGetCodeAddr @ Is there a translation?
+   bl dvmJitGetTraceAddr @ Is there a translation?
    str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov r1, rPC @ arg1 of translation may need this
    mov lr, #0 @ in case target is HANDLER_INTERPRET
@@ -13252,7 +13252,7 @@ dvmJitToInterpTraceSelect:
    add rINST,lr,#-5 @ save start of chain branch
    add rINST, #-4 @ .. which is 9 bytes back
    mov r0,rPC
-   bl dvmJitGetCodeAddr @ Is there a translation?
+   bl dvmJitGetTraceAddr @ Is there a translation?
    str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    cmp r0,#0
    beq 2f
@@ -13299,7 +13299,7 @@ dvmJitToInterpNormal:
    bl dvmBumpNormal
#endif
    mov r0,rPC
-   bl dvmJitGetCodeAddr @ Is there a translation?
+   bl dvmJitGetTraceAddr @ Is there a translation?
    str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    cmp r0,#0
    beq toInterpreter @ go if not, otherwise do chain
@@ -13322,7 +13322,7 @@ dvmJitToInterpNoChainNoProfile:
#endif
    ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    mov r0,rPC
-   bl dvmJitGetCodeAddr @ Is there a translation?
+   bl dvmJitGetTraceAddr @ Is there a translation?
    str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov r1, rPC @ arg1 of translation may need this
    mov lr, #0 @ in case target is HANDLER_INTERPRET
@@ -13345,7 +13345,7 @@ dvmJitToInterpNoChain:
#endif
    ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    mov r0,rPC
-   bl dvmJitGetCodeAddr @ Is there a translation?
+   bl dvmJitGetTraceAddr @ Is there a translation?
    str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov r1, rPC @ arg1 of translation may need this
    mov lr, #0 @ in case target is HANDLER_INTERPRET
@@ -13398,7 +13398,7 @@ common_updateProfile:
    strb r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ reset counter
    EXPORT_PC()
    mov r0,rPC
-   bl dvmJitGetCodeAddr @ r0<- dvmJitGetCodeAddr(rPC)
+   bl dvmJitGetTraceAddr @ r0<- dvmJitGetTraceAddr(rPC)
    str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov r1, rPC @ arg1 of translation may need this
    mov lr, #0 @ in case target is HANDLER_INTERPRET
diff --git a/vm/mterp/out/InterpAsm-x86.S b/vm/mterp/out/InterpAsm-x86.S
index f42f59274..7e6279845 100644
--- a/vm/mterp/out/InterpAsm-x86.S
+++ b/vm/mterp/out/InterpAsm-x86.S
@@ -13318,7 +13318,7 @@ dvmJitToInterpNoChainNoProfile:
    call dvmBumpNoChain
#endif
    movl rPC,OUT_ARG0(%esp)
-   call dvmJitGetCodeAddr # is there a translation?
+   call dvmJitGetTraceAddr # is there a translation?
    movl rGLUE,%ecx
    movl offGlue_self(%ecx), %ecx # ecx <- glue->self
    movl %eax,offThread_inJitCodeCache(%ecx) # set inJitCodeCache flag
@@ -13341,7 +13341,7 @@ dvmJitToInterpTraceSelectNoChain:
    call dvmBumpNoChain
#endif
    movl rPC,OUT_ARG0(%esp)
-   call dvmJitGetCodeAddr # is there a translation?
+   call dvmJitGetTraceAddr # is there a translation?
    movl rGLUE,%ecx
    movl offGlue_self(%ecx),%ecx
    cmpl $0,%eax
@@ -13371,7 +13371,7 @@ dvmJitToInterpTraceSelect:
    pop rINST # save chain cell address in callee save reg
    movl (rINST),rPC
    movl rPC,OUT_ARG0(%esp)
-   call dvmJitGetCodeAddr # is there a translation?
+   call dvmJitGetTraceAddr # is there a translation?
    cmpl $0,%eax
    jz 1b # no - ask for one
    movl %eax,OUT_ARG0(%esp)
@@ -13435,7 +13435,7 @@ common_updateProfile:
    movb rINSTbl,(%edx,%eax) # reset counter
    movl offGlue_self(%ecx),rINST
    movl rPC,OUT_ARG0(%esp)
-   call dvmJitGetCodeAddr # already have one?
+   call dvmJitGetTraceAddr # already have one?
    movl %eax,offThread_inJitCodeCache(rINST) # set the inJitCodeCache flag
    cmpl $0,%eax
    jz 1f
diff --git a/vm/mterp/x86-atom/TODO.txt b/vm/mterp/x86-atom/TODO.txt
index 4d8f1732a..7dc624eac 100644
--- a/vm/mterp/x86-atom/TODO.txt
+++ b/vm/mterp/x86-atom/TODO.txt
@@ -11,6 +11,7 @@ Items requiring attention:
 (hi) Add implementations for jumbo opcodes (40 instructions)
 (hi) Implement OP_DISPATCH_FF for real. (Right now it's treated as an unused instruction.)
+(hi) Rename dvmJitGetCodeAddr to dvmJitGetTraceAddr.
 (md) Correct OP_MONITOR_EXIT (need to adjust PC before throw)
 (md) OP_THROW needs to export the PC
diff --git a/vm/mterp/x86/footer.S b/vm/mterp/x86/footer.S
index 0724f425d..053871a77 100644
--- a/vm/mterp/x86/footer.S
+++ b/vm/mterp/x86/footer.S
@@ -72,7 +72,7 @@ dvmJitToInterpNoChainNoProfile:
    call dvmBumpNoChain
#endif
    movl rPC,OUT_ARG0(%esp)
-   call dvmJitGetCodeAddr # is there a translation?
+   call dvmJitGetTraceAddr # is there a translation?
    movl rGLUE,%ecx
    movl offGlue_self(%ecx), %ecx # ecx <- glue->self
    movl %eax,offThread_inJitCodeCache(%ecx) # set inJitCodeCache flag
@@ -95,7 +95,7 @@ dvmJitToInterpTraceSelectNoChain:
    call dvmBumpNoChain
#endif
    movl rPC,OUT_ARG0(%esp)
-   call dvmJitGetCodeAddr # is there a translation?
+   call dvmJitGetTraceAddr # is there a translation?
    movl rGLUE,%ecx
    movl offGlue_self(%ecx),%ecx
    cmpl $$0,%eax
@@ -125,7 +125,7 @@ dvmJitToInterpTraceSelect:
    pop rINST # save chain cell address in callee save reg
    movl (rINST),rPC
    movl rPC,OUT_ARG0(%esp)
-   call dvmJitGetCodeAddr # is there a translation?
+   call dvmJitGetTraceAddr # is there a translation?
    cmpl $$0,%eax
    jz 1b # no - ask for one
    movl %eax,OUT_ARG0(%esp)
@@ -189,7 +189,7 @@ common_updateProfile:
    movb rINSTbl,(%edx,%eax) # reset counter
    movl offGlue_self(%ecx),rINST
    movl rPC,OUT_ARG0(%esp)
-   call dvmJitGetCodeAddr # already have one?
+   call dvmJitGetTraceAddr # already have one?
    movl %eax,offThread_inJitCodeCache(rINST) # set the inJitCodeCache flag
    cmpl $$0,%eax
    jz 1f
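
Editor's note on the rename (a sketch, not part of the patch): every dvmJitToInterp* stub above does the same dance -- pass rPC as the argument, ask the code cache for a translation, and store the returned address in the thread's inJitCodeCache field so the VM can tell the thread is about to execute inside the code cache. Once the cache holds both trace and whole-method translations, a lookup keyed only on a Dalvik PC is ambiguous, so the trace-side entry point is renamed from dvmJitGetCodeAddr to dvmJitGetTraceAddr. The C below is a minimal sketch of how such a split lookup could work; JitEntrySketch, gTable, and getCodeAddrCommon are invented names for illustration and do not appear in this change.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef uint16_t u2;              /* Dalvik's 16-bit code unit */

/* Invented stand-in for one slot of the real JIT lookup table. */
typedef struct JitEntrySketch {
    const u2 *dPC;                /* Dalvik PC the translation starts at */
    bool isMethodEntry;           /* whole-method translation vs. trace */
    void *codeAddress;            /* native entry point, NULL if none */
} JitEntrySketch;

static JitEntrySketch gTable[64]; /* the real table is hashed, not scanned */

/* Common lookup: a hit only counts if the translation kind matches. */
static void *getCodeAddrCommon(const u2 *dPC, bool methodEntry)
{
    size_t i;
    for (i = 0; i < sizeof(gTable) / sizeof(gTable[0]); i++) {
        if (gTable[i].dPC == dPC &&
            gTable[i].isMethodEntry == methodEntry) {
            return gTable[i].codeAddress;
        }
    }
    return NULL;                  /* miss, or a translation of the other kind */
}

/* What the interpreter stubs above now call: trace translations only. */
void *dvmJitGetTraceAddr(const u2 *dPC)
{
    return getCodeAddrCommon(dPC, false);
}

A method-entry counterpart would pass true instead, keeping the two translation flavors from being dispatched through the wrong path. The x86-atom flavor of mterp is not converted here; the new TODO.txt entry records that its rename is deferred.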