summaryrefslogtreecommitdiffstats
path: root/vm/interp
diff options
context:
space:
mode:
authorJean-Baptiste Queru <jbq@google.com>2009-11-12 18:45:15 -0800
committerJean-Baptiste Queru <jbq@google.com>2009-11-12 18:45:15 -0800
commit72e93344b4d1ffc71e9c832ec23de0657e5b04a5 (patch)
tree1a08d1e43d54200ea737234d865c4668c5d3535b /vm/interp
parentdfd0afbcb08b871e224a28ecb4ed427a7693545c (diff)
downloadandroid_dalvik-72e93344b4d1ffc71e9c832ec23de0657e5b04a5.tar.gz
android_dalvik-72e93344b4d1ffc71e9c832ec23de0657e5b04a5.tar.bz2
android_dalvik-72e93344b4d1ffc71e9c832ec23de0657e5b04a5.zip
eclair snapshot
Diffstat (limited to 'vm/interp')
-rw-r--r--vm/interp/Interp.c297
-rw-r--r--vm/interp/Interp.h8
-rw-r--r--vm/interp/InterpDefs.h86
-rw-r--r--vm/interp/Jit.c701
-rw-r--r--vm/interp/Jit.h82
-rw-r--r--vm/interp/Stack.c170
-rw-r--r--vm/interp/Stack.h22
7 files changed, 1279 insertions, 87 deletions
diff --git a/vm/interp/Interp.c b/vm/interp/Interp.c
index 27b9582ab..233ee3f7b 100644
--- a/vm/interp/Interp.c
+++ b/vm/interp/Interp.c
@@ -180,7 +180,7 @@ bool dvmAddSingleStep(Thread* thread, int size, int depth)
const StackSaveArea* saveArea;
void* fp;
void* prevFp = NULL;
-
+
for (fp = thread->curFrame; fp != NULL; fp = saveArea->prevFrame) {
const Method* method;
@@ -225,7 +225,7 @@ bool dvmAddSingleStep(Thread* thread, int size, int depth)
} else {
pCtrl->line = dvmLineNumFromPC(saveArea->method,
saveArea->xtra.currentPc - saveArea->method->insns);
- pCtrl->pAddressSet
+ pCtrl->pAddressSet
= dvmAddressSetForLine(saveArea->method, pCtrl->line);
}
pCtrl->frameDepth = dvmComputeVagueFrameDepth(thread, thread->curFrame);
@@ -374,7 +374,7 @@ void dvmDumpRegs(const Method* method, const u4* framePtr, bool inOnly)
* ===========================================================================
*/
-/*
+/*
* Construct an s4 from two consecutive half-words of switch data.
* This needs to check endianness because the DEX optimizer only swaps
* half-words in instruction stream.
@@ -389,6 +389,7 @@ static inline s4 s4FromSwitchData(const void* switchData) {
static inline s4 s4FromSwitchData(const void* switchData) {
u2* data = switchData;
return data[0] | (((s4) data[1]) << 16);
+}
#endif
/*
@@ -478,7 +479,7 @@ s4 dvmInterpHandleSparseSwitch(const u2* switchData, s4 testVal)
size = *switchData++;
assert(size > 0);
-
+
/* The keys are guaranteed to be aligned on a 32-bit boundary;
* we can treat them as a native int array.
*/
@@ -514,6 +515,62 @@ s4 dvmInterpHandleSparseSwitch(const u2* switchData, s4 testVal)
}
/*
+ * Copy data for a fill-array-data instruction. On a little-endian machine
+ * we can just do a memcpy(), on a big-endian system we have work to do.
+ *
+ * The trick here is that dexopt has byte-swapped each code unit, which is
+ * exactly what we want for short/char data. For byte data we need to undo
+ * the swap, and for 4- or 8-byte values we need to swap pieces within
+ * each word.
+ */
+static void copySwappedArrayData(void* dest, const u2* src, u4 size, u2 width)
+{
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ memcpy(dest, src, size*width);
+#else
+ int i;
+
+ switch (width) {
+ case 1:
+ /* un-swap pairs of bytes as we go */
+ for (i = (size-1) & ~1; i >= 0; i -= 2) {
+ ((u1*)dest)[i] = ((u1*)src)[i+1];
+ ((u1*)dest)[i+1] = ((u1*)src)[i];
+ }
+ /*
+ * "src" is padded to end on a two-byte boundary, but we don't want to
+ * assume "dest" is, so we handle odd length specially.
+ */
+ if ((size & 1) != 0) {
+ ((u1*)dest)[size-1] = ((u1*)src)[size];
+ }
+ break;
+ case 2:
+ /* already swapped correctly */
+ memcpy(dest, src, size*width);
+ break;
+ case 4:
+ /* swap word halves */
+ for (i = 0; i < (int) size; i++) {
+ ((u4*)dest)[i] = (src[(i << 1) + 1] << 16) | src[i << 1];
+ }
+ break;
+ case 8:
+ /* swap word halves and words */
+ for (i = 0; i < (int) (size << 1); i += 2) {
+ ((int*)dest)[i] = (src[(i << 1) + 3] << 16) | src[(i << 1) + 2];
+ ((int*)dest)[i+1] = (src[(i << 1) + 1] << 16) | src[i << 1];
+ }
+ break;
+ default:
+ LOGE("Unexpected width %d in copySwappedArrayData\n", width);
+ dvmAbort();
+ break;
+ }
+#endif
+}
+
+/*
* Fill the array with predefined constant values.
*
* Returns true if job is completed, otherwise false to indicate that
@@ -551,7 +608,7 @@ bool dvmInterpHandleFillArrayData(ArrayObject* arrayObj, const u2* arrayData)
dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", NULL);
return false;
}
- memcpy(arrayObj->contents, &arrayData[4], size*width);
+ copySwappedArrayData(arrayObj->contents, &arrayData[4], size, width);
return true;
}
@@ -634,6 +691,189 @@ Method* dvmInterpFindInterfaceMethod(ClassObject* thisClass, u4 methodIdx,
}
+
+/*
+ * Helpers for dvmThrowVerificationError().
+ *
+ * Each returns a newly-allocated string.
+ */
+#define kThrowShow_accessFromClass 1
+static char* classNameFromIndex(const Method* method, int ref,
+ VerifyErrorRefType refType, int flags)
+{
+ static const int kBufLen = 256;
+ const DvmDex* pDvmDex = method->clazz->pDvmDex;
+
+ if (refType == VERIFY_ERROR_REF_FIELD) {
+ /* get class ID from field ID */
+ const DexFieldId* pFieldId = dexGetFieldId(pDvmDex->pDexFile, ref);
+ ref = pFieldId->classIdx;
+ } else if (refType == VERIFY_ERROR_REF_METHOD) {
+ /* get class ID from method ID */
+ const DexMethodId* pMethodId = dexGetMethodId(pDvmDex->pDexFile, ref);
+ ref = pMethodId->classIdx;
+ }
+
+ const char* className = dexStringByTypeIdx(pDvmDex->pDexFile, ref);
+ char* dotClassName = dvmDescriptorToDot(className);
+ if (flags == 0)
+ return dotClassName;
+
+ char* result = (char*) malloc(kBufLen);
+
+ if ((flags & kThrowShow_accessFromClass) != 0) {
+ char* dotFromName = dvmDescriptorToDot(method->clazz->descriptor);
+ snprintf(result, kBufLen, "tried to access class %s from class %s",
+ dotClassName, dotFromName);
+ free(dotFromName);
+ } else {
+ assert(false); // should've been caught above
+ result[0] = '\0';
+ }
+
+ free(dotClassName);
+ return result;
+}
+static char* fieldNameFromIndex(const Method* method, int ref,
+ VerifyErrorRefType refType, int flags)
+{
+ static const int kBufLen = 256;
+ const DvmDex* pDvmDex = method->clazz->pDvmDex;
+ const DexFieldId* pFieldId;
+ const char* className;
+ const char* fieldName;
+
+ if (refType != VERIFY_ERROR_REF_FIELD) {
+ LOGW("Expected ref type %d, got %d\n", VERIFY_ERROR_REF_FIELD, refType);
+ return NULL; /* no message */
+ }
+
+ pFieldId = dexGetFieldId(pDvmDex->pDexFile, ref);
+ className = dexStringByTypeIdx(pDvmDex->pDexFile, pFieldId->classIdx);
+ fieldName = dexStringById(pDvmDex->pDexFile, pFieldId->nameIdx);
+
+ char* dotName = dvmDescriptorToDot(className);
+ char* result = (char*) malloc(kBufLen);
+
+ if ((flags & kThrowShow_accessFromClass) != 0) {
+ char* dotFromName = dvmDescriptorToDot(method->clazz->descriptor);
+ snprintf(result, kBufLen, "tried to access field %s.%s from class %s",
+ dotName, fieldName, dotFromName);
+ free(dotFromName);
+ } else {
+ snprintf(result, kBufLen, "%s.%s", dotName, fieldName);
+ }
+
+ free(dotName);
+ return result;
+}
+static char* methodNameFromIndex(const Method* method, int ref,
+ VerifyErrorRefType refType, int flags)
+{
+ static const int kBufLen = 384;
+ const DvmDex* pDvmDex = method->clazz->pDvmDex;
+ const DexMethodId* pMethodId;
+ const char* className;
+ const char* methodName;
+
+ if (refType != VERIFY_ERROR_REF_METHOD) {
+ LOGW("Expected ref type %d, got %d\n", VERIFY_ERROR_REF_METHOD,refType);
+ return NULL; /* no message */
+ }
+
+ pMethodId = dexGetMethodId(pDvmDex->pDexFile, ref);
+ className = dexStringByTypeIdx(pDvmDex->pDexFile, pMethodId->classIdx);
+ methodName = dexStringById(pDvmDex->pDexFile, pMethodId->nameIdx);
+
+ char* dotName = dvmDescriptorToDot(className);
+ char* result = (char*) malloc(kBufLen);
+
+ if ((flags & kThrowShow_accessFromClass) != 0) {
+ char* dotFromName = dvmDescriptorToDot(method->clazz->descriptor);
+ char* desc = dexProtoCopyMethodDescriptor(&method->prototype);
+ snprintf(result, kBufLen,
+ "tried to access method %s.%s:%s from class %s",
+ dotName, methodName, desc, dotFromName);
+ free(dotFromName);
+ free(desc);
+ } else {
+ snprintf(result, kBufLen, "%s.%s", dotName, methodName);
+ }
+
+ free(dotName);
+ return result;
+}
+
+/*
+ * Throw an exception for a problem identified by the verifier.
+ *
+ * This is used by the invoke-verification-error instruction. It always
+ * throws an exception.
+ *
+ * "kind" indicates the kind of failure encountered by the verifier. It
+ * has two parts, an error code and an indication of the reference type.
+ */
+void dvmThrowVerificationError(const Method* method, int kind, int ref)
+{
+ const int typeMask = 0xff << kVerifyErrorRefTypeShift;
+ VerifyError errorKind = kind & ~typeMask;
+ VerifyErrorRefType refType = kind >> kVerifyErrorRefTypeShift;
+ const char* exceptionName = "Ljava/lang/VerifyError;";
+ char* msg = NULL;
+
+ switch ((VerifyError) errorKind) {
+ case VERIFY_ERROR_NO_CLASS:
+ exceptionName = "Ljava/lang/NoClassDefFoundError;";
+ msg = classNameFromIndex(method, ref, refType, 0);
+ break;
+ case VERIFY_ERROR_NO_FIELD:
+ exceptionName = "Ljava/lang/NoSuchFieldError;";
+ msg = fieldNameFromIndex(method, ref, refType, 0);
+ break;
+ case VERIFY_ERROR_NO_METHOD:
+ exceptionName = "Ljava/lang/NoSuchMethodError;";
+ msg = methodNameFromIndex(method, ref, refType, 0);
+ break;
+ case VERIFY_ERROR_ACCESS_CLASS:
+ exceptionName = "Ljava/lang/IllegalAccessError;";
+ msg = classNameFromIndex(method, ref, refType,
+ kThrowShow_accessFromClass);
+ break;
+ case VERIFY_ERROR_ACCESS_FIELD:
+ exceptionName = "Ljava/lang/IllegalAccessError;";
+ msg = fieldNameFromIndex(method, ref, refType,
+ kThrowShow_accessFromClass);
+ break;
+ case VERIFY_ERROR_ACCESS_METHOD:
+ exceptionName = "Ljava/lang/IllegalAccessError;";
+ msg = methodNameFromIndex(method, ref, refType,
+ kThrowShow_accessFromClass);
+ break;
+ case VERIFY_ERROR_CLASS_CHANGE:
+ exceptionName = "Ljava/lang/IncompatibleClassChangeError;";
+ msg = classNameFromIndex(method, ref, refType, 0);
+ break;
+ case VERIFY_ERROR_INSTANTIATION:
+ exceptionName = "Ljava/lang/InstantiationError;";
+ msg = classNameFromIndex(method, ref, refType, 0);
+ break;
+
+ case VERIFY_ERROR_GENERIC:
+ /* generic VerifyError; use default exception, no message */
+ break;
+ case VERIFY_ERROR_NONE:
+ /* should never happen; use default exception */
+ assert(false);
+ msg = strdup("weird - no error specified");
+ break;
+
+ /* no default clause -- want warning if enum updated */
+ }
+
+ dvmThrowException(exceptionName, msg);
+ free(msg);
+}
+
/*
* Main interpreter loop entry point. Select "standard" or "debug"
* interpreter and switch between them as required.
@@ -649,6 +889,29 @@ void dvmInterpret(Thread* self, const Method* method, JValue* pResult)
{
InterpState interpState;
bool change;
+#if defined(WITH_JIT)
+ /* Interpreter entry points from compiled code */
+ extern void dvmJitToInterpNormal();
+ extern void dvmJitToInterpNoChain();
+ extern void dvmJitToInterpPunt();
+ extern void dvmJitToInterpSingleStep();
+ extern void dvmJitToTraceSelect();
+ extern void dvmJitToPatchPredictedChain();
+
+ /*
+ * Reserve a static entity here to quickly setup runtime contents as
+ * gcc will issue block copy instructions.
+ */
+ static struct JitToInterpEntries jitToInterpEntries = {
+ dvmJitToInterpNormal,
+ dvmJitToInterpNoChain,
+ dvmJitToInterpPunt,
+ dvmJitToInterpSingleStep,
+ dvmJitToTraceSelect,
+ dvmJitToPatchPredictedChain,
+ };
+#endif
+
#if defined(WITH_TRACKREF_CHECKS)
interpState.debugTrackedRefStart =
@@ -657,6 +920,19 @@ void dvmInterpret(Thread* self, const Method* method, JValue* pResult)
#if defined(WITH_PROFILER) || defined(WITH_DEBUGGER)
interpState.debugIsMethodEntry = true;
#endif
+#if defined(WITH_JIT)
+ interpState.jitState = gDvmJit.pJitEntryTable ? kJitNormal : kJitOff;
+
+ /* Setup the Jit-to-interpreter entry points */
+ interpState.jitToInterpEntries = jitToInterpEntries;
+
+ /*
+ * Initialize the threshold filter [don't bother to zero out the
+ * actual table. We're looking for matches, and an occasional
+ * false positive is acceptible.
+ */
+ interpState.lastThreshFilter = 0;
+#endif
/*
* Initialize working state.
@@ -692,6 +968,14 @@ void dvmInterpret(Thread* self, const Method* method, JValue* pResult)
Interpreter stdInterp;
if (gDvm.executionMode == kExecutionModeInterpFast)
stdInterp = dvmMterpStd;
+#if defined(WITH_JIT)
+ else if (gDvm.executionMode == kExecutionModeJit)
+/* If profiling overhead can be kept low enough, we can use a profiling
+ * mterp fast for both Jit and "fast" modes. If overhead is too high,
+ * create a specialized profiling interpreter.
+ */
+ stdInterp = dvmMterpStd;
+#endif
else
stdInterp = dvmInterpretStd;
@@ -702,7 +986,7 @@ void dvmInterpret(Thread* self, const Method* method, JValue* pResult)
LOGVV("threadid=%d: interp STD\n", self->threadId);
change = (*stdInterp)(self, &interpState);
break;
-#if defined(WITH_PROFILER) || defined(WITH_DEBUGGER)
+#if defined(WITH_PROFILER) || defined(WITH_DEBUGGER) || defined(WITH_JIT)
case INTERP_DBG:
LOGVV("threadid=%d: interp DBG\n", self->threadId);
change = dvmInterpretDbg(self, &interpState);
@@ -715,4 +999,3 @@ void dvmInterpret(Thread* self, const Method* method, JValue* pResult)
*pResult = interpState.retval;
}
-
diff --git a/vm/interp/Interp.h b/vm/interp/Interp.h
index eb36b9f50..cd4c7ec04 100644
--- a/vm/interp/Interp.h
+++ b/vm/interp/Interp.h
@@ -26,6 +26,14 @@
void dvmInterpret(Thread* thread, const Method* method, JValue* pResult);
/*
+ * Throw an exception for a problem detected by the verifier.
+ *
+ * This is called from the handler for the throw-verification-error
+ * instruction. "method" is the method currently being executed.
+ */
+void dvmThrowVerificationError(const Method* method, int kind, int ref);
+
+/*
* Breakpoint optimization table.
*/
void dvmInitBreakpoints();
diff --git a/vm/interp/InterpDefs.h b/vm/interp/InterpDefs.h
index 856c2f586..c9c80e32f 100644
--- a/vm/interp/InterpDefs.h
+++ b/vm/interp/InterpDefs.h
@@ -32,8 +32,51 @@ typedef enum InterpEntry {
kInterpEntryInstr = 0, // continue to next instruction
kInterpEntryReturn = 1, // jump to method return
kInterpEntryThrow = 2, // jump to exception throw
+#if defined(WITH_JIT)
+ kInterpEntryResume = 3, // Resume after single-step
+#endif
} InterpEntry;
+#if defined(WITH_JIT)
+/*
+ * There are six entry points from the compiled code to the interpreter:
+ * 1) dvmJitToInterpNormal: find if there is a corresponding compilation for
+ * the new dalvik PC. If so, chain the originating compilation with the
+ * target then jump to it.
+ * 2) dvmJitToInterpInvokeNoChain: similar to 1) but don't chain. This is
+ * for handling 1-to-many mappings like virtual method call and
+ * packed switch.
+ * 3) dvmJitToInterpPunt: use the fast interpreter to execute the next
+ * instruction(s) and stay there as long as it is appropriate to return
+ * to the compiled land. This is used when the jit'ed code is about to
+ * throw an exception.
+ * 4) dvmJitToInterpSingleStep: use the portable interpreter to execute the
+ * next instruction only and return to pre-specified location in the
+ * compiled code to resume execution. This is mainly used as debugging
+ * feature to bypass problematic opcode implementations without
+ * disturbing the trace formation.
+ * 5) dvmJitToTraceSelect: if there is a single exit from a translation that
+ * has already gone hot enough to be translated, we should assume that
+ * the exit point should also be translated (this is a common case for
+ * invokes). This trace exit will first check for a chaining
+ * opportunity, and if none is available will switch to the debug
+ * interpreter immediately for trace selection (as if threshold had
+ * just been reached).
+ * 6) dvmJitToPredictedChain: patch the chaining cell for a virtual call site
+ * to a predicted callee.
+ */
+struct JitToInterpEntries {
+ void *dvmJitToInterpNormal;
+ void *dvmJitToInterpNoChain;
+ void *dvmJitToInterpPunt;
+ void *dvmJitToInterpSingleStep;
+ void *dvmJitToTraceSelect;
+ void *dvmJitToPatchPredictedChain;
+};
+
+#define JIT_TRACE_THRESH_FILTER_SIZE 16
+#endif
+
/*
* Interpreter context, used when switching from one interpreter to
* another. We also tuck "mterp" state in here.
@@ -66,7 +109,7 @@ typedef struct InterpState {
const u1* interpStackEnd;
volatile int* pSelfSuspendCount;
#if defined(WITH_DEBUGGER)
- volatile bool* pDebuggerActive;
+ volatile u1* pDebuggerActive;
#endif
#if defined(WITH_PROFILER)
volatile int* pActiveProfilers;
@@ -78,8 +121,17 @@ typedef struct InterpState {
* Interpreter switching.
*/
InterpEntry entryPoint; // what to do when we start
- int nextMode; // INTERP_STD or INTERP_DBG
+ int nextMode; // INTERP_STD, INTERP_DBG
+#if defined(WITH_JIT)
+ /*
+ * Local copies of field from gDvm placed here for fast access
+ */
+ unsigned char* pJitProfTable;
+ JitState jitState;
+ void* jitResume;
+ u2* jitResumePC;
+#endif
#if defined(WITH_PROFILER) || defined(WITH_DEBUGGER)
bool debugIsMethodEntry; // used for method entry event triggers
@@ -88,6 +140,19 @@ typedef struct InterpState {
int debugTrackedRefStart; // tracked refs from prior invocations
#endif
+#if defined(WITH_JIT)
+ struct JitToInterpEntries jitToInterpEntries;
+
+ int currTraceRun;
+ int totalTraceLen; // Number of Dalvik insts in trace
+ const u2* currTraceHead; // Start of the trace we're building
+ const u2* currRunHead; // Start of run we're building
+ int currRunLen; // Length of run in 16-bit words
+ int lastThreshFilter;
+ const u2* threshFilter[JIT_TRACE_THRESH_FILTER_SIZE];
+ JitTraceRun trace[MAX_JIT_RUN_LEN];
+#endif
+
} InterpState;
/*
@@ -123,7 +188,7 @@ s4 dvmInterpHandleSparseSwitch(const u2* switchData, s4 testVal);
/*
* Process fill-array-data.
*/
-bool dvmInterpHandleFillArrayData(ArrayObject* arrayObject,
+bool dvmInterpHandleFillArrayData(ArrayObject* arrayObject,
const u2* arrayData);
/*
@@ -145,4 +210,19 @@ static inline bool dvmDebuggerOrProfilerActive(void)
;
}
+#if defined(WITH_JIT)
+/*
+ * Determine if the jit, debugger or profiler is currently active. Used when
+ * selecting which interpreter to switch to.
+ */
+static inline bool dvmJitDebuggerOrProfilerActive(int jitState)
+{
+ return jitState != kJitOff
+#if defined(WITH_PROFILER)
+ || gDvm.activeProfilers != 0
+#endif
+ ||gDvm.debuggerActive;
+}
+#endif
+
#endif /*_DALVIK_INTERP_DEFS*/
diff --git a/vm/interp/Jit.c b/vm/interp/Jit.c
new file mode 100644
index 000000000..2bcb1f5c3
--- /dev/null
+++ b/vm/interp/Jit.c
@@ -0,0 +1,701 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifdef WITH_JIT
+
+/*
+ * Target independent portion of Android's Jit
+ */
+
+#include "Dalvik.h"
+#include "Jit.h"
+
+
+#include "dexdump/OpCodeNames.h"
+#include <unistd.h>
+#include <pthread.h>
+#include <sys/time.h>
+#include <signal.h>
+#include "compiler/Compiler.h"
+#include "compiler/CompilerUtility.h"
+#include "compiler/CompilerIR.h"
+#include <errno.h>
+
+int dvmJitStartup(void)
+{
+ unsigned int i;
+ bool res = true; /* Assume success */
+
+ // Create the compiler thread and setup miscellaneous chores */
+ res &= dvmCompilerStartup();
+
+ dvmInitMutex(&gDvmJit.tableLock);
+ if (res && gDvm.executionMode == kExecutionModeJit) {
+ JitEntry *pJitTable = NULL;
+ unsigned char *pJitProfTable = NULL;
+ assert(gDvm.jitTableSize &&
+ !(gDvm.jitTableSize & (gDvmJit.jitTableSize - 1))); // Power of 2?
+ dvmLockMutex(&gDvmJit.tableLock);
+ pJitTable = (JitEntry*)
+ calloc(gDvmJit.jitTableSize, sizeof(*pJitTable));
+ if (!pJitTable) {
+ LOGE("jit table allocation failed\n");
+ res = false;
+ goto done;
+ }
+ /*
+ * NOTE: the profile table must only be allocated once, globally.
+ * Profiling is turned on and off by nulling out gDvm.pJitProfTable
+ * and then restoring its original value. However, this action
+ * is not syncronized for speed so threads may continue to hold
+ * and update the profile table after profiling has been turned
+ * off by null'ng the global pointer. Be aware.
+ */
+ pJitProfTable = (unsigned char *)malloc(JIT_PROF_SIZE);
+ if (!pJitProfTable) {
+ LOGE("jit prof table allocation failed\n");
+ res = false;
+ goto done;
+ }
+ memset(pJitProfTable,0,JIT_PROF_SIZE);
+ for (i=0; i < gDvmJit.jitTableSize; i++) {
+ pJitTable[i].u.info.chain = gDvmJit.jitTableSize;
+ }
+ /* Is chain field wide enough for termination pattern? */
+ assert(pJitTable[0].u.info.chain == gDvm.maxJitTableEntries);
+
+done:
+ gDvmJit.pJitEntryTable = pJitTable;
+ gDvmJit.jitTableMask = gDvmJit.jitTableSize - 1;
+ gDvmJit.jitTableEntriesUsed = 0;
+ gDvmJit.pProfTableCopy = gDvmJit.pProfTable = pJitProfTable;
+ dvmUnlockMutex(&gDvmJit.tableLock);
+ }
+ return res;
+}
+
+/*
+ * If one of our fixed tables or the translation buffer fills up,
+ * call this routine to avoid wasting cycles on future translation requests.
+ */
+void dvmJitStopTranslationRequests()
+{
+ /*
+ * Note 1: This won't necessarily stop all translation requests, and
+ * operates on a delayed mechanism. Running threads look to the copy
+ * of this value in their private InterpState structures and won't see
+ * this change until it is refreshed (which happens on interpreter
+ * entry).
+ * Note 2: This is a one-shot memory leak on this table. Because this is a
+ * permanent off switch for Jit profiling, it is a one-time leak of 1K
+ * bytes, and no further attempt will be made to re-allocate it. Can't
+ * free it because some thread may be holding a reference.
+ */
+ gDvmJit.pProfTable = gDvmJit.pProfTableCopy = NULL;
+}
+
+#if defined(EXIT_STATS)
+/* Convenience function to increment counter from assembly code */
+void dvmBumpNoChain()
+{
+ gDvm.jitNoChainExit++;
+}
+
+/* Convenience function to increment counter from assembly code */
+void dvmBumpNormal()
+{
+ gDvm.jitNormalExit++;
+}
+
+/* Convenience function to increment counter from assembly code */
+void dvmBumpPunt(int from)
+{
+ gDvm.jitPuntExit++;
+}
+#endif
+
+/* Dumps debugging & tuning stats to the log */
+void dvmJitStats()
+{
+ int i;
+ int hit;
+ int not_hit;
+ int chains;
+ if (gDvmJit.pJitEntryTable) {
+ for (i=0, chains=hit=not_hit=0;
+ i < (int) gDvmJit.jitTableSize;
+ i++) {
+ if (gDvmJit.pJitEntryTable[i].dPC != 0)
+ hit++;
+ else
+ not_hit++;
+ if (gDvmJit.pJitEntryTable[i].u.info.chain != gDvmJit.jitTableSize)
+ chains++;
+ }
+ LOGD(
+ "JIT: %d traces, %d slots, %d chains, %d maxQ, %d thresh, %s",
+ hit, not_hit + hit, chains, gDvmJit.compilerMaxQueued,
+ gDvmJit.threshold, gDvmJit.blockingMode ? "Blocking" : "Non-blocking");
+#if defined(EXIT_STATS)
+ LOGD(
+ "JIT: Lookups: %d hits, %d misses; %d NoChain, %d normal, %d punt",
+ gDvmJit.addrLookupsFound, gDvmJit.addrLookupsNotFound,
+ gDvmJit.noChainExit, gDvmJit.normalExit, gDvmJit.puntExit);
+#endif
+ LOGD("JIT: %d Translation chains", gDvmJit.translationChains);
+#if defined(INVOKE_STATS)
+ LOGD("JIT: Invoke: %d chainable, %d pred. chain, %d native, "
+ "%d return",
+ gDvmJit.invokeChain, gDvmJit.invokePredictedChain,
+ gDvmJit.invokeNative, gDvmJit.returnOp);
+#endif
+ if (gDvmJit.profile) {
+ dvmCompilerSortAndPrintTraceProfiles();
+ }
+ }
+}
+
+
+/*
+ * Final JIT shutdown. Only do this once, and do not attempt to restart
+ * the JIT later.
+ */
+void dvmJitShutdown(void)
+{
+ /* Shutdown the compiler thread */
+ dvmCompilerShutdown();
+
+ dvmCompilerDumpStats();
+
+ dvmDestroyMutex(&gDvmJit.tableLock);
+
+ if (gDvmJit.pJitEntryTable) {
+ free(gDvmJit.pJitEntryTable);
+ gDvmJit.pJitEntryTable = NULL;
+ }
+
+ if (gDvmJit.pProfTable) {
+ free(gDvmJit.pProfTable);
+ gDvmJit.pProfTable = NULL;
+ }
+}
+
+/*
+ * Adds to the current trace request one instruction at a time, just
+ * before that instruction is interpreted. This is the primary trace
+ * selection function. NOTE: return instruction are handled a little
+ * differently. In general, instructions are "proposed" to be added
+ * to the current trace prior to interpretation. If the interpreter
+ * then successfully completes the instruction, is will be considered
+ * part of the request. This allows us to examine machine state prior
+ * to interpretation, and also abort the trace request if the instruction
+ * throws or does something unexpected. However, return instructions
+ * will cause an immediate end to the translation request - which will
+ * be passed to the compiler before the return completes. This is done
+ * in response to special handling of returns by the interpreter (and
+ * because returns cannot throw in a way that causes problems for the
+ * translated code.
+ */
+int dvmCheckJit(const u2* pc, Thread* self, InterpState* interpState)
+{
+ int flags,i,len;
+ int switchInterp = false;
+ int debugOrProfile = (gDvm.debuggerActive || self->suspendCount
+#if defined(WITH_PROFILER)
+ || gDvm.activeProfilers
+#endif
+ );
+
+ switch (interpState->jitState) {
+ char* nopStr;
+ int target;
+ int offset;
+ DecodedInstruction decInsn;
+ case kJitTSelect:
+ dexDecodeInstruction(gDvm.instrFormat, pc, &decInsn);
+#if defined(SHOW_TRACE)
+ LOGD("TraceGen: adding %s",getOpcodeName(decInsn.opCode));
+#endif
+ flags = dexGetInstrFlags(gDvm.instrFlags, decInsn.opCode);
+ len = dexGetInstrOrTableWidthAbs(gDvm.instrWidth, pc);
+ offset = pc - interpState->method->insns;
+ if (pc != interpState->currRunHead + interpState->currRunLen) {
+ int currTraceRun;
+ /* We need to start a new trace run */
+ currTraceRun = ++interpState->currTraceRun;
+ interpState->currRunLen = 0;
+ interpState->currRunHead = (u2*)pc;
+ interpState->trace[currTraceRun].frag.startOffset = offset;
+ interpState->trace[currTraceRun].frag.numInsts = 0;
+ interpState->trace[currTraceRun].frag.runEnd = false;
+ interpState->trace[currTraceRun].frag.hint = kJitHintNone;
+ }
+ interpState->trace[interpState->currTraceRun].frag.numInsts++;
+ interpState->totalTraceLen++;
+ interpState->currRunLen += len;
+ if ( ((flags & kInstrUnconditional) == 0) &&
+ /* don't end trace on INVOKE_DIRECT_EMPTY */
+ (decInsn.opCode != OP_INVOKE_DIRECT_EMPTY) &&
+ ((flags & (kInstrCanBranch |
+ kInstrCanSwitch |
+ kInstrCanReturn |
+ kInstrInvoke)) != 0)) {
+ interpState->jitState = kJitTSelectEnd;
+#if defined(SHOW_TRACE)
+ LOGD("TraceGen: ending on %s, basic block end",
+ getOpcodeName(decInsn.opCode));
+#endif
+ }
+ if (decInsn.opCode == OP_THROW) {
+ interpState->jitState = kJitTSelectEnd;
+ }
+ if (interpState->totalTraceLen >= JIT_MAX_TRACE_LEN) {
+ interpState->jitState = kJitTSelectEnd;
+ }
+ if (debugOrProfile) {
+ interpState->jitState = kJitTSelectAbort;
+ switchInterp = !debugOrProfile;
+ break;
+ }
+ if ((flags & kInstrCanReturn) != kInstrCanReturn) {
+ break;
+ }
+ /* NOTE: intentional fallthrough for returns */
+ case kJitTSelectEnd:
+ {
+ if (interpState->totalTraceLen == 0) {
+ switchInterp = !debugOrProfile;
+ break;
+ }
+ JitTraceDescription* desc =
+ (JitTraceDescription*)malloc(sizeof(JitTraceDescription) +
+ sizeof(JitTraceRun) * (interpState->currTraceRun+1));
+ if (desc == NULL) {
+ LOGE("Out of memory in trace selection");
+ dvmJitStopTranslationRequests();
+ interpState->jitState = kJitTSelectAbort;
+ switchInterp = !debugOrProfile;
+ break;
+ }
+ interpState->trace[interpState->currTraceRun].frag.runEnd =
+ true;
+ interpState->jitState = kJitNormal;
+ desc->method = interpState->method;
+ memcpy((char*)&(desc->trace[0]),
+ (char*)&(interpState->trace[0]),
+ sizeof(JitTraceRun) * (interpState->currTraceRun+1));
+#if defined(SHOW_TRACE)
+ LOGD("TraceGen: trace done, adding to queue");
+#endif
+ dvmCompilerWorkEnqueue(
+ interpState->currTraceHead,kWorkOrderTrace,desc);
+ if (gDvmJit.blockingMode) {
+ dvmCompilerDrainQueue();
+ }
+ switchInterp = !debugOrProfile;
+ }
+ break;
+ case kJitSingleStep:
+ interpState->jitState = kJitSingleStepEnd;
+ break;
+ case kJitSingleStepEnd:
+ interpState->entryPoint = kInterpEntryResume;
+ switchInterp = !debugOrProfile;
+ break;
+ case kJitTSelectAbort:
+#if defined(SHOW_TRACE)
+ LOGD("TraceGen: trace abort");
+#endif
+ interpState->jitState = kJitNormal;
+ switchInterp = !debugOrProfile;
+ break;
+ case kJitNormal:
+ switchInterp = !debugOrProfile;
+ break;
+ default:
+ dvmAbort();
+ }
+ return switchInterp;
+}
+
+static inline JitEntry *findJitEntry(const u2* pc)
+{
+ int idx = dvmJitHash(pc);
+
+ /* Expect a high hit rate on 1st shot */
+ if (gDvmJit.pJitEntryTable[idx].dPC == pc)
+ return &gDvmJit.pJitEntryTable[idx];
+ else {
+ int chainEndMarker = gDvmJit.jitTableSize;
+ while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
+ idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
+ if (gDvmJit.pJitEntryTable[idx].dPC == pc)
+ return &gDvmJit.pJitEntryTable[idx];
+ }
+ }
+ return NULL;
+}
+
+JitEntry *dvmFindJitEntry(const u2* pc)
+{
+ return findJitEntry(pc);
+}
+
+/*
+ * If a translated code address exists for the davik byte code
+ * pointer return it. This routine needs to be fast.
+ */
+void* dvmJitGetCodeAddr(const u2* dPC)
+{
+ int idx = dvmJitHash(dPC);
+
+ /* If anything is suspended, don't re-enter the code cache */
+ if (gDvm.sumThreadSuspendCount > 0) {
+ return NULL;
+ }
+
+ /* Expect a high hit rate on 1st shot */
+ if (gDvmJit.pJitEntryTable[idx].dPC == dPC) {
+#if defined(EXIT_STATS)
+ gDvmJit.addrLookupsFound++;
+#endif
+ return gDvmJit.pJitEntryTable[idx].codeAddress;
+ } else {
+ int chainEndMarker = gDvmJit.jitTableSize;
+ while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
+ idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
+ if (gDvmJit.pJitEntryTable[idx].dPC == dPC) {
+#if defined(EXIT_STATS)
+ gDvmJit.addrLookupsFound++;
+#endif
+ return gDvmJit.pJitEntryTable[idx].codeAddress;
+ }
+ }
+ }
+#if defined(EXIT_STATS)
+ gDvmJit.addrLookupsNotFound++;
+#endif
+ return NULL;
+}
+
+/*
+ * Find an entry in the JitTable, creating if necessary.
+ * Returns null if table is full.
+ */
+JitEntry *dvmJitLookupAndAdd(const u2* dPC)
+{
+ u4 chainEndMarker = gDvmJit.jitTableSize;
+ u4 idx = dvmJitHash(dPC);
+
+ /* Walk the bucket chain to find an exact match for our PC */
+ while ((gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) &&
+ (gDvmJit.pJitEntryTable[idx].dPC != dPC)) {
+ idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
+ }
+
+ if (gDvmJit.pJitEntryTable[idx].dPC != dPC) {
+ /*
+ * No match. Aquire jitTableLock and find the last
+ * slot in the chain. Possibly continue the chain walk in case
+ * some other thread allocated the slot we were looking
+ * at previuosly (perhaps even the dPC we're trying to enter).
+ */
+ dvmLockMutex(&gDvmJit.tableLock);
+ /*
+ * At this point, if .dPC is NULL, then the slot we're
+ * looking at is the target slot from the primary hash
+ * (the simple, and common case). Otherwise we're going
+ * to have to find a free slot and chain it.
+ */
+ MEM_BARRIER(); /* Make sure we reload [].dPC after lock */
+ if (gDvmJit.pJitEntryTable[idx].dPC != NULL) {
+ u4 prev;
+ while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
+ if (gDvmJit.pJitEntryTable[idx].dPC == dPC) {
+ /* Another thread got there first for this dPC */
+ dvmUnlockMutex(&gDvmJit.tableLock);
+ return &gDvmJit.pJitEntryTable[idx];
+ }
+ idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
+ }
+ /* Here, idx should be pointing to the last cell of an
+ * active chain whose last member contains a valid dPC */
+ assert(gDvmJit.pJitEntryTable[idx].dPC != NULL);
+ /* Linear walk to find a free cell and add it to the end */
+ prev = idx;
+ while (true) {
+ idx++;
+ if (idx == chainEndMarker)
+ idx = 0; /* Wraparound */
+ if ((gDvmJit.pJitEntryTable[idx].dPC == NULL) ||
+ (idx == prev))
+ break;
+ }
+ if (idx != prev) {
+ JitEntryInfoUnion oldValue;
+ JitEntryInfoUnion newValue;
+ /*
+             * Although we hold the lock so that no one else will
+ * be trying to update a chain field, the other fields
+ * packed into the word may be in use by other threads.
+ */
+ do {
+ oldValue = gDvmJit.pJitEntryTable[prev].u;
+ newValue = oldValue;
+ newValue.info.chain = idx;
+ } while (!ATOMIC_CMP_SWAP(
+ &gDvmJit.pJitEntryTable[prev].u.infoWord,
+ oldValue.infoWord, newValue.infoWord));
+ }
+ }
+ if (gDvmJit.pJitEntryTable[idx].dPC == NULL) {
+ /* Allocate the slot */
+ gDvmJit.pJitEntryTable[idx].dPC = dPC;
+ gDvmJit.jitTableEntriesUsed++;
+ } else {
+ /* Table is full */
+ idx = chainEndMarker;
+ }
+ dvmUnlockMutex(&gDvmJit.tableLock);
+ }
+ return (idx == chainEndMarker) ? NULL : &gDvmJit.pJitEntryTable[idx];
+}
+/*
+ * Register the translated code pointer into the JitTable.
+ * NOTE: Once a codeAddress field transitions from NULL to
+ * JIT'd code, it must not be altered without first halting all
+ * threads. This routine should only be called by the compiler
+ * thread.
+ */
+void dvmJitSetCodeAddr(const u2* dPC, void *nPC, JitInstructionSetType set) {
+ JitEntryInfoUnion oldValue;
+ JitEntryInfoUnion newValue;
+ JitEntry *jitEntry = dvmJitLookupAndAdd(dPC);
+ assert(jitEntry);
+ /* Note: order of update is important */
+ do {
+ oldValue = jitEntry->u;
+ newValue = oldValue;
+ newValue.info.instructionSet = set;
+ } while (!ATOMIC_CMP_SWAP(
+ &jitEntry->u.infoWord,
+ oldValue.infoWord, newValue.infoWord));
+ jitEntry->codeAddress = nPC;
+}
+
+/*
+ * Determine if valid trace-building request is active. Return true
+ * if we need to abort and switch back to the fast interpreter, false
+ * otherwise. NOTE: may be called even when trace selection is not being
+ * requested
+ */
+
+bool dvmJitCheckTraceRequest(Thread* self, InterpState* interpState)
+{
+ bool res = false; /* Assume success */
+ int i;
+ if (gDvmJit.pJitEntryTable != NULL) {
+ /* Two-level filtering scheme */
+ for (i=0; i< JIT_TRACE_THRESH_FILTER_SIZE; i++) {
+ if (interpState->pc == interpState->threshFilter[i]) {
+ break;
+ }
+ }
+ if (i == JIT_TRACE_THRESH_FILTER_SIZE) {
+ /*
+ * Use random replacement policy - otherwise we could miss a large
+ * loop that contains more traces than the size of our filter array.
+ */
+ i = rand() % JIT_TRACE_THRESH_FILTER_SIZE;
+ interpState->threshFilter[i] = interpState->pc;
+ res = true;
+ }
+ /*
+ * If the compiler is backlogged, or if a debugger or profiler is
+ * active, cancel any JIT actions
+ */
+ if ( res || (gDvmJit.compilerQueueLength >= gDvmJit.compilerHighWater) ||
+ gDvm.debuggerActive || self->suspendCount
+#if defined(WITH_PROFILER)
+ || gDvm.activeProfilers
+#endif
+ ) {
+ if (interpState->jitState != kJitOff) {
+ interpState->jitState = kJitNormal;
+ }
+ } else if (interpState->jitState == kJitTSelectRequest) {
+ JitEntry *slot = dvmJitLookupAndAdd(interpState->pc);
+ if (slot == NULL) {
+ /*
+ * Table is full. This should have been
+ * detected by the compiler thread and the table
+ * resized before we run into it here. Assume bad things
+ * are afoot and disable profiling.
+ */
+ interpState->jitState = kJitTSelectAbort;
+ LOGD("JIT: JitTable full, disabling profiling");
+ dvmJitStopTranslationRequests();
+ } else if (slot->u.info.traceRequested) {
+ /* Trace already requested - revert to interpreter */
+ interpState->jitState = kJitTSelectAbort;
+ } else {
+ /* Mark request */
+ JitEntryInfoUnion oldValue;
+ JitEntryInfoUnion newValue;
+ do {
+ oldValue = slot->u;
+ newValue = oldValue;
+ newValue.info.traceRequested = true;
+ } while (!ATOMIC_CMP_SWAP( &slot->u.infoWord,
+ oldValue.infoWord, newValue.infoWord));
+ }
+ }
+ switch (interpState->jitState) {
+ case kJitTSelectRequest:
+ interpState->jitState = kJitTSelect;
+ interpState->currTraceHead = interpState->pc;
+ interpState->currTraceRun = 0;
+ interpState->totalTraceLen = 0;
+ interpState->currRunHead = interpState->pc;
+ interpState->currRunLen = 0;
+ interpState->trace[0].frag.startOffset =
+ interpState->pc - interpState->method->insns;
+ interpState->trace[0].frag.numInsts = 0;
+ interpState->trace[0].frag.runEnd = false;
+ interpState->trace[0].frag.hint = kJitHintNone;
+ break;
+ case kJitTSelect:
+ case kJitTSelectAbort:
+ res = true;
+ case kJitSingleStep:
+ case kJitSingleStepEnd:
+ case kJitOff:
+ case kJitNormal:
+ break;
+ default:
+ dvmAbort();
+ }
+ }
+ return res;
+}
+
+/*
+ * Resizes the JitTable. Must be a power of 2, and returns true on failure.
+ * Stops all threads, and thus is a heavyweight operation.
+ */
+bool dvmJitResizeJitTable( unsigned int size )
+{
+ JitEntry *pNewTable;
+ JitEntry *pOldTable;
+ u4 newMask;
+ unsigned int oldSize;
+ unsigned int i;
+
+    assert(gDvmJit.pJitEntryTable != NULL);
+ assert(size && !(size & (size - 1))); /* Is power of 2? */
+
+ LOGD("Jit: resizing JitTable from %d to %d", gDvmJit.jitTableSize, size);
+
+ newMask = size - 1;
+
+ if (size <= gDvmJit.jitTableSize) {
+ return true;
+ }
+
+ pNewTable = (JitEntry*)calloc(size, sizeof(*pNewTable));
+ if (pNewTable == NULL) {
+ return true;
+ }
+ for (i=0; i< size; i++) {
+ pNewTable[i].u.info.chain = size; /* Initialize chain termination */
+ }
+
+ /* Stop all other interpreting/jit'ng threads */
+ dvmSuspendAllThreads(SUSPEND_FOR_JIT);
+
+ pOldTable = gDvmJit.pJitEntryTable;
+ oldSize = gDvmJit.jitTableSize;
+
+ dvmLockMutex(&gDvmJit.tableLock);
+ gDvmJit.pJitEntryTable = pNewTable;
+ gDvmJit.jitTableSize = size;
+ gDvmJit.jitTableMask = size - 1;
+ gDvmJit.jitTableEntriesUsed = 0;
+ dvmUnlockMutex(&gDvmJit.tableLock);
+
+ for (i=0; i < oldSize; i++) {
+ if (pOldTable[i].dPC) {
+ JitEntry *p;
+ u2 chain;
+ p = dvmJitLookupAndAdd(pOldTable[i].dPC);
+ p->dPC = pOldTable[i].dPC;
+ /*
+ * Compiler thread may have just updated the new entry's
+ * code address field, so don't blindly copy null.
+ */
+ if (pOldTable[i].codeAddress != NULL) {
+ p->codeAddress = pOldTable[i].codeAddress;
+ }
+ /* We need to preserve the new chain field, but copy the rest */
+ dvmLockMutex(&gDvmJit.tableLock);
+ chain = p->u.info.chain;
+ p->u = pOldTable[i].u;
+ p->u.info.chain = chain;
+ dvmUnlockMutex(&gDvmJit.tableLock);
+ }
+ }
+
+ free(pOldTable);
+
+ /* Restart the world */
+ dvmResumeAllThreads(SUSPEND_FOR_JIT);
+
+ return false;
+}
+
+/*
+ * Float/double conversion requires clamping to min and max of integer form. If
+ * target doesn't support this normally, use these.
+ */
+s8 dvmJitd2l(double d)
+{
+ static const double kMaxLong = (double)(s8)0x7fffffffffffffffULL;
+ static const double kMinLong = (double)(s8)0x8000000000000000ULL;
+ if (d >= kMaxLong)
+ return (s8)0x7fffffffffffffffULL;
+ else if (d <= kMinLong)
+ return (s8)0x8000000000000000ULL;
+ else if (d != d) // NaN case
+ return 0;
+ else
+ return (s8)d;
+}
+
+s8 dvmJitf2l(float f)
+{
+ static const float kMaxLong = (float)(s8)0x7fffffffffffffffULL;
+ static const float kMinLong = (float)(s8)0x8000000000000000ULL;
+ if (f >= kMaxLong)
+ return (s8)0x7fffffffffffffffULL;
+ else if (f <= kMinLong)
+ return (s8)0x8000000000000000ULL;
+ else if (f != f) // NaN case
+ return 0;
+ else
+ return (s8)f;
+}
+
+
+#endif /* WITH_JIT */
diff --git a/vm/interp/Jit.h b/vm/interp/Jit.h
new file mode 100644
index 000000000..660b5ecf6
--- /dev/null
+++ b/vm/interp/Jit.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Jit control
+ */
+#ifndef _DALVIK_INTERP_JIT
+#define _DALVIK_INTERP_JIT
+
+#include "InterpDefs.h"
+
+#define JIT_PROF_SIZE 512
+
+#define JIT_MAX_TRACE_LEN 100
+
+/*
+ * JitTable hash function.
+ */
+
+static inline u4 dvmJitHashMask( const u2* p, u4 mask ) {
+ return ((((u4)p>>12)^(u4)p)>>1) & (mask);
+}
+
+static inline u4 dvmJitHash( const u2* p ) {
+ return dvmJitHashMask( p, gDvmJit.jitTableMask );
+}
+
+/*
+ * Entries in the JIT's address lookup hash table.
+ * Fields which may be updated by multiple threads packed into a
+ * single 32-bit word to allow use of atomic update.
+ */
+
+typedef struct JitEntryInfo {
+ unsigned int traceRequested:1; /* already requested a translation */
+ unsigned int isMethodEntry:1;
+ unsigned int inlineCandidate:1;
+ unsigned int profileEnabled:1;
+ JitInstructionSetType instructionSet:4;
+ unsigned int unused:8;
+ u2 chain; /* Index of next in chain */
+} JitEntryInfo;
+
+typedef union JitEntryInfoUnion {
+ JitEntryInfo info;
+ volatile int infoWord;
+} JitEntryInfoUnion;
+
+typedef struct JitEntry {
+ JitEntryInfoUnion u;
+ u2 chain; /* Index of next in chain */
+ const u2* dPC; /* Dalvik code address */
+ void* codeAddress; /* Code address of native translation */
+} JitEntry;
+
+int dvmJitStartup(void);
+void dvmJitShutdown(void);
+int dvmCheckJit(const u2* pc, Thread* self, InterpState* interpState);
+void* dvmJitGetCodeAddr(const u2* dPC);
+bool dvmJitCheckTraceRequest(Thread* self, InterpState* interpState);
+void dvmJitStopTranslationRequests(void);
+void dvmJitStats(void);
+bool dvmJitResizeJitTable(unsigned int size);
+struct JitEntry *dvmFindJitEntry(const u2* pc);
+s8 dvmJitd2l(double d);
+s8 dvmJitf2l(float f);
+void dvmJitSetCodeAddr(const u2* dPC, void *nPC, JitInstructionSetType set);
+
+
+#endif /*_DALVIK_INTERP_JIT*/
diff --git a/vm/interp/Stack.c b/vm/interp/Stack.c
index 6f418fdf7..5920cbc3a 100644
--- a/vm/interp/Stack.c
+++ b/vm/interp/Stack.c
@@ -76,9 +76,10 @@ static bool dvmPushInterpFrame(Thread* self, const Method* method)
if (stackPtr - stackReq < self->interpStackEnd) {
/* not enough space */
- LOGW("Stack overflow on call to interp (top=%p cur=%p size=%d %s.%s)\n",
- self->interpStackStart, self->curFrame, self->interpStackSize,
- method->clazz->descriptor, method->name);
+ LOGW("Stack overflow on call to interp "
+ "(req=%d top=%p cur=%p size=%d %s.%s)\n",
+ stackReq, self->interpStackStart, self->curFrame,
+ self->interpStackSize, method->clazz->descriptor, method->name);
dvmHandleStackOverflow(self);
assert(dvmCheckException(self));
return false;
@@ -104,7 +105,7 @@ static bool dvmPushInterpFrame(Thread* self, const Method* method)
breakSaveBlock->prevFrame = self->curFrame;
breakSaveBlock->savedPc = NULL; // not required
- breakSaveBlock->xtra.localRefTop = NULL; // not required
+ breakSaveBlock->xtra.localRefCookie = 0; // not required
breakSaveBlock->method = NULL;
saveBlock->prevFrame = FP_FROM_SAVEAREA(breakSaveBlock);
saveBlock->savedPc = NULL; // not required
@@ -148,9 +149,10 @@ bool dvmPushJNIFrame(Thread* self, const Method* method)
if (stackPtr - stackReq < self->interpStackEnd) {
/* not enough space */
- LOGW("Stack overflow on call to native (top=%p cur=%p size=%d '%s')\n",
- self->interpStackStart, self->curFrame, self->interpStackSize,
- method->name);
+ LOGW("Stack overflow on call to native "
+ "(req=%d top=%p cur=%p size=%d '%s')\n",
+ stackReq, self->interpStackStart, self->curFrame,
+ self->interpStackSize, method->name);
dvmHandleStackOverflow(self);
assert(dvmCheckException(self));
return false;
@@ -180,11 +182,15 @@ bool dvmPushJNIFrame(Thread* self, const Method* method)
breakSaveBlock->prevFrame = self->curFrame;
breakSaveBlock->savedPc = NULL; // not required
- breakSaveBlock->xtra.localRefTop = NULL; // not required
+ breakSaveBlock->xtra.localRefCookie = 0; // not required
breakSaveBlock->method = NULL;
saveBlock->prevFrame = FP_FROM_SAVEAREA(breakSaveBlock);
saveBlock->savedPc = NULL; // not required
- saveBlock->xtra.localRefTop = self->jniLocalRefTable.nextEntry;
+#ifdef USE_INDIRECT_REF
+ saveBlock->xtra.localRefCookie = self->jniLocalRefTable.segmentState.all;
+#else
+ saveBlock->xtra.localRefCookie = self->jniLocalRefTable.nextEntry;
+#endif
saveBlock->method = method;
LOGVV("PUSH JNI frame: old=%p new=%p (size=%d)\n",
@@ -217,9 +223,10 @@ bool dvmPushLocalFrame(Thread* self, const Method* method)
if (stackPtr - stackReq < self->interpStackEnd) {
/* not enough space; let JNI throw the exception */
- LOGW("Stack overflow on PushLocal (top=%p cur=%p size=%d '%s')\n",
- self->interpStackStart, self->curFrame, self->interpStackSize,
- method->name);
+ LOGW("Stack overflow on PushLocal "
+ "(req=%d top=%p cur=%p size=%d '%s')\n",
+ stackReq, self->interpStackStart, self->curFrame,
+ self->interpStackSize, method->name);
dvmHandleStackOverflow(self);
assert(dvmCheckException(self));
return false;
@@ -242,7 +249,11 @@ bool dvmPushLocalFrame(Thread* self, const Method* method)
saveBlock->prevFrame = self->curFrame;
saveBlock->savedPc = NULL; // not required
- saveBlock->xtra.localRefTop = self->jniLocalRefTable.nextEntry;
+#ifdef USE_INDIRECT_REF
+ saveBlock->xtra.localRefCookie = self->jniLocalRefTable.segmentState.all;
+#else
+ saveBlock->xtra.localRefCookie = self->jniLocalRefTable.nextEntry;
+#endif
saveBlock->method = method;
LOGVV("PUSH JNI local frame: old=%p new=%p (size=%d)\n",
@@ -317,9 +328,9 @@ static bool dvmPopFrame(Thread* self)
saveBlock->method->name,
(SAVEAREA_FROM_FP(saveBlock->prevFrame)->method == NULL) ?
"" : " (JNI local)");
- assert(saveBlock->xtra.localRefTop != NULL);
- assert(saveBlock->xtra.localRefTop >=self->jniLocalRefTable.table &&
- saveBlock->xtra.localRefTop <=self->jniLocalRefTable.nextEntry);
+ assert(saveBlock->xtra.localRefCookie != 0);
+ //assert(saveBlock->xtra.localRefCookie >= self->jniLocalRefTable.table &&
+ // saveBlock->xtra.localRefCookie <=self->jniLocalRefTable.nextEntry);
dvmPopJniLocals(self, saveBlock);
}
@@ -421,7 +432,7 @@ void dvmCallMethod(Thread* self, const Method* method, Object* obj,
va_list args;
va_start(args, pResult);
- dvmCallMethodV(self, method, obj, pResult, args);
+ dvmCallMethodV(self, method, obj, false, pResult, args);
va_end(args);
}
@@ -435,7 +446,7 @@ void dvmCallMethod(Thread* self, const Method* method, Object* obj,
* we don't need to worry about static synchronized methods.
*/
void dvmCallMethodV(Thread* self, const Method* method, Object* obj,
- JValue* pResult, va_list args)
+ bool fromJni, JValue* pResult, va_list args)
{
const char* desc = &(method->shorty[1]); // [0] is the return type.
int verifyCount = 0;
@@ -460,6 +471,7 @@ void dvmCallMethodV(Thread* self, const Method* method, Object* obj,
verifyCount++;
}
+ JNIEnv* env = self->jniEnv;
while (*desc != '\0') {
switch (*(desc++)) {
case 'D': case 'J': {
@@ -476,16 +488,18 @@ void dvmCallMethodV(Thread* self, const Method* method, Object* obj,
verifyCount++;
break;
}
-#ifdef WITH_EXTRA_OBJECT_VALIDATION
case 'L': { /* 'shorty' descr uses L for all refs, incl array */
- Object* argObj = (Object*) va_arg(args, u4);
+ void* argObj = va_arg(args, void*);
assert(obj == NULL || dvmIsValidObject(obj));
- *ins++ = (u4) argObj;
+ if (fromJni)
+ *ins++ = (u4) dvmDecodeIndirectRef(env, argObj);
+ else
+ *ins++ = (u4) argObj;
verifyCount++;
break;
}
-#endif
default: {
+ /* Z B C S I -- all passed as 32-bit integers */
*ins++ = va_arg(args, u4);
verifyCount++;
break;
@@ -505,11 +519,17 @@ void dvmCallMethodV(Thread* self, const Method* method, Object* obj,
//dvmDumpThreadStack(dvmThreadSelf());
if (dvmIsNativeMethod(method)) {
+#ifdef WITH_PROFILER
+ TRACE_METHOD_ENTER(self, method);
+#endif
/*
* Because we leave no space for local variables, "curFrame" points
* directly at the method arguments.
*/
(*method->nativeFunc)(self->curFrame, pResult, method, self);
+#ifdef WITH_PROFILER
+ TRACE_METHOD_EXIT(self, method);
+#endif
} else {
dvmInterpret(self, method, pResult);
}
@@ -532,7 +552,7 @@ bail:
* "args" may be NULL if the method has no arguments.
*/
void dvmCallMethodA(Thread* self, const Method* method, Object* obj,
- JValue* pResult, const jvalue* args)
+ bool fromJni, JValue* pResult, const jvalue* args)
{
const char* desc = &(method->shorty[1]); // [0] is the return type.
int verifyCount = 0;
@@ -549,56 +569,50 @@ void dvmCallMethodA(Thread* self, const Method* method, Object* obj,
/* put "this" pointer into in0 if appropriate */
if (!dvmIsStaticMethod(method)) {
assert(obj != NULL);
- *ins++ = (u4) obj;
+ *ins++ = (u4) obj; /* obj is a "real" ref */
verifyCount++;
}
+ JNIEnv* env = self->jniEnv;
while (*desc != '\0') {
- switch (*(desc++)) {
- case 'D': case 'J': {
- memcpy(ins, &args->j, 8); /* EABI prevents direct store */
- ins += 2;
- verifyCount += 2;
- args++;
- break;
- }
- case 'F': case 'I': case 'L': { /* (no '[' in short signatures) */
- *ins++ = args->i; /* get all 32 bits */
- verifyCount++;
- args++;
- break;
- }
- case 'S': {
- *ins++ = args->s; /* 16 bits, sign-extended */
- verifyCount++;
- args++;
- break;
- }
- case 'C': {
- *ins++ = args->c; /* 16 bits, unsigned */
- verifyCount++;
- args++;
- break;
- }
- case 'B': {
- *ins++ = args->b; /* 8 bits, sign-extended */
- verifyCount++;
- args++;
- break;
- }
- case 'Z': {
- *ins++ = args->z; /* 8 bits, zero or non-zero */
- verifyCount++;
- args++;
- break;
- }
- default: {
- LOGE("Invalid char %c in short signature of %s.%s\n",
- *(desc-1), clazz->descriptor, method->name);
- assert(false);
- goto bail;
- }
+ switch (*desc++) {
+ case 'D': /* 64-bit quantity; have to use */
+ case 'J': /* memcpy() in case of mis-alignment */
+ memcpy(ins, &args->j, 8);
+ ins += 2;
+ verifyCount++; /* this needs an extra push */
+ break;
+ case 'L': /* includes array refs */
+ if (fromJni)
+ *ins++ = (u4) dvmDecodeIndirectRef(env, args->l);
+ else
+ *ins++ = (u4) args->l;
+ break;
+ case 'F':
+ case 'I':
+ *ins++ = args->i; /* full 32 bits */
+ break;
+ case 'S':
+ *ins++ = args->s; /* 16 bits, sign-extended */
+ break;
+ case 'C':
+ *ins++ = args->c; /* 16 bits, unsigned */
+ break;
+ case 'B':
+ *ins++ = args->b; /* 8 bits, sign-extended */
+ break;
+ case 'Z':
+ *ins++ = args->z; /* 8 bits, zero or non-zero */
+ break;
+ default:
+ LOGE("Invalid char %c in short signature of %s.%s\n",
+ *(desc-1), clazz->descriptor, method->name);
+ assert(false);
+ goto bail;
}
+
+ verifyCount++;
+ args++;
}
#ifndef NDEBUG
@@ -611,11 +625,17 @@ void dvmCallMethodA(Thread* self, const Method* method, Object* obj,
#endif
if (dvmIsNativeMethod(method)) {
+#ifdef WITH_PROFILER
+ TRACE_METHOD_ENTER(self, method);
+#endif
/*
* Because we leave no space for local variables, "curFrame" points
* directly at the method arguments.
*/
(*method->nativeFunc)(self->curFrame, pResult, method, self);
+#ifdef WITH_PROFILER
+ TRACE_METHOD_EXIT(self, method);
+#endif
} else {
dvmInterpret(self, method, pResult);
}
@@ -715,11 +735,17 @@ Object* dvmInvokeMethod(Object* obj, const Method* method,
//dvmDumpThreadStack(dvmThreadSelf());
if (dvmIsNativeMethod(method)) {
+#ifdef WITH_PROFILER
+ TRACE_METHOD_ENTER(self, method);
+#endif
/*
* Because we leave no space for local variables, "curFrame" points
* directly at the method arguments.
*/
(*method->nativeFunc)(self->curFrame, &retval, method, self);
+#ifdef WITH_PROFILER
+ TRACE_METHOD_EXIT(self, method);
+#endif
} else {
dvmInterpret(self, method, &retval);
}
@@ -1137,11 +1163,15 @@ static void dumpFrames(const DebugOutputTarget* target, void* framePtr,
first = false;
- assert(framePtr != saveArea->prevFrame);
+ if (saveArea->prevFrame != NULL && saveArea->prevFrame <= framePtr) {
+ LOGW("Warning: loop in stack trace at frame %d (%p -> %p)\n",
+ checkCount, framePtr, saveArea->prevFrame);
+ break;
+ }
framePtr = saveArea->prevFrame;
checkCount++;
- if (checkCount > 200) {
+ if (checkCount > 300) {
dvmPrintDebugMessage(target,
" ***** printed %d frames, not showing any more\n",
checkCount);
diff --git a/vm/interp/Stack.h b/vm/interp/Stack.h
index 1b28d49d2..22f066fb0 100644
--- a/vm/interp/Stack.h
+++ b/vm/interp/Stack.h
@@ -138,14 +138,20 @@ struct StackSaveArea {
const Method* method;
union {
- /* for JNI native methods: top of local reference storage */
- Object** localRefTop;
+ /* for JNI native methods: bottom of local reference segment */
+#ifdef USE_INDIRECT_REF
+ u4 localRefCookie;
+#else
+ Object** localRefCookie;
+#endif
/* for interpreted methods: saved current PC, for exception stack
* traces and debugger traces */
const u2* currentPc;
} xtra;
+ /* Native return pointer for JIT, or 0 if interpreted */
+ const u2* returnAddr;
#ifdef PAD_SAVE_AREA
u4 pad3, pad4, pad5;
#endif
@@ -189,16 +195,18 @@ bool dvmPushLocalFrame(Thread* thread, const Method* method);
bool dvmPopLocalFrame(Thread* thread);
/*
- * Call an interpreted method from native code.
+ * Call an interpreted method from native code. If this is being called
+ * from a JNI function, references in the argument list will be converted
+ * back to pointers.
*
* "obj" should be NULL for "direct" methods.
*/
-void dvmCallMethodV(Thread* self, const Method* method, Object* obj,
- JValue* pResult, va_list args);
-void dvmCallMethodA(Thread* self, const Method* method, Object* obj,
- JValue* pResult, const jvalue* args);
void dvmCallMethod(Thread* self, const Method* method, Object* obj,
JValue* pResult, ...);
+void dvmCallMethodV(Thread* self, const Method* method, Object* obj,
+ bool fromJni, JValue* pResult, va_list args);
+void dvmCallMethodA(Thread* self, const Method* method, Object* obj,
+ bool fromJni, JValue* pResult, const jvalue* args);
/*
* Invoke a method, using the specified arguments and return type, through