author    Ricardo Cerqueira <cyanogenmod@cerqueira.org>  2013-11-01 17:26:45 +0000
committer Ricardo Cerqueira <cyanogenmod@cerqueira.org>  2013-11-01 17:26:45 +0000
commit    9003fb0f7b8a0c9375e9096871d081ada497859d (patch)
tree      f841d6101f897d393ef5098edd54f6ea226142ef /vm
parent    50ebe16b94a0d6e946146b55ad92b4ae1e28bf48 (diff)
parent    6e21232cf2bca0e73bd418413564cb140ab9ccbd (diff)
download  android_dalvik-9003fb0f7b8a0c9375e9096871d081ada497859d.tar.gz
          android_dalvik-9003fb0f7b8a0c9375e9096871d081ada497859d.tar.bz2
          android_dalvik-9003fb0f7b8a0c9375e9096871d081ada497859d.zip
Merge tag 'android-4.4_r1' into cm-11.0
Android 4.4 Release 1.0
Diffstat (limited to 'vm')
-rw-r--r--  vm/AllocTracker.cpp | 65
-rw-r--r--  vm/Android.mk | 2
-rw-r--r--  vm/CheckJni.cpp | 96
-rw-r--r--  vm/Dvm.mk | 8
-rw-r--r--  vm/DvmDex.h | 3
-rw-r--r--  vm/Globals.h | 19
-rw-r--r--  vm/Init.cpp | 64
-rw-r--r--  vm/InitRefs.cpp | 28
-rw-r--r--  vm/Jni.cpp | 88
-rw-r--r--  vm/JniInternal.h | 2
-rw-r--r--  vm/Native.cpp | 26
-rw-r--r--  vm/Profile.cpp | 258
-rw-r--r--  vm/Profile.h | 48
-rw-r--r--  vm/Thread.cpp | 4
-rw-r--r--  vm/Thread.h | 5
-rw-r--r--  vm/alloc/Alloc.cpp | 14
-rw-r--r--  vm/alloc/Alloc.h | 5
-rw-r--r--  vm/alloc/CardTable.cpp | 46
-rw-r--r--  vm/alloc/Heap.cpp | 3
-rw-r--r--  vm/alloc/HeapSource.cpp | 187
-rw-r--r--  vm/alloc/HeapSource.h | 10
-rw-r--r--  vm/alloc/MarkSweep.cpp | 5
-rw-r--r--  vm/analysis/Liveness.cpp | 3
-rw-r--r--  vm/analysis/Optimize.cpp | 7
-rw-r--r--  vm/analysis/RegisterMap.cpp | 2
-rw-r--r--  vm/analysis/VfyBasicBlock.cpp | 3
-rw-r--r--  vm/compiler/Compiler.cpp | 22
-rw-r--r--  vm/compiler/Loop.cpp | 2
-rw-r--r--  vm/compiler/codegen/arm/ArmLIR.h | 2
-rw-r--r--  vm/compiler/codegen/arm/Assemble.cpp | 7
-rw-r--r--  vm/compiler/codegen/arm/CodegenDriver.cpp | 5
-rw-r--r--  vm/compiler/codegen/arm/Thumb/Gen.cpp | 8
-rw-r--r--  vm/compiler/codegen/arm/Thumb2/Gen.cpp | 7
-rw-r--r--  vm/compiler/codegen/arm/armv5te-vfp/ArchVariant.cpp | 8
-rw-r--r--  vm/compiler/codegen/arm/armv5te/ArchVariant.cpp | 8
-rw-r--r--  vm/compiler/codegen/arm/armv7-a-neon/ArchVariant.cpp | 12
-rw-r--r--  vm/compiler/codegen/arm/armv7-a-neon/ArchVariant.h | 2
-rw-r--r--  vm/compiler/codegen/arm/armv7-a/ArchVariant.cpp | 12
-rw-r--r--  vm/compiler/codegen/arm/armv7-a/ArchVariant.h | 2
-rw-r--r--  vm/compiler/codegen/mips/mips/ArchVariant.cpp | 6
-rw-r--r--  vm/compiler/codegen/x86/CodegenInterface.cpp | 24
-rw-r--r--  vm/compiler/codegen/x86/libenc/enc_wrapper.cpp | 2
-rw-r--r--  vm/compiler/template/armv5te/TEMPLATE_MUL_LONG.S | 5
-rw-r--r--  vm/compiler/template/armv5te/TEMPLATE_RETURN.S | 2
-rw-r--r--  vm/compiler/template/armv5te/TEMPLATE_STRING_INDEXOF.S | 9
-rw-r--r--  vm/compiler/template/armv5te/footer.S | 4
-rw-r--r--  vm/compiler/template/armv7-a-neon/TemplateOpList.h | 19
-rw-r--r--  vm/compiler/template/armv7-a/TemplateOpList.h | 19
-rw-r--r--  vm/compiler/template/config-armv7-a | 3
-rw-r--r--  vm/compiler/template/config-armv7-a-neon | 3
-rw-r--r--  vm/compiler/template/ia32/footer.S | 4
-rw-r--r--  vm/compiler/template/mips/footer.S | 4
-rw-r--r--  vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S | 22
-rw-r--r--  vm/compiler/template/out/CompilerTemplateAsm-armv5te.S | 22
-rw-r--r--  vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S | 501
-rw-r--r--  vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S | 501
-rw-r--r--  vm/compiler/template/out/CompilerTemplateAsm-ia32.S | 4
-rw-r--r--  vm/compiler/template/out/CompilerTemplateAsm-mips.S | 4
-rw-r--r--  vm/dalvik | 2
-rw-r--r--  vm/hprof/Hprof.h | 4
-rw-r--r--  vm/hprof/HprofHeap.cpp | 9
-rw-r--r--  vm/interp/Interp.cpp | 9
-rw-r--r--  vm/interp/InterpState.h | 3
-rw-r--r--  vm/jdwp/JdwpHandler.cpp | 1
-rw-r--r--  vm/native/dalvik_system_DexFile.cpp | 16
-rw-r--r--  vm/native/dalvik_system_VMDebug.cpp | 141
-rw-r--r--  vm/native/dalvik_system_VMRuntime.cpp | 363
-rw-r--r--  vm/native/dalvik_system_Zygote.cpp | 22
-rw-r--r--  vm/native/java_lang_Class.cpp | 49
-rw-r--r--  vm/native/java_lang_System.cpp | 65
-rw-r--r--  vm/oo/Array.cpp | 24
-rw-r--r--  vm/oo/Class.cpp | 3
-rw-r--r--  vm/oo/Object.h | 8
-rw-r--r--  vm/os/android.cpp | 2
-rw-r--r--  vm/reflect/Annotation.cpp | 41
-rw-r--r--  vm/reflect/Reflect.cpp | 15
76 files changed, 1484 insertions(+), 1549 deletions(-)
diff --git a/vm/AllocTracker.cpp b/vm/AllocTracker.cpp
index 94984ecd9..8b86c5e81 100644
--- a/vm/AllocTracker.cpp
+++ b/vm/AllocTracker.cpp
@@ -43,8 +43,14 @@
*/
#include "Dalvik.h"
+#ifdef HAVE_ANDROID_OS
+#include "cutils/properties.h"
+static bool isPowerOfTwo(int x) { return (x & (x - 1)) == 0; }
+#endif
+
#define kMaxAllocRecordStackDepth 16 /* max 255 */
-#define kNumAllocRecords 512 /* MUST be power of 2 */
+
+#define kDefaultNumAllocRecords 64*1024 /* MUST be power of 2 */
/*
* Record the details of an allocation.
@@ -59,14 +65,6 @@ struct AllocRecord {
const Method* method; /* which method we're executing in */
int pc; /* current execution offset, in 16-bit units */
} stackElem[kMaxAllocRecordStackDepth];
-
- /*
- * This was going to be either wall-clock time in seconds or monotonic
- * time in milliseconds since the VM started, to give a rough sense for
- * how long ago an allocation happened. This adds a system call per
- * allocation, which is too much overhead.
- */
- //u4 timestamp;
};
/*
@@ -100,6 +98,28 @@ void dvmAllocTrackerShutdown()
* ===========================================================================
*/
+static int getAllocRecordMax() {
+#ifdef HAVE_ANDROID_OS
+ // Check whether there's a system property overriding the number of records.
+ const char* propertyName = "dalvik.vm.allocTrackerMax";
+ char allocRecordMaxString[PROPERTY_VALUE_MAX];
+ if (property_get(propertyName, allocRecordMaxString, "") > 0) {
+ char* end;
+ size_t value = strtoul(allocRecordMaxString, &end, 10);
+ if (*end != '\0') {
+ ALOGE("Ignoring %s '%s' --- invalid", propertyName, allocRecordMaxString);
+ return kDefaultNumAllocRecords;
+ }
+ if (!isPowerOfTwo(value)) {
+ ALOGE("Ignoring %s '%s' --- not power of two", propertyName, allocRecordMaxString);
+ return kDefaultNumAllocRecords;
+ }
+ return value;
+ }
+#endif
+ return kDefaultNumAllocRecords;
+}
+
/*
* Enable allocation tracking. Does nothing if tracking is already enabled.
*
@@ -111,12 +131,13 @@ bool dvmEnableAllocTracker()
dvmLockMutex(&gDvm.allocTrackerLock);
if (gDvm.allocRecords == NULL) {
+ gDvm.allocRecordMax = getAllocRecordMax();
+
ALOGI("Enabling alloc tracker (%d entries, %d frames --> %d bytes)",
- kNumAllocRecords, kMaxAllocRecordStackDepth,
- sizeof(AllocRecord) * kNumAllocRecords);
+ gDvm.allocRecordMax, kMaxAllocRecordStackDepth,
+ sizeof(AllocRecord) * gDvm.allocRecordMax);
gDvm.allocRecordHead = gDvm.allocRecordCount = 0;
- gDvm.allocRecords =
- (AllocRecord*) malloc(sizeof(AllocRecord) * kNumAllocRecords);
+ gDvm.allocRecords = (AllocRecord*) malloc(sizeof(AllocRecord) * gDvm.allocRecordMax);
if (gDvm.allocRecords == NULL)
result = false;
@@ -199,7 +220,7 @@ void dvmDoTrackAllocation(ClassObject* clazz, size_t size)
}
/* advance and clip */
- if (++gDvm.allocRecordHead == kNumAllocRecords)
+ if (++gDvm.allocRecordHead == gDvm.allocRecordMax)
gDvm.allocRecordHead = 0;
AllocRecord* pRec = &gDvm.allocRecords[gDvm.allocRecordHead];
@@ -209,7 +230,7 @@ void dvmDoTrackAllocation(ClassObject* clazz, size_t size)
pRec->threadId = self->threadId;
getStackFrames(self, pRec);
- if (gDvm.allocRecordCount < kNumAllocRecords)
+ if (gDvm.allocRecordCount < gDvm.allocRecordMax)
gDvm.allocRecordCount++;
dvmUnlockMutex(&gDvm.allocTrackerLock);
@@ -252,7 +273,7 @@ Message header (all values big-endian):
followed by UTF-16 data.
We send up 16-bit unsigned indexes into string tables. In theory there
-can be (kMaxAllocRecordStackDepth * kNumAllocRecords) unique strings in
+can be (kMaxAllocRecordStackDepth * gDvm.allocRecordMax) unique strings in
each table, but in practice there should be far fewer.
The chief reason for using a string table here is to keep the size of
@@ -276,12 +297,12 @@ const int kStackFrameLen = 8;
* from it.
*
* We need to handle underflow in our circular buffer, so we add
- * kNumAllocRecords and then mask it back down.
+ * gDvm.allocRecordMax and then mask it back down.
*/
inline static int headIndex()
{
- return (gDvm.allocRecordHead+1 + kNumAllocRecords - gDvm.allocRecordCount)
- & (kNumAllocRecords-1);
+ return (gDvm.allocRecordHead+1 + gDvm.allocRecordMax - gDvm.allocRecordCount)
+ & (gDvm.allocRecordMax-1);
}
/*
@@ -348,7 +369,7 @@ static bool populateStringTables(PointerSet* classNames,
fileCount++;
}
- idx = (idx + 1) & (kNumAllocRecords-1);
+ idx = (idx + 1) & (gDvm.allocRecordMax-1);
}
ALOGI("class %d/%d, method %d/%d, file %d/%d",
@@ -431,7 +452,7 @@ static size_t generateBaseOutput(u1* ptr, size_t baseLen,
ptr += kStackFrameLen;
}
- idx = (idx + 1) & (kNumAllocRecords-1);
+ idx = (idx + 1) & (gDvm.allocRecordMax-1);
}
return ptr - origPtr;
@@ -641,7 +662,7 @@ void dvmDumpTrackedAllocations(bool enable)
if ((count % 5) == 0)
usleep(40000);
- idx = (idx + 1) & (kNumAllocRecords-1);
+ idx = (idx + 1) & (gDvm.allocRecordMax-1);
}
dvmUnlockMutex(&gDvm.allocTrackerLock);
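Note on the AllocTracker.cpp hunks above: the fixed kNumAllocRecords constant becomes gDvm.allocRecordMax, which can be overridden at runtime via the dalvik.vm.allocTrackerMax system property. The value must remain a power of two because the circular buffer wraps indices with a bitmask instead of a modulo. Below is a minimal standalone sketch of that masking scheme; the names are illustrative only and are not part of the patch.

#include <assert.h>
#include <stddef.h>

// Sketch (not from the patch) of the allocation tracker's ring-buffer math.
// With a power-of-two capacity, (i & (max - 1)) == (i % max), which is why
// getAllocRecordMax() rejects property values that are not powers of two.
struct RingIndex {
    size_t max;    // number of slots, must be a power of two (e.g. 64*1024)
    size_t head;   // most recently written slot
    size_t count;  // number of valid entries, never exceeds max

    static bool isPowerOfTwo(size_t x) { return x != 0 && (x & (x - 1)) == 0; }

    explicit RingIndex(size_t m) : max(m), head(0), count(0) { assert(isPowerOfTwo(m)); }

    // Advance and clip, as dvmDoTrackAllocation() does with allocRecordHead.
    void push() {
        head = (head + 1) & (max - 1);
        if (count < max)
            count++;
    }

    // Oldest valid slot, mirroring headIndex(): adding max before masking
    // keeps the intermediate value from underflowing.
    size_t oldest() const {
        return (head + 1 + max - count) & (max - 1);
    }
};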
diff --git a/vm/Android.mk b/vm/Android.mk
index c9b510b46..e5d5448ce 100644
--- a/vm/Android.mk
+++ b/vm/Android.mk
@@ -111,7 +111,7 @@ ifeq ($(WITH_HOST_DALVIK),true)
WITH_JIT := true
include $(LOCAL_PATH)/Dvm.mk
- LOCAL_SHARED_LIBRARIES += libcrypto libssl libicuuc libicui18n
+ LOCAL_SHARED_LIBRARIES += libnativehelper libcrypto-host libssl-host libicuuc-host libicui18n-host
LOCAL_LDLIBS := -lpthread -ldl
ifeq ($(HOST_OS),linux)
diff --git a/vm/CheckJni.cpp b/vm/CheckJni.cpp
index 68d6680d6..dac242a79 100644
--- a/vm/CheckJni.cpp
+++ b/vm/CheckJni.cpp
@@ -239,7 +239,7 @@ public:
void checkFieldTypeForGet(jfieldID fid, const char* expectedSignature, bool isStatic) {
if (fid == NULL) {
- ALOGW("JNI WARNING: null jfieldID");
+ ALOGW("JNI WARNING: null jfieldID (%s)", mFunctionName);
showLocation();
abortMaybe();
}
@@ -282,7 +282,7 @@ public:
*/
void checkFieldTypeForSet(jobject jobj, jfieldID fieldID, PrimitiveType prim, bool isStatic) {
if (fieldID == NULL) {
- ALOGW("JNI WARNING: null jfieldID");
+ ALOGW("JNI WARNING: null jfieldID (%s)", mFunctionName);
showLocation();
abortMaybe();
}
@@ -298,8 +298,8 @@ public:
* and valid.
*/
if (obj != NULL && !dvmIsHeapAddress(obj)) {
- ALOGW("JNI WARNING: field operation on invalid %s reference (%p)",
- indirectRefKindName(jobj), jobj);
+ ALOGW("JNI WARNING: field operation (%s) on invalid %s reference (%p)",
+ mFunctionName, indirectRefKindName(jobj), jobj);
printWarn = true;
} else {
ClassObject* fieldClass = dvmFindLoadedClass(field->signature);
@@ -309,8 +309,8 @@ public:
assert(objClass != NULL);
if (!dvmInstanceof(objClass, fieldClass)) {
- ALOGW("JNI WARNING: set field '%s' expected type %s, got %s",
- field->name, field->signature, objClass->descriptor);
+ ALOGW("JNI WARNING: %s for field '%s' expected type %s, got %s",
+ mFunctionName, field->name, field->signature, objClass->descriptor);
printWarn = true;
}
}
@@ -320,9 +320,9 @@ public:
printWarn = true;
} else if (isStatic && !dvmIsStaticField(field)) {
if (isStatic) {
- ALOGW("JNI WARNING: accessing non-static field %s as static", field->name);
+ ALOGW("JNI WARNING: %s for non-static field '%s'", mFunctionName, field->name);
} else {
- ALOGW("JNI WARNING: accessing static field %s as non-static", field->name);
+ ALOGW("JNI WARNING: %s for static field '%s'", mFunctionName, field->name);
}
printWarn = true;
}
@@ -343,7 +343,7 @@ public:
Object* obj = dvmDecodeIndirectRef(self(), jobj);
if (!dvmIsHeapAddress(obj)) {
- ALOGW("JNI ERROR: field operation on invalid reference (%p)", jobj);
+ ALOGW("JNI ERROR: %s on invalid reference (%p)", mFunctionName, jobj);
dvmAbort();
}
@@ -361,8 +361,8 @@ public:
clazz = clazz->super;
}
- ALOGW("JNI WARNING: instance fieldID %p not valid for class %s",
- fieldID, obj->clazz->descriptor);
+ ALOGW("JNI WARNING: instance jfieldID %p not valid for class %s (%s)",
+ fieldID, obj->clazz->descriptor, mFunctionName);
showLocation();
abortMaybe();
}
@@ -386,13 +386,13 @@ public:
bool printWarn = false;
if (*expectedType != method->shorty[0]) {
- ALOGW("JNI WARNING: expected return type '%s'", expectedType);
+ ALOGW("JNI WARNING: %s expected return type '%s'", mFunctionName, expectedType);
printWarn = true;
} else if (isStatic && !dvmIsStaticMethod(method)) {
if (isStatic) {
- ALOGW("JNI WARNING: calling non-static method with static call");
+ ALOGW("JNI WARNING: calling non-static method with static call %s", mFunctionName);
} else {
- ALOGW("JNI WARNING: calling static method with non-static call");
+ ALOGW("JNI WARNING: calling static method with non-static call %s", mFunctionName);
}
printWarn = true;
}
@@ -417,8 +417,8 @@ public:
StaticField* base = &clazz->sfields[0];
int fieldCount = clazz->sfieldCount;
if ((StaticField*) fieldID < base || (StaticField*) fieldID >= base + fieldCount) {
- ALOGW("JNI WARNING: static fieldID %p not valid for class %s",
- fieldID, clazz->descriptor);
+ ALOGW("JNI WARNING: static fieldID %p not valid for class %s (%s)",
+ fieldID, clazz->descriptor, mFunctionName);
ALOGW(" base=%p count=%d", base, fieldCount);
showLocation();
abortMaybe();
@@ -441,8 +441,8 @@ public:
const Method* method = (const Method*) methodID;
if (!dvmInstanceof(clazz, method->clazz)) {
- ALOGW("JNI WARNING: can't call static %s.%s on class %s",
- method->clazz->descriptor, method->name, clazz->descriptor);
+ ALOGW("JNI WARNING: can't call static %s.%s on class %s (%s)",
+ method->clazz->descriptor, method->name, clazz->descriptor, mFunctionName);
showLocation();
// no abort?
}
@@ -462,8 +462,8 @@ public:
const Method* method = (const Method*) methodID;
if (!dvmInstanceof(obj->clazz, method->clazz)) {
- ALOGW("JNI WARNING: can't call %s.%s on instance of %s",
- method->clazz->descriptor, method->name, obj->clazz->descriptor);
+ ALOGW("JNI WARNING: can't call %s.%s on instance of %s (%s)",
+ method->clazz->descriptor, method->name, obj->clazz->descriptor, mFunctionName);
showLocation();
abortMaybe();
}
@@ -737,7 +737,7 @@ private:
*/
void checkArray(jarray jarr) {
if (jarr == NULL) {
- ALOGW("JNI WARNING: received null array");
+ ALOGW("JNI WARNING: %s received null array", mFunctionName);
showLocation();
abortMaybe();
return;
@@ -748,12 +748,12 @@ private:
Object* obj = dvmDecodeIndirectRef(self(), jarr);
if (!dvmIsHeapAddress(obj)) {
- ALOGW("JNI WARNING: jarray is an invalid %s reference (%p)",
- indirectRefKindName(jarr), jarr);
+ ALOGW("JNI WARNING: %s: jarray is an invalid %s reference (%p)",
+ mFunctionName, indirectRefKindName(jarr), jarr);
printWarn = true;
} else if (obj->clazz->descriptor[0] != '[') {
- ALOGW("JNI WARNING: jarray arg has wrong type (expected array, got %s)",
- obj->clazz->descriptor);
+ ALOGW("JNI WARNING: %s: jarray arg has wrong type (expected array, got %s)",
+ mFunctionName, obj->clazz->descriptor);
printWarn = true;
}
@@ -789,17 +789,18 @@ private:
bool printWarn = false;
if (dvmGetJNIRefType(self(), jobj) == JNIInvalidRefType) {
- ALOGW("JNI WARNING: %p is not a valid JNI reference", jobj);
+ ALOGW("JNI WARNING: %p is not a valid JNI reference (%s)", jobj, mFunctionName);
printWarn = true;
} else {
Object* obj = dvmDecodeIndirectRef(self(), jobj);
if (obj == kInvalidIndirectRefObject) {
- ALOGW("JNI WARNING: native code passing in invalid reference %p", jobj);
+ ALOGW("JNI WARNING: native code passing in invalid reference %p (%s)",
+ jobj, mFunctionName);
printWarn = true;
} else if (obj != NULL && !dvmIsHeapAddress(obj)) {
// TODO: when we remove workAroundAppJniBugs, this should be impossible.
- ALOGW("JNI WARNING: native code passing in reference to invalid object %p %p",
- jobj, obj);
+ ALOGW("JNI WARNING: native code passing in reference to invalid object %p %p (%s)",
+ jobj, obj, mFunctionName);
printWarn = true;
}
}
@@ -843,17 +844,17 @@ private:
*/
bool printWarn = false;
if (threadEnv == NULL) {
- ALOGE("JNI ERROR: non-VM thread making JNI calls");
+ ALOGE("JNI ERROR: non-VM thread making JNI call (%s)", mFunctionName);
// don't set printWarn -- it'll try to call showLocation()
dvmAbort();
} else if ((JNIEnvExt*) mEnv != threadEnv) {
if (dvmThreadSelf()->threadId != threadEnv->envThreadId) {
- ALOGE("JNI: threadEnv != thread->env?");
+ ALOGE("JNI: threadEnv != thread->env? (%s)", mFunctionName);
dvmAbort();
}
- ALOGW("JNI WARNING: threadid=%d using env from threadid=%d",
- threadEnv->envThreadId, ((JNIEnvExt*) mEnv)->envThreadId);
+ ALOGW("JNI WARNING: threadid=%d using env from threadid=%d (%s)",
+ threadEnv->envThreadId, ((JNIEnvExt*) mEnv)->envThreadId, mFunctionName);
printWarn = true;
// If we're keeping broken code limping along, we need to suppress the abort...
@@ -865,8 +866,8 @@ private:
//dvmThrowRuntimeException("invalid use of JNI env ptr");
} else if (((JNIEnvExt*) mEnv)->self != dvmThreadSelf()) {
/* correct JNIEnv*; make sure the "self" pointer is correct */
- ALOGE("JNI ERROR: env->self != thread-self (%p vs. %p)",
- ((JNIEnvExt*) mEnv)->self, dvmThreadSelf());
+ ALOGE("JNI ERROR: env->self != thread-self (%p vs. %p) (%s)",
+ ((JNIEnvExt*) mEnv)->self, dvmThreadSelf(), mFunctionName);
dvmAbort();
}
@@ -879,8 +880,8 @@ private:
break;
case kFlag_CritBad: // not okay to call
if (threadEnv->critical) {
- ALOGW("JNI WARNING: threadid=%d using JNI after critical get",
- threadEnv->envThreadId);
+ ALOGW("JNI WARNING: threadid=%d using JNI after critical get (%s)",
+ threadEnv->envThreadId, mFunctionName);
printWarn = true;
}
break;
@@ -891,8 +892,8 @@ private:
case kFlag_CritRelease: // this is a "release" call
threadEnv->critical--;
if (threadEnv->critical < 0) {
- ALOGW("JNI WARNING: threadid=%d called too many crit releases",
- threadEnv->envThreadId);
+ ALOGW("JNI WARNING: threadid=%d called too many critical releases (%s)",
+ threadEnv->envThreadId, mFunctionName);
printWarn = true;
}
break;
@@ -906,7 +907,7 @@ private:
*/
bool printException = false;
if ((flags & kFlag_ExcepOkay) == 0 && dvmCheckException(dvmThreadSelf())) {
- ALOGW("JNI WARNING: JNI method called with exception pending");
+ ALOGW("JNI WARNING: JNI function %s called with exception pending", mFunctionName);
printWarn = true;
printException = true;
}
@@ -929,7 +930,7 @@ private:
void checkUtfString(const char* bytes, bool nullable) {
if (bytes == NULL) {
if (!nullable) {
- ALOGW("JNI WARNING: non-nullable const char* was NULL");
+ ALOGW("JNI WARNING: non-nullable const char* was NULL (%s)", mFunctionName);
showLocation();
abortMaybe();
}
@@ -939,7 +940,8 @@ private:
const char* errorKind = NULL;
u1 utf8 = checkUtfBytes(bytes, &errorKind);
if (errorKind != NULL) {
- ALOGW("JNI WARNING: input is not valid Modified UTF-8: illegal %s byte %#x", errorKind, utf8);
+ ALOGW("JNI WARNING: %s input is not valid Modified UTF-8: illegal %s byte %#x",
+ mFunctionName, errorKind, utf8);
ALOGW(" string: '%s'", bytes);
showLocation();
abortMaybe();
@@ -955,7 +957,7 @@ private:
*/
void checkInstance(jobject jobj, ClassObject* expectedClass, const char* argName) {
if (jobj == NULL) {
- ALOGW("JNI WARNING: received null %s", argName);
+ ALOGW("JNI WARNING: received null %s (%s)", argName, mFunctionName);
showLocation();
abortMaybe();
return;
@@ -966,12 +968,12 @@ private:
Object* obj = dvmDecodeIndirectRef(self(), jobj);
if (!dvmIsHeapAddress(obj)) {
- ALOGW("JNI WARNING: %s is an invalid %s reference (%p)",
- argName, indirectRefKindName(jobj), jobj);
+ ALOGW("JNI WARNING: %s is an invalid %s reference (%p) (%s)",
+ argName, indirectRefKindName(jobj), jobj, mFunctionName);
printWarn = true;
} else if (obj->clazz != expectedClass) {
- ALOGW("JNI WARNING: %s arg has wrong type (expected %s, got %s)",
- argName, expectedClass->descriptor, obj->clazz->descriptor);
+ ALOGW("JNI WARNING: %s arg has wrong type (expected %s, got %s) (%s)",
+ argName, expectedClass->descriptor, obj->clazz->descriptor, mFunctionName);
printWarn = true;
}
diff --git a/vm/Dvm.mk b/vm/Dvm.mk
index 2f53391b8..2af05823b 100644
--- a/vm/Dvm.mk
+++ b/vm/Dvm.mk
@@ -24,10 +24,15 @@
#
# Compiler defines.
#
-LOCAL_CFLAGS += -fstrict-aliasing -Wstrict-aliasing=2 -fno-align-jumps
+
+LOCAL_CFLAGS += -fstrict-aliasing -Wstrict-aliasing=2
LOCAL_CFLAGS += -Wall -Wextra -Wno-unused-parameter
LOCAL_CFLAGS += -DARCH_VARIANT=\"$(dvm_arch_variant)\"
+ifneq ($(strip $(LOCAL_CLANG)),true)
+LOCAL_CFLAGS += -fno-align-jumps
+endif
+
#
# Optional features. These may impact the size or performance of the VM.
#
@@ -222,7 +227,6 @@ ifeq ($(WITH_JIT),true)
endif
LOCAL_C_INCLUDES += \
- $(JNI_H_INCLUDE) \
dalvik \
dalvik/vm \
external/zlib \
diff --git a/vm/DvmDex.h b/vm/DvmDex.h
index b3b596048..1785c26d0 100644
--- a/vm/DvmDex.h
+++ b/vm/DvmDex.h
@@ -21,6 +21,7 @@
#ifndef DALVIK_DVMDEX_H_
#define DALVIK_DVMDEX_H_
+#include "jni.h"
#include "libdex/DexFile.h"
/* extern */
@@ -61,6 +62,8 @@ struct DvmDex {
bool isMappedReadOnly;
MemMapping memMap;
+ jobject dex_object;
+
/* lock ensuring mutual exclusion during updates */
pthread_mutex_t modLock;
};
diff --git a/vm/Globals.h b/vm/Globals.h
index 234a57d84..29f7356ad 100644
--- a/vm/Globals.h
+++ b/vm/Globals.h
@@ -90,6 +90,7 @@ struct DvmGlobals {
size_t heapStartingSize;
size_t heapMaximumSize;
size_t heapGrowthLimit;
+ bool lowMemoryMode;
double heapTargetUtilization;
size_t heapMinFree;
size_t heapMaxFree;
@@ -124,7 +125,6 @@ struct DvmGlobals {
void (*abortHook)(void);
bool (*isSensitiveThreadHook)(void);
- int jniGrefLimit; // 0 means no limit
char* jniTrace;
bool reduceSignals;
bool noQuitHandler;
@@ -268,10 +268,11 @@ struct DvmGlobals {
ClassObject* classJavaLangReflectMethod;
ClassObject* classJavaLangReflectMethodArray;
ClassObject* classJavaLangReflectProxy;
+ ClassObject* classJavaLangSystem;
ClassObject* classJavaNioDirectByteBuffer;
- ClassObject* classOrgApacheHarmonyLangAnnotationAnnotationFactory;
- ClassObject* classOrgApacheHarmonyLangAnnotationAnnotationMember;
- ClassObject* classOrgApacheHarmonyLangAnnotationAnnotationMemberArray;
+ ClassObject* classLibcoreReflectAnnotationFactory;
+ ClassObject* classLibcoreReflectAnnotationMember;
+ ClassObject* classLibcoreReflectAnnotationMemberArray;
ClassObject* classOrgApacheHarmonyDalvikDdmcChunk;
ClassObject* classOrgApacheHarmonyDalvikDdmcDdmServer;
ClassObject* classJavaLangRefFinalizerReference;
@@ -407,6 +408,9 @@ struct DvmGlobals {
/* field offsets - java.lang.reflect.Proxy */
int offJavaLangReflectProxy_h;
+ /* direct method pointer - java.lang.System.runFinalization */
+ Method* methJavaLangSystem_runFinalization;
+
/* field offsets - java.io.FileDescriptor */
int offJavaIoFileDescriptor_descriptor;
@@ -525,8 +529,6 @@ struct DvmGlobals {
IndirectRefTable jniWeakGlobalRefTable;
pthread_mutex_t jniGlobalRefLock;
pthread_mutex_t jniWeakGlobalRefLock;
- int jniGlobalRefHiMark;
- int jniGlobalRefLoMark;
/*
* JNI pinned object table (used for primitive arrays).
@@ -649,6 +651,7 @@ struct DvmGlobals {
AllocRecord* allocRecords;
int allocRecordHead; /* most-recently-added entry */
int allocRecordCount; /* #of valid entries */
+ int allocRecordMax; /* Number of allocated entries. */
/*
* When a profiler is enabled, this is incremented. Distinct profilers
@@ -738,6 +741,8 @@ extern struct DvmGlobals gDvm;
#if defined(WITH_JIT)
+#define DEFAULT_CODE_CACHE_SIZE 0xffffffff
+
/* Trace profiling modes. Ordering matters - off states before on states */
enum TraceProfilingModes {
kTraceProfilingDisabled = 0, // Not profiling
@@ -802,7 +807,7 @@ struct DvmJitGlobals {
/* How many entries in the JitEntryTable are in use */
unsigned int jitTableEntriesUsed;
- /* Bytes allocated for the code cache */
+ /* Max bytes allocated for the code cache. Rough rule of thumb: 1K per 1M of system RAM */
unsigned int codeCacheSize;
/* Trigger for trace selection */
diff --git a/vm/Init.cpp b/vm/Init.cpp
index 9169a5da6..f1762c96c 100644
--- a/vm/Init.cpp
+++ b/vm/Init.cpp
@@ -36,6 +36,7 @@
#include "test/Test.h"
#include "mterp/Mterp.h"
#include "Hash.h"
+#include "JniConstants.h"
#if defined(WITH_JIT)
#include "compiler/codegen/Optimizer.h"
@@ -120,8 +121,6 @@ static void usage(const char* progName)
dvmFprintf(stderr, " -Xzygote\n");
dvmFprintf(stderr, " -Xdexopt:{none,verified,all,full}\n");
dvmFprintf(stderr, " -Xnoquithandler\n");
- dvmFprintf(stderr,
- " -Xjnigreflimit:N (must be multiple of 100, >= 200)\n");
dvmFprintf(stderr, " -Xjniopts:{warnonly,forcecopy}\n");
dvmFprintf(stderr, " -Xjnitrace:substring (eg NativeClass or nativeMethod)\n");
dvmFprintf(stderr, " -Xstacktracefile:<filename>\n");
@@ -140,6 +139,7 @@ static void usage(const char* progName)
"[,hexopvalue[-endvalue]]*\n");
dvmFprintf(stderr, " -Xincludeselectedmethod\n");
dvmFprintf(stderr, " -Xjitthreshold:decimalvalue\n");
+ dvmFprintf(stderr, " -Xjitcodecachesize:decimalvalueofkbytes\n");
dvmFprintf(stderr, " -Xjitblocking\n");
dvmFprintf(stderr, " -Xjitmethod:signature[,signature]* "
"(eg Ljava/lang/String\\;replace)\n");
@@ -940,6 +940,8 @@ static int processOptions(int argc, const char* const argv[],
dvmFprintf(stderr, "Invalid -XX:HeapMaxFree option '%s'\n", argv[i]);
return -1;
}
+ } else if (strcmp(argv[i], "-XX:LowMemoryMode") == 0) {
+ gDvm.lowMemoryMode = true;
} else if (strncmp(argv[i], "-XX:HeapTargetUtilization=", 26) == 0) {
const char* start = argv[i] + 26;
const char* end = start;
@@ -1075,13 +1077,7 @@ static int processOptions(int argc, const char* const argv[],
return -1;
}
} else if (strncmp(argv[i], "-Xjnigreflimit:", 15) == 0) {
- int lim = atoi(argv[i] + 15);
- if (lim < 200 || (lim % 100) != 0) {
- dvmFprintf(stderr, "Bad value for -Xjnigreflimit: '%s'\n",
- argv[i]+15);
- return -1;
- }
- gDvm.jniGrefLimit = lim;
+ // Ignored for backwards compatibility.
} else if (strncmp(argv[i], "-Xjnitrace:", 11) == 0) {
gDvm.jniTrace = strdup(argv[i] + 11);
} else if (strcmp(argv[i], "-Xlog-stdio") == 0) {
@@ -1125,6 +1121,11 @@ static int processOptions(int argc, const char* const argv[],
gDvmJit.blockingMode = true;
} else if (strncmp(argv[i], "-Xjitthreshold:", 15) == 0) {
gDvmJit.threshold = atoi(argv[i] + 15);
+ } else if (strncmp(argv[i], "-Xjitcodecachesize:", 19) == 0) {
+ gDvmJit.codeCacheSize = atoi(argv[i] + 19) * 1024;
+ if (gDvmJit.codeCacheSize == 0) {
+ gDvm.executionMode = kExecutionModeInterpFast;
+ }
} else if (strncmp(argv[i], "-Xincludeselectedop", 19) == 0) {
gDvmJit.includeSelectedOp = true;
} else if (strncmp(argv[i], "-Xincludeselectedmethod", 23) == 0) {
@@ -1240,6 +1241,7 @@ static void setCommandLineDefaults()
gDvm.heapStartingSize = 2 * 1024 * 1024; // Spec says 16MB; too big for us.
gDvm.heapMaximumSize = 16 * 1024 * 1024; // Spec says 75% physical mem
gDvm.heapGrowthLimit = 0; // 0 means no growth limit
+ gDvm.lowMemoryMode = false;
gDvm.stackSize = kDefaultStackSize;
gDvm.mainThreadStackSize = kDefaultStackSize;
// When the heap is less than the maximum or growth limited size,
@@ -1280,6 +1282,7 @@ static void setCommandLineDefaults()
gDvmJit.includeSelectedOffset = false;
gDvmJit.methodTable = NULL;
gDvmJit.classTable = NULL;
+ gDvmJit.codeCacheSize = DEFAULT_CODE_CACHE_SIZE;
gDvm.constInit = false;
gDvm.commonInit = false;
@@ -1336,7 +1339,7 @@ static void blockSignals()
#if defined(WITH_JIT) && defined(WITH_JIT_TUNING)
sigaddset(&mask, SIGUSR2); // used to investigate JIT internals
#endif
- //sigaddset(&mask, SIGPIPE);
+ sigaddset(&mask, SIGPIPE);
cc = sigprocmask(SIG_BLOCK, &mask, NULL);
assert(cc == 0);
@@ -1621,6 +1624,23 @@ static bool registerSystemNatives(JNIEnv* pEnv)
// Must set this before allowing JNI-based method registration.
self->status = THREAD_NATIVE;
+ // First set up JniConstants, which is used by libcore.
+ JniConstants::init(pEnv);
+
+ // Set up our single JNI method.
+ // TODO: factor this out if we add more.
+ jclass c = pEnv->FindClass("java/lang/Class");
+ if (c == NULL) {
+ dvmAbort();
+ }
+ JNIEXPORT jobject JNICALL Java_java_lang_Class_getDex(JNIEnv* env, jclass javaClass);
+ const JNINativeMethod Java_java_lang_Class[] = {
+ { "getDex", "()Lcom/android/dex/Dex;", (void*) Java_java_lang_Class_getDex },
+ };
+ if (pEnv->RegisterNatives(c, Java_java_lang_Class, 1) != JNI_OK) {
+ dvmAbort();
+ }
+
// Most JNI libraries can just use System.loadLibrary, but you can't
// if you're the library that implements System.loadLibrary!
loadJniLibrary("javacore");
@@ -1688,7 +1708,7 @@ static bool initZygote()
const char* target_base = getenv("EMULATED_STORAGE_TARGET");
if (target_base != NULL) {
if (mount("tmpfs", target_base, "tmpfs", MS_NOSUID | MS_NODEV,
- "uid=0,gid=1028,mode=0050") == -1) {
+ "uid=0,gid=1028,mode=0751") == -1) {
SLOGE("Failed to mount tmpfs to %s: %s", target_base, strerror(errno));
return -1;
}
@@ -1716,13 +1736,10 @@ static bool initZygote()
#ifdef HAVE_ANDROID_OS
if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) < 0) {
- if (errno == EINVAL) {
- SLOGW("PR_SET_NO_NEW_PRIVS failed. "
- "Is your kernel compiled correctly?: %s", strerror(errno));
- // Don't return -1 here, since it's expected that not all
- // kernels will support this option.
- } else {
- SLOGW("PR_SET_NO_NEW_PRIVS failed: %s", strerror(errno));
+ // Older kernels don't understand PR_SET_NO_NEW_PRIVS and return
+ // EINVAL. Don't die on such kernels.
+ if (errno != EINVAL) {
+ SLOGE("PR_SET_NO_NEW_PRIVS failed: %s", strerror(errno));
return -1;
}
}
@@ -2142,17 +2159,6 @@ void dvmAbort()
*/
dvmPrintNativeBackTrace();
- /*
- * If we call abort(), all threads in the process receives a SIBABRT.
- * debuggerd dumps the stack trace of the main thread, whether or not
- * that was the thread that failed.
- *
- * By stuffing a value into a bogus address, we cause a segmentation
- * fault in the current thread, and get a useful log from debuggerd.
- * We can also trivially tell the difference between a VM crash and
- * a deliberate abort by looking at the fault address.
- */
- *((char*)0xdeadd00d) = result;
abort();
/* notreached */
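Aside on the -Xjitcodecachesize hunk above: the option value is given in decimal kilobytes and multiplied by 1024, and a value of zero switches execution to the fast interpreter instead of the JIT. A small self-contained sketch of that interpretation follows; the argument string is a hypothetical invocation, not taken from the patch.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Sketch of how -Xjitcodecachesize:<kbytes> is interpreted by the new code:
// kilobytes scaled to bytes, with zero meaning "no code cache, interpret".
int main()
{
    const char* arg = "-Xjitcodecachesize:512";  // hypothetical command-line option
    unsigned int codeCacheSize = atoi(arg + strlen("-Xjitcodecachesize:")) * 1024;
    if (codeCacheSize == 0) {
        printf("code cache disabled; falling back to the interpreter\n");
    } else {
        printf("JIT code cache size: %u bytes\n", codeCacheSize);  // prints 524288
    }
    return 0;
}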
diff --git a/vm/InitRefs.cpp b/vm/InitRefs.cpp
index 9c1e1cb9f..08c28f856 100644
--- a/vm/InitRefs.cpp
+++ b/vm/InitRefs.cpp
@@ -127,17 +127,14 @@ static bool initClassReferences() {
{ &gDvm.classJavaLangReflectMethod, "Ljava/lang/reflect/Method;" },
{ &gDvm.classJavaLangReflectMethodArray, "[Ljava/lang/reflect/Method;"},
{ &gDvm.classJavaLangReflectProxy, "Ljava/lang/reflect/Proxy;" },
+ { &gDvm.classJavaLangSystem, "Ljava/lang/System;" },
{ &gDvm.classJavaNioDirectByteBuffer, "Ljava/nio/DirectByteBuffer;" },
- { &gDvm.classOrgApacheHarmonyDalvikDdmcChunk,
- "Lorg/apache/harmony/dalvik/ddmc/Chunk;" },
+ { &gDvm.classOrgApacheHarmonyDalvikDdmcChunk, "Lorg/apache/harmony/dalvik/ddmc/Chunk;" },
{ &gDvm.classOrgApacheHarmonyDalvikDdmcDdmServer,
"Lorg/apache/harmony/dalvik/ddmc/DdmServer;" },
- { &gDvm.classOrgApacheHarmonyLangAnnotationAnnotationFactory,
- "Lorg/apache/harmony/lang/annotation/AnnotationFactory;" },
- { &gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMember,
- "Lorg/apache/harmony/lang/annotation/AnnotationMember;" },
- { &gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMemberArray,
- "[Lorg/apache/harmony/lang/annotation/AnnotationMember;" },
+ { &gDvm.classLibcoreReflectAnnotationFactory, "Llibcore/reflect/AnnotationFactory;" },
+ { &gDvm.classLibcoreReflectAnnotationMember, "Llibcore/reflect/AnnotationMember;" },
+ { &gDvm.classLibcoreReflectAnnotationMemberArray, "[Llibcore/reflect/AnnotationMember;" },
{ NULL, NULL }
};
@@ -325,16 +322,16 @@ static bool initConstructorReferences() {
{ &gDvm.methJavaLangStackTraceElement_init, "Ljava/lang/StackTraceElement;",
"(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;I)V" },
{ &gDvm.methJavaLangReflectConstructor_init, "Ljava/lang/reflect/Constructor;",
- "(Ljava/lang/Class;[Ljava/lang/Class;[Ljava/lang/Class;I)V" },
+ "(Ljava/lang/Class;[Ljava/lang/Class;[Ljava/lang/Class;II)V" },
{ &gDvm.methJavaLangReflectField_init, "Ljava/lang/reflect/Field;",
- "(Ljava/lang/Class;Ljava/lang/Class;Ljava/lang/String;I)V" },
+ "(Ljava/lang/Class;Ljava/lang/Class;Ljava/lang/String;II)V" },
{ &gDvm.methJavaLangReflectMethod_init, "Ljava/lang/reflect/Method;",
"(Ljava/lang/Class;[Ljava/lang/Class;[Ljava/lang/Class;Ljava/lang/Class;"
- "Ljava/lang/String;I)V" },
+ "Ljava/lang/String;II)V" },
{ &gDvm.methJavaNioDirectByteBuffer_init, "Ljava/nio/DirectByteBuffer;",
"(JI)V" },
{ &gDvm.methOrgApacheHarmonyLangAnnotationAnnotationMember_init,
- "Lorg/apache/harmony/lang/annotation/AnnotationMember;",
+ "Llibcore/reflect/AnnotationMember;",
"(Ljava/lang/String;Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/reflect/Method;)V" },
{ NULL, NULL, NULL }
};
@@ -361,11 +358,14 @@ static bool initDirectMethodReferences() {
"getSystemClassLoader", "()Ljava/lang/ClassLoader;" },
{ &gDvm.methJavaLangReflectProxy_constructorPrototype, "Ljava/lang/reflect/Proxy;",
"constructorPrototype", "(Ljava/lang/reflect/InvocationHandler;)V" },
+ { &gDvm.methJavaLangSystem_runFinalization, "Ljava/lang/System;",
+ "runFinalization", "()V" },
+
{ &gDvm.methodTraceGcMethod, "Ldalvik/system/VMDebug;", "startGC", "()V" },
{ &gDvm.methodTraceClassPrepMethod, "Ldalvik/system/VMDebug;", "startClassPrep", "()V" },
{ &gDvm.methOrgApacheHarmonyLangAnnotationAnnotationFactory_createAnnotation,
- "Lorg/apache/harmony/lang/annotation/AnnotationFactory;", "createAnnotation",
- "(Ljava/lang/Class;[Lorg/apache/harmony/lang/annotation/AnnotationMember;)"
+ "Llibcore/reflect/AnnotationFactory;", "createAnnotation",
+ "(Ljava/lang/Class;[Llibcore/reflect/AnnotationMember;)"
"Ljava/lang/annotation/Annotation;" },
{ &gDvm.methDalvikSystemNativeStart_main, "Ldalvik/system/NativeStart;", "main", "([Ljava/lang/String;)V" },
{ &gDvm.methDalvikSystemNativeStart_run, "Ldalvik/system/NativeStart;", "run", "()V" },
diff --git a/vm/Jni.cpp b/vm/Jni.cpp
index e571ad867..0e77fab3e 100644
--- a/vm/Jni.cpp
+++ b/vm/Jni.cpp
@@ -248,8 +248,6 @@ private:
#define kGlobalRefsTableInitialSize 512
#define kGlobalRefsTableMaxSize 51200 /* arbitrary, must be < 64K */
-#define kGrefWaterInterval 100
-#define kTrackGrefUsage true
#define kWeakGlobalRefsTableInitialSize 16
@@ -271,8 +269,6 @@ bool dvmJniStartup() {
dvmInitMutex(&gDvm.jniGlobalRefLock);
dvmInitMutex(&gDvm.jniWeakGlobalRefLock);
- gDvm.jniGlobalRefLoMark = 0;
- gDvm.jniGlobalRefHiMark = kGrefWaterInterval * 2;
if (!dvmInitReferenceTable(&gDvm.jniPinRefTable, kPinTableInitialSize, kPinTableMaxSize)) {
return false;
@@ -289,6 +285,11 @@ void dvmJniShutdown() {
dvmClearReferenceTable(&gDvm.jniPinRefTable);
}
+bool dvmIsBadJniVersion(int version) {
+ // We don't support JNI_VERSION_1_1. These are the only other valid versions.
+ return version != JNI_VERSION_1_2 && version != JNI_VERSION_1_4 && version != JNI_VERSION_1_6;
+}
+
/*
* Find the JNIEnv associated with the current thread.
*
@@ -499,27 +500,6 @@ static jobject addGlobalReference(Object* obj) {
dvmGetCurrentJNIMethod()->clazz->descriptor,
dvmGetCurrentJNIMethod()->name);
- /* GREF usage tracking; should probably be disabled for production env */
- if (kTrackGrefUsage && gDvm.jniGrefLimit != 0) {
- int count = gDvm.jniGlobalRefTable.capacity();
- // TODO: adjust for "holes"
- if (count > gDvm.jniGlobalRefHiMark) {
- ALOGD("GREF has increased to %d", count);
- gDvm.jniGlobalRefHiMark += kGrefWaterInterval;
- gDvm.jniGlobalRefLoMark += kGrefWaterInterval;
-
- /* watch for "excessive" use; not generally appropriate */
- if (count >= gDvm.jniGrefLimit) {
- if (gDvmJni.warnOnly) {
- ALOGW("Excessive JNI global references (%d)", count);
- } else {
- gDvm.jniGlobalRefTable.dump("JNI global");
- ALOGE("Excessive JNI global references (%d)", count);
- ReportJniError();
- }
- }
- }
- }
return jobj;
}
@@ -568,16 +548,6 @@ static void deleteGlobalReference(jobject jobj) {
ALOGW("JNI: DeleteGlobalRef(%p) failed to find entry", jobj);
return;
}
-
- if (kTrackGrefUsage && gDvm.jniGrefLimit != 0) {
- int count = gDvm.jniGlobalRefTable.capacity();
- // TODO: not quite right, need to subtract holes
- if (count < gDvm.jniGlobalRefLoMark) {
- ALOGD("GREF has decreased to %d", count);
- gDvm.jniGlobalRefHiMark -= kGrefWaterInterval;
- gDvm.jniGlobalRefLoMark -= kGrefWaterInterval;
- }
- }
}
/*
@@ -601,26 +571,23 @@ static void pinPrimitiveArray(ArrayObject* arrayObj) {
}
/*
- * If we're watching global ref usage, also keep an eye on these.
- *
* The total number of pinned primitive arrays should be pretty small.
* A single array should not be pinned more than once or twice; any
* more than that is a strong indicator that a Release function is
* not being called.
*/
- if (kTrackGrefUsage && gDvm.jniGrefLimit != 0) {
- int count = 0;
- Object** ppObj = gDvm.jniPinRefTable.table;
- while (ppObj < gDvm.jniPinRefTable.nextEntry) {
- if (*ppObj++ == (Object*) arrayObj)
- count++;
+ int count = 0;
+ Object** ppObj = gDvm.jniPinRefTable.table;
+ while (ppObj < gDvm.jniPinRefTable.nextEntry) {
+ if (*ppObj++ == (Object*) arrayObj) {
+ count++;
}
+ }
- if (count > kPinComplainThreshold) {
- ALOGW("JNI: pin count on array %p (%s) is now %d",
- arrayObj, arrayObj->clazz->descriptor, count);
- /* keep going */
- }
+ if (count > kPinComplainThreshold) {
+ ALOGW("JNI: pin count on array %p (%s) is now %d",
+ arrayObj, arrayObj->clazz->descriptor, count);
+ /* keep going */
}
}
@@ -731,6 +698,11 @@ static void dumpCandidateMethods(ClassObject* clazz, const char* methodName, con
dumpMethods(clazz->directMethods, clazz->directMethodCount, methodName);
}
+static void throwNoSuchMethodError(ClassObject* c, const char* name, const char* sig, const char* kind) {
+ std::string msg(StringPrintf("no %s method \"%s.%s%s\"", kind, c->descriptor, name, sig));
+ dvmThrowNoSuchMethodError(msg.c_str());
+}
+
/*
* Register a method that uses JNI calling conventions.
*/
@@ -756,11 +728,13 @@ static bool dvmRegisterJNIMethod(ClassObject* clazz, const char* methodName,
}
if (method == NULL) {
dumpCandidateMethods(clazz, methodName, signature);
+ throwNoSuchMethodError(clazz, methodName, signature, "static or non-static");
return false;
}
if (!dvmIsNativeMethod(method)) {
ALOGW("Unable to register: not native: %s.%s:%s", clazz->descriptor, methodName, signature);
+ throwNoSuchMethodError(clazz, methodName, signature, "native");
return false;
}
@@ -2845,6 +2819,13 @@ static jint attachThread(JavaVM* vm, JNIEnv** p_env, void* thr_args, bool isDaem
argsCopy.name = NULL;
argsCopy.group = (jobject) dvmGetMainThreadGroup();
} else {
+ if (dvmIsBadJniVersion(args->version)) {
+ ALOGE("Bad JNI version passed to %s: %d",
+ (isDaemon ? "AttachCurrentThreadAsDaemon" : "AttachCurrentThread"),
+ args->version);
+ return JNI_EVERSION;
+ }
+
argsCopy.version = args->version;
argsCopy.name = args->name;
if (args->group != NULL) {
@@ -2923,7 +2904,10 @@ static jint DetachCurrentThread(JavaVM* vm) {
static jint GetEnv(JavaVM* vm, void** env, jint version) {
Thread* self = dvmThreadSelf();
- if (version < JNI_VERSION_1_1 || version > JNI_VERSION_1_6) {
+ // GetEnv also accepts JNI_VERSION_1_1, but always returns a JNIEnv*
+ // corresponding to the most current supported JNI version.
+ if (dvmIsBadJniVersion(version) && version != JNI_VERSION_1_1) {
+ ALOGE("Bad JNI version passed to GetEnv: %d", version);
return JNI_EVERSION;
}
@@ -3439,7 +3423,8 @@ jint JNI_GetCreatedJavaVMs(JavaVM** vmBuf, jsize bufLen, jsize* nVMs) {
*/
jint JNI_CreateJavaVM(JavaVM** p_vm, JNIEnv** p_env, void* vm_args) {
const JavaVMInitArgs* args = (JavaVMInitArgs*) vm_args;
- if (args->version < JNI_VERSION_1_2) {
+ if (dvmIsBadJniVersion(args->version)) {
+ ALOGE("Bad JNI version passed to CreateJavaVM: %d", args->version);
return JNI_EVERSION;
}
@@ -3502,6 +3487,8 @@ jint JNI_CreateJavaVM(JavaVM** p_vm, JNIEnv** p_env, void* vm_args) {
} else {
dvmFprintf(stderr, "ERROR: CreateJavaVM failed: unknown -Xjniopts option '%s'\n",
jniOpt);
+ free(pVM);
+ free(jniOpts);
return JNI_ERR;
}
jniOpt += strlen(jniOpt) + 1;
@@ -3519,6 +3506,7 @@ jint JNI_CreateJavaVM(JavaVM** p_vm, JNIEnv** p_env, void* vm_args) {
if (gDvmJni.jniVm != NULL) {
dvmFprintf(stderr, "ERROR: Dalvik only supports one VM per process\n");
+ free(pVM);
return JNI_ERR;
}
gDvmJni.jniVm = (JavaVM*) pVM;
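Aside on the Jni.cpp changes above: attachThread() now rejects unsupported versions in JavaVMAttachArgs with JNI_EVERSION, using the new dvmIsBadJniVersion() helper. The sketch below shows a native thread attaching itself in a way that passes the check; it assumes Android's C++ jni.h wrappers, and the function and thread names are illustrative.

#include <stddef.h>
#include "jni.h"

// Hedged sketch (not from the patch): `vm` is assumed to be a valid JavaVM*
// obtained elsewhere. A version other than 1.2, 1.4 or 1.6 now yields
// JNI_EVERSION from AttachCurrentThread.
JNIEnv* attachThisThread(JavaVM* vm)
{
    JavaVMAttachArgs args;
    args.version = JNI_VERSION_1_6;            // must pass dvmIsBadJniVersion()
    args.name = const_cast<char*>("worker");   // thread name, may be NULL
    args.group = NULL;                         // NULL selects the default thread group
    JNIEnv* env = NULL;
    if (vm->AttachCurrentThread(&env, &args) != JNI_OK) {
        return NULL;
    }
    return env;
}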
diff --git a/vm/JniInternal.h b/vm/JniInternal.h
index 8bb5975c2..094a640dd 100644
--- a/vm/JniInternal.h
+++ b/vm/JniInternal.h
@@ -25,6 +25,8 @@
bool dvmJniStartup(void);
void dvmJniShutdown(void);
+bool dvmIsBadJniVersion(int version);
+
/*
* Our data structures for JNIEnv and JavaVM.
*
diff --git a/vm/Native.cpp b/vm/Native.cpp
index 8892c2a34..a12c4e007 100644
--- a/vm/Native.cpp
+++ b/vm/Native.cpp
@@ -411,14 +411,14 @@ bool dvmLoadNativeCode(const char* pathName, Object* classLoader,
if (verbose)
ALOGD("Added shared lib %s %p", pathName, classLoader);
- bool result = true;
+ bool result = false;
void* vonLoad;
int version;
vonLoad = dlsym(handle, "JNI_OnLoad");
if (vonLoad == NULL) {
- ALOGD("No JNI_OnLoad found in %s %p, skipping init",
- pathName, classLoader);
+ ALOGD("No JNI_OnLoad found in %s %p, skipping init", pathName, classLoader);
+ result = true;
} else {
/*
* Call JNI_OnLoad. We have to override the current class
@@ -438,11 +438,12 @@ bool dvmLoadNativeCode(const char* pathName, Object* classLoader,
dvmChangeStatus(self, oldStatus);
self->classLoaderOverride = prevOverride;
- if (version != JNI_VERSION_1_2 && version != JNI_VERSION_1_4 &&
- version != JNI_VERSION_1_6)
- {
- ALOGW("JNI_OnLoad returned bad version (%d) in %s %p",
- version, pathName, classLoader);
+ if (version == JNI_ERR) {
+ *detail = strdup(StringPrintf("JNI_ERR returned from JNI_OnLoad in \"%s\"",
+ pathName).c_str());
+ } else if (dvmIsBadJniVersion(version)) {
+ *detail = strdup(StringPrintf("Bad JNI version returned from JNI_OnLoad in \"%s\": %d",
+ pathName, version).c_str());
/*
* It's unwise to call dlclose() here, but we can mark it
* as bad and ensure that future load attempts will fail.
@@ -452,11 +453,12 @@ bool dvmLoadNativeCode(const char* pathName, Object* classLoader,
* newly-registered native method calls. We could try to
* unregister them, but that doesn't seem worthwhile.
*/
- result = false;
} else {
- if (gDvm.verboseJni) {
- ALOGI("[Returned from JNI_OnLoad for \"%s\"]", pathName);
- }
+ result = true;
+ }
+ if (gDvm.verboseJni) {
+ ALOGI("[Returned %s from JNI_OnLoad for \"%s\"]",
+ (result ? "successfully" : "failure"), pathName);
}
}
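Aside on the Native.cpp change above: dvmLoadNativeCode now starts with result = false and only reports success when JNI_OnLoad is absent or returns a supported version; a JNI_ERR return or a bad version is surfaced through *detail. A minimal sketch of a library-side JNI_OnLoad that satisfies the new checks:

#include <stddef.h>
#include "jni.h"

// Sketch (not from the patch): the hook must return JNI_VERSION_1_2, _1_4
// or _1_6; anything dvmIsBadJniVersion() rejects, or JNI_ERR, now makes
// dvmLoadNativeCode fail with a detail message.
extern "C" JNIEXPORT jint JNICALL JNI_OnLoad(JavaVM* vm, void* /*reserved*/)
{
    JNIEnv* env = NULL;
    if (vm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_6) != JNI_OK) {
        return JNI_ERR;   // surfaces as "JNI_ERR returned from JNI_OnLoad in ..."
    }
    // ... RegisterNatives() calls would go here ...
    return JNI_VERSION_1_6;   // accepted by dvmIsBadJniVersion()
}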
diff --git a/vm/Profile.cpp b/vm/Profile.cpp
index dfb50b39b..866311cda 100644
--- a/vm/Profile.cpp
+++ b/vm/Profile.cpp
@@ -118,11 +118,12 @@ static inline u8 getWallTimeInUsec()
* We use this clock when we can because it enables us to track the time that
* a thread spends running and not blocked.
*/
-static inline u8 getThreadCpuTimeInUsec()
+static inline u8 getThreadCpuTimeInUsec(Thread* thread)
{
+ clockid_t cid;
struct timespec tm;
-
- clock_gettime(CLOCK_THREAD_CPUTIME_ID, &tm);
+ pthread_getcpuclockid(thread->handle, &cid);
+ clock_gettime(cid, &tm);
if (!(tm.tv_nsec >= 0 && tm.tv_nsec < 1*1000*1000*1000)) {
ALOGE("bad nsec: %ld", tm.tv_nsec);
dvmAbort();
@@ -137,7 +138,7 @@ static inline u8 getThreadCpuTimeInUsec()
static inline u8 getStopwatchClock()
{
#if defined(HAVE_POSIX_CLOCKS)
- return getThreadCpuTimeInUsec();
+ return getThreadCpuTimeInUsec(dvmThreadSelf());
#else
return getWallTimeInUsec();
#endif
@@ -171,6 +172,133 @@ static inline void storeLongLE(u1* buf, u8 val)
}
/*
+ * Gets a thread's stack trace as an array of method pointers of length pCount.
+ * The returned array must be freed by the caller.
+ */
+static const Method** getStackTrace(Thread* thread, size_t* pCount)
+{
+ void* fp = thread->interpSave.curFrame;
+ assert(thread == dvmThreadSelf() || dvmIsSuspended(thread));
+
+ /* Compute the stack depth. */
+ size_t stackDepth = 0;
+ while (fp != NULL) {
+ const StackSaveArea* saveArea = SAVEAREA_FROM_FP(fp);
+
+ if (!dvmIsBreakFrame((u4*) fp))
+ stackDepth++;
+
+ assert(fp != saveArea->prevFrame);
+ fp = saveArea->prevFrame;
+ }
+ *pCount = stackDepth;
+
+ /*
+ * Allocate memory for stack trace. This must be freed later, either by
+ * freeThreadStackTraceSamples when tracing stops or by freeThread.
+ */
+ const Method** stackTrace = (const Method**) malloc(sizeof(Method*) *
+ stackDepth);
+ if (stackTrace == NULL)
+ return NULL;
+
+ /* Walk the stack a second time, filling in the stack trace. */
+ const Method** ptr = stackTrace;
+ fp = thread->interpSave.curFrame;
+ while (fp != NULL) {
+ const StackSaveArea* saveArea = SAVEAREA_FROM_FP(fp);
+ const Method* method = saveArea->method;
+
+ if (!dvmIsBreakFrame((u4*) fp)) {
+ *ptr++ = method;
+ stackDepth--;
+ }
+ assert(fp != saveArea->prevFrame);
+ fp = saveArea->prevFrame;
+ }
+ assert(stackDepth == 0);
+
+ return stackTrace;
+}
+/*
+ * Get a sample of the stack trace for a thread.
+ */
+static void getSample(Thread* thread)
+{
+ /* Get old and new stack trace for thread. */
+ size_t newLength = 0;
+ const Method** newStackTrace = getStackTrace(thread, &newLength);
+ size_t oldLength = thread->stackTraceSampleLength;
+ const Method** oldStackTrace = thread->stackTraceSample;
+
+ /* Read time clocks to use for all events in this trace. */
+ u4 cpuClockDiff = 0;
+ u4 wallClockDiff = 0;
+ dvmMethodTraceReadClocks(thread, &cpuClockDiff, &wallClockDiff);
+ if (oldStackTrace == NULL) {
+ /*
+ * If there's no previous stack trace sample, log an entry event for
+ * every method in the trace.
+ */
+ for (int i = newLength - 1; i >= 0; --i) {
+ dvmMethodTraceAdd(thread, newStackTrace[i], METHOD_TRACE_ENTER,
+ cpuClockDiff, wallClockDiff);
+ }
+ } else {
+ /*
+ * If there's a previous stack trace, diff the traces and emit entry
+ * and exit events accordingly.
+ */
+ int diffIndexOld = oldLength - 1;
+ int diffIndexNew = newLength - 1;
+ /* Iterate bottom-up until there's a difference between traces. */
+ while (diffIndexOld >= 0 && diffIndexNew >= 0 &&
+ oldStackTrace[diffIndexOld] == newStackTrace[diffIndexNew]) {
+ diffIndexOld--;
+ diffIndexNew--;
+ }
+ /* Iterate top-down over old trace until diff, emitting exit events. */
+ for (int i = 0; i <= diffIndexOld; ++i) {
+ dvmMethodTraceAdd(thread, oldStackTrace[i], METHOD_TRACE_EXIT,
+ cpuClockDiff, wallClockDiff);
+ }
+ /* Iterate bottom-up over new trace from diff, emitting entry events. */
+ for (int i = diffIndexNew; i >= 0; --i) {
+ dvmMethodTraceAdd(thread, newStackTrace[i], METHOD_TRACE_ENTER,
+ cpuClockDiff, wallClockDiff);
+ }
+ }
+
+ /* Free the old stack trace and update the thread's stack trace sample. */
+ free(oldStackTrace);
+ thread->stackTraceSample = newStackTrace;
+ thread->stackTraceSampleLength = newLength;
+}
+
+/*
+ * Entry point for sampling thread. The sampling interval in microseconds is
+ * passed in as an argument.
+ */
+static void* runSamplingThread(void* arg)
+{
+ int intervalUs = (int) arg;
+ while (gDvm.methodTrace.traceEnabled) {
+ dvmSuspendAllThreads(SUSPEND_FOR_SAMPLING);
+
+ dvmLockThreadList(dvmThreadSelf());
+ for (Thread *thread = gDvm.threadList; thread != NULL; thread = thread->next) {
+ getSample(thread);
+ }
+ dvmUnlockThreadList();
+
+ dvmResumeAllThreads(SUSPEND_FOR_SAMPLING);
+
+ usleep(intervalUs);
+ }
+ return NULL;
+}
+
+/*
* Boot-time init.
*/
bool dvmProfilingStartup()
@@ -282,6 +410,21 @@ static void resetCpuClockBase()
}
/*
+ * Free and reset the "stackTraceSample" field in all threads.
+ */
+static void freeThreadStackTraceSamples()
+{
+ Thread* thread;
+
+ dvmLockThreadList(NULL);
+ for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
+ free(thread->stackTraceSample);
+ thread->stackTraceSample = NULL;
+ }
+ dvmUnlockThreadList();
+}
+
+/*
* Dump the thread list to the specified file.
*/
static void dumpThreadList(FILE* fp) {
@@ -359,7 +502,7 @@ static void dumpMethodList(FILE* fp)
* On failure, we throw an exception and return.
*/
void dvmMethodTraceStart(const char* traceFileName, int traceFd, int bufferSize,
- int flags, bool directToDdms)
+ int flags, bool directToDdms, bool samplingEnabled, int intervalUs)
{
MethodTraceState* state = &gDvm.methodTrace;
@@ -428,6 +571,8 @@ void dvmMethodTraceStart(const char* traceFileName, int traceFd, int bufferSize,
state->recordSize = TRACE_REC_SIZE_SINGLE_CLOCK;
}
+ state->samplingEnabled = samplingEnabled;
+
/*
* Output the header.
*/
@@ -452,7 +597,17 @@ void dvmMethodTraceStart(const char* traceFileName, int traceFd, int bufferSize,
* following to take a Thread* argument, and set the appropriate
* interpBreak flags only on the target thread.
*/
- updateActiveProfilers(kSubModeMethodTrace, true);
+ if (samplingEnabled) {
+ updateActiveProfilers(kSubModeSampleTrace, true);
+ /* Start the sampling thread. */
+ if (!dvmCreateInternalThread(&state->samplingThreadHandle,
+ "Sampling Thread", &runSamplingThread, (void*) intervalUs)) {
+ dvmThrowInternalError("failed to create sampling thread");
+ goto fail;
+ }
+ } else {
+ updateActiveProfilers(kSubModeMethodTrace, true);
+ }
dvmUnlockMutex(&state->startStopLock);
return;
@@ -500,7 +655,7 @@ static inline void measureClockOverhead()
{
#if defined(HAVE_POSIX_CLOCKS)
if (useThreadCpuClock()) {
- getThreadCpuTimeInUsec();
+ getThreadCpuTimeInUsec(dvmThreadSelf());
}
#endif
if (useWallClock()) {
@@ -536,12 +691,18 @@ static u4 getClockOverhead()
}
/*
- * Returns "true" if method tracing is currently active.
+ * Indicates if method tracing is active and what kind of tracing is active.
*/
-bool dvmIsMethodTraceActive()
+TracingMode dvmGetMethodTracingMode()
{
const MethodTraceState* state = &gDvm.methodTrace;
- return state->traceEnabled;
+ if (!state->traceEnabled) {
+ return TRACING_INACTIVE;
+ } else if (state->samplingEnabled) {
+ return SAMPLE_PROFILING_ACTIVE;
+ } else {
+ return METHOD_TRACING_ACTIVE;
+ }
}
/*
@@ -551,6 +712,7 @@ bool dvmIsMethodTraceActive()
void dvmMethodTraceStop()
{
MethodTraceState* state = &gDvm.methodTrace;
+ bool samplingEnabled = state->samplingEnabled;
u8 elapsed;
/*
@@ -565,7 +727,11 @@ void dvmMethodTraceStop()
dvmUnlockMutex(&state->startStopLock);
return;
} else {
- updateActiveProfilers(kSubModeMethodTrace, false);
+ if (samplingEnabled) {
+ updateActiveProfilers(kSubModeSampleTrace, false);
+ } else {
+ updateActiveProfilers(kSubModeMethodTrace, false);
+ }
}
/* compute elapsed time */
@@ -719,9 +885,42 @@ void dvmMethodTraceStop()
fclose(state->traceFile);
state->traceFile = NULL;
+ /* free and clear sampling traces held by all threads */
+ if (samplingEnabled) {
+ freeThreadStackTraceSamples();
+ }
+
/* wake any threads that were waiting for profiling to complete */
dvmBroadcastCond(&state->threadExitCond);
dvmUnlockMutex(&state->startStopLock);
+
+ /* make sure the sampling thread has stopped */
+ if (samplingEnabled &&
+ pthread_join(state->samplingThreadHandle, NULL) != 0) {
+ ALOGW("Sampling thread join failed");
+ }
+}
+
+/*
+ * Read clocks and generate time diffs for method trace events.
+ */
+void dvmMethodTraceReadClocks(Thread* self, u4* cpuClockDiff,
+ u4* wallClockDiff)
+{
+#if defined(HAVE_POSIX_CLOCKS)
+ if (useThreadCpuClock()) {
+ if (!self->cpuClockBaseSet) {
+ /* Initialize per-thread CPU clock base time on first use. */
+ self->cpuClockBase = getThreadCpuTimeInUsec(self);
+ self->cpuClockBaseSet = true;
+ } else {
+ *cpuClockDiff = getThreadCpuTimeInUsec(self) - self->cpuClockBase;
+ }
+ }
+#endif
+ if (useWallClock()) {
+ *wallClockDiff = getWallTimeInUsec() - gDvm.methodTrace.startWhen;
+ }
}
/*
@@ -730,7 +929,8 @@ void dvmMethodTraceStop()
* Multiple threads may be banging on this all at once. We use atomic ops
* rather than mutexes for speed.
*/
-void dvmMethodTraceAdd(Thread* self, const Method* method, int action)
+void dvmMethodTraceAdd(Thread* self, const Method* method, int action,
+ u4 cpuClockDiff, u4 wallClockDiff)
{
MethodTraceState* state = &gDvm.methodTrace;
u4 methodVal;
@@ -739,21 +939,6 @@ void dvmMethodTraceAdd(Thread* self, const Method* method, int action)
assert(method != NULL);
-#if defined(HAVE_POSIX_CLOCKS)
- /*
- * We can only access the per-thread CPU clock from within the
- * thread, so we have to initialize the base time on the first use.
- * (Looks like pthread_getcpuclockid(thread, &id) will do what we
- * want, but it doesn't appear to be defined on the device.)
- */
- if (!self->cpuClockBaseSet) {
- self->cpuClockBase = getThreadCpuTimeInUsec();
- self->cpuClockBaseSet = true;
- //ALOGI("thread base id=%d 0x%llx",
- // self->threadId, self->cpuClockBase);
- }
-#endif
-
/*
* Advance "curOffset" atomically.
*/
@@ -784,7 +969,6 @@ void dvmMethodTraceAdd(Thread* self, const Method* method, int action)
#if defined(HAVE_POSIX_CLOCKS)
if (useThreadCpuClock()) {
- u4 cpuClockDiff = (u4) (getThreadCpuTimeInUsec() - self->cpuClockBase);
*ptr++ = (u1) cpuClockDiff;
*ptr++ = (u1) (cpuClockDiff >> 8);
*ptr++ = (u1) (cpuClockDiff >> 16);
@@ -793,7 +977,6 @@ void dvmMethodTraceAdd(Thread* self, const Method* method, int action)
#endif
if (useWallClock()) {
- u4 wallClockDiff = (u4) (getWallTimeInUsec() - state->startWhen);
*ptr++ = (u1) wallClockDiff;
*ptr++ = (u1) (wallClockDiff >> 8);
*ptr++ = (u1) (wallClockDiff >> 16);
@@ -809,7 +992,11 @@ void dvmMethodTraceAdd(Thread* self, const Method* method, int action)
void dvmFastMethodTraceEnter(const Method* method, Thread* self)
{
if (self->interpBreak.ctl.subMode & kSubModeMethodTrace) {
- dvmMethodTraceAdd(self, method, METHOD_TRACE_ENTER);
+ u4 cpuClockDiff = 0;
+ u4 wallClockDiff = 0;
+ dvmMethodTraceReadClocks(self, &cpuClockDiff, &wallClockDiff);
+ dvmMethodTraceAdd(self, method, METHOD_TRACE_ENTER, cpuClockDiff,
+ wallClockDiff);
}
}
@@ -821,8 +1008,11 @@ void dvmFastMethodTraceEnter(const Method* method, Thread* self)
void dvmFastMethodTraceExit(Thread* self)
{
if (self->interpBreak.ctl.subMode & kSubModeMethodTrace) {
+ u4 cpuClockDiff = 0;
+ u4 wallClockDiff = 0;
+ dvmMethodTraceReadClocks(self, &cpuClockDiff, &wallClockDiff);
dvmMethodTraceAdd(self, self->interpSave.method,
- METHOD_TRACE_EXIT);
+ METHOD_TRACE_EXIT, cpuClockDiff, wallClockDiff);
}
}
@@ -834,7 +1024,11 @@ void dvmFastMethodTraceExit(Thread* self)
void dvmFastNativeMethodTraceExit(const Method* method, Thread* self)
{
if (self->interpBreak.ctl.subMode & kSubModeMethodTrace) {
- dvmMethodTraceAdd(self, method, METHOD_TRACE_EXIT);
+ u4 cpuClockDiff = 0;
+ u4 wallClockDiff = 0;
+ dvmMethodTraceReadClocks(self, &cpuClockDiff, &wallClockDiff);
+ dvmMethodTraceAdd(self, method, METHOD_TRACE_EXIT, cpuClockDiff,
+ wallClockDiff);
}
}
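Aside on the sampling profiler added in Profile.cpp above: getSample() compares the previous and current stack traces from the bottom of the stack upward, emitting METHOD_TRACE_EXIT events for frames that disappeared and METHOD_TRACE_ENTER events for frames that appeared. A standalone sketch of that diff step, using integers in place of the Method* pointers:

#include <stdio.h>
#include <vector>

// Sketch (not from the patch) of the trace-diff step in getSample(): walk
// both stacks from the bottom (end of the vector) until they diverge, then
// emit EXIT events for the old frames above the split (top-down) and ENTER
// events for the new frames (bottom-up).
static void diffTraces(const std::vector<int>& oldTrace,   // index 0 = top of stack
                       const std::vector<int>& newTrace)
{
    int diffOld = (int) oldTrace.size() - 1;
    int diffNew = (int) newTrace.size() - 1;
    while (diffOld >= 0 && diffNew >= 0 &&
           oldTrace[diffOld] == newTrace[diffNew]) {
        diffOld--;
        diffNew--;
    }
    for (int i = 0; i <= diffOld; ++i)
        printf("EXIT  %d\n", oldTrace[i]);
    for (int i = diffNew; i >= 0; --i)
        printf("ENTER %d\n", newTrace[i]);
}

int main()
{
    // Old sample: main -> a -> b (b on top). New sample: main -> a -> c -> d.
    std::vector<int> oldTrace, newTrace;
    oldTrace.push_back(2); oldTrace.push_back(1); oldTrace.push_back(0);   // b, a, main
    newTrace.push_back(4); newTrace.push_back(3);
    newTrace.push_back(1); newTrace.push_back(0);                          // d, c, a, main
    diffTraces(oldTrace, newTrace);  // prints: EXIT 2, ENTER 3, ENTER 4
    return 0;
}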
diff --git a/vm/Profile.h b/vm/Profile.h
index 6c9e1c714..9059181a9 100644
--- a/vm/Profile.h
+++ b/vm/Profile.h
@@ -52,6 +52,9 @@ struct MethodTraceState {
int traceVersion;
size_t recordSize;
+
+ bool samplingEnabled;
+ pthread_t samplingThreadHandle;
};
/*
@@ -83,11 +86,20 @@ struct AllocProfState {
* Start/stop method tracing.
*/
void dvmMethodTraceStart(const char* traceFileName, int traceFd, int bufferSize,
- int flags, bool directToDdms);
-bool dvmIsMethodTraceActive(void);
+ int flags, bool directToDdms, bool samplingEnabled, int intervalUs);
void dvmMethodTraceStop(void);
/*
+ * Returns current method tracing mode.
+ */
+enum TracingMode {
+ TRACING_INACTIVE,
+ METHOD_TRACING_ACTIVE,
+ SAMPLE_PROFILING_ACTIVE,
+};
+TracingMode dvmGetMethodTracingMode(void);
+
+/*
* Start/stop emulator tracing.
*/
void dvmEmulatorTraceStart(void);
@@ -112,27 +124,45 @@ enum {
*/
#define TRACE_METHOD_ENTER(_self, _method) \
do { \
- if (_self->interpBreak.ctl.subMode & kSubModeMethodTrace) \
- dvmMethodTraceAdd(_self, _method, METHOD_TRACE_ENTER); \
+ if (_self->interpBreak.ctl.subMode & kSubModeMethodTrace) { \
+ u4 cpuClockDiff = 0; \
+ u4 wallClockDiff = 0; \
+ dvmMethodTraceReadClocks(_self, &cpuClockDiff, &wallClockDiff); \
+ dvmMethodTraceAdd(_self, _method, METHOD_TRACE_ENTER, \
+ cpuClockDiff, wallClockDiff); \
+ } \
if (_self->interpBreak.ctl.subMode & kSubModeEmulatorTrace) \
dvmEmitEmulatorTrace(_method, METHOD_TRACE_ENTER); \
} while(0);
#define TRACE_METHOD_EXIT(_self, _method) \
do { \
- if (_self->interpBreak.ctl.subMode & kSubModeMethodTrace) \
- dvmMethodTraceAdd(_self, _method, METHOD_TRACE_EXIT); \
+ if (_self->interpBreak.ctl.subMode & kSubModeMethodTrace) { \
+ u4 cpuClockDiff = 0; \
+ u4 wallClockDiff = 0; \
+ dvmMethodTraceReadClocks(_self, &cpuClockDiff, &wallClockDiff); \
+ dvmMethodTraceAdd(_self, _method, METHOD_TRACE_EXIT, \
+ cpuClockDiff, wallClockDiff); \
+ } \
if (_self->interpBreak.ctl.subMode & kSubModeEmulatorTrace) \
dvmEmitEmulatorTrace(_method, METHOD_TRACE_EXIT); \
} while(0);
#define TRACE_METHOD_UNROLL(_self, _method) \
do { \
- if (_self->interpBreak.ctl.subMode & kSubModeMethodTrace) \
- dvmMethodTraceAdd(_self, _method, METHOD_TRACE_UNROLL); \
+ if (_self->interpBreak.ctl.subMode & kSubModeMethodTrace) { \
+ u4 cpuClockDiff = 0; \
+ u4 wallClockDiff = 0; \
+ dvmMethodTraceReadClocks(_self, &cpuClockDiff, &wallClockDiff); \
+ dvmMethodTraceAdd(_self, _method, METHOD_TRACE_UNROLL, \
+ cpuClockDiff, wallClockDiff); \
+ } \
if (_self->interpBreak.ctl.subMode & kSubModeEmulatorTrace) \
dvmEmitEmulatorTrace(_method, METHOD_TRACE_UNROLL); \
} while(0);
-void dvmMethodTraceAdd(struct Thread* self, const Method* method, int action);
+void dvmMethodTraceReadClocks(Thread* self, u4* cpuClockDiff,
+ u4* wallClockDiff);
+void dvmMethodTraceAdd(struct Thread* self, const Method* method, int action,
+ u4 cpuClockDiff, u4 wallClockDiff);
void dvmEmitEmulatorTrace(const Method* method, int action);
void dvmMethodTraceGCBegin(void);
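
[Editor's note] The Profile.h changes above make every trace event carry per-thread CPU-time and wall-time deltas, and introduce a TracingMode enum so callers can distinguish exact method tracing from sample profiling. The following is a minimal, self-contained sketch of that clock-delta idea only; FakeThread, readClockDiffs, and the use of clock_gettime are illustrative assumptions, not the VM's actual plumbing.

    /*
     * Sketch (not the VM implementation): compute per-event CPU/wall clock
     * deltas relative to a per-thread base, the way the reworked macros feed
     * dvmMethodTraceAdd. Names and the u4 typedef are assumptions.
     */
    #include <cstdint>
    #include <cstdio>
    #include <ctime>

    typedef uint32_t u4;

    static uint64_t nowMicros(clockid_t id) {
        timespec ts;
        clock_gettime(id, &ts);
        return (uint64_t)ts.tv_sec * 1000000 + (uint64_t)ts.tv_nsec / 1000;
    }

    struct FakeThread {
        uint64_t cpuClockBase;    // captured when tracing attaches to the thread
        uint64_t wallClockBase;
    };

    static void readClockDiffs(FakeThread* t, u4* cpuDiff, u4* wallDiff) {
        *cpuDiff  = (u4)(nowMicros(CLOCK_THREAD_CPUTIME_ID) - t->cpuClockBase);
        *wallDiff = (u4)(nowMicros(CLOCK_MONOTONIC) - t->wallClockBase);
    }

    int main() {
        FakeThread t = { nowMicros(CLOCK_THREAD_CPUTIME_ID),
                         nowMicros(CLOCK_MONOTONIC) };
        for (volatile int i = 0; i < 1000000; ++i) {}   // burn a little CPU
        u4 cpu = 0, wall = 0;
        readClockDiffs(&t, &cpu, &wall);
        printf("cpu=%uus wall=%uus\n", cpu, wall);
        return 0;
    }
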
diff --git a/vm/Thread.cpp b/vm/Thread.cpp
index 9671b84c7..1ebfca74c 100644
--- a/vm/Thread.cpp
+++ b/vm/Thread.cpp
@@ -997,6 +997,7 @@ static void freeThread(Thread* thread)
#if defined(WITH_SELF_VERIFICATION)
dvmSelfVerificationShadowSpaceFree(thread);
#endif
+ free(thread->stackTraceSample);
free(thread);
}
@@ -1281,6 +1282,7 @@ bool dvmCreateInterpThread(Object* threadObj, int reqStackSize)
"thread has already been started");
freeThread(newThread);
dvmReleaseTrackedAlloc(vmThreadObj, NULL);
+ return false;
}
/*
@@ -3598,7 +3600,7 @@ void dvmNukeThread(Thread* thread)
ALOGD("Sent, pausing to let debuggerd run");
usleep(8 * 1000 * 1000); // TODO: timed-wait until debuggerd finishes
- /* ignore SIGSEGV so the eventual dmvAbort() doesn't notify debuggerd */
+ /* ignore SIGSEGV so the eventual dvmAbort() doesn't notify debuggerd */
signal(SIGSEGV, SIG_IGN);
ALOGD("Continuing");
}
diff --git a/vm/Thread.h b/vm/Thread.h
index 8deef6e67..19bd49c3b 100644
--- a/vm/Thread.h
+++ b/vm/Thread.h
@@ -287,6 +287,10 @@ struct Thread {
bool cpuClockBaseSet;
u8 cpuClockBase;
+ /* previous stack trace sample and length (used by sampling profiler) */
+ const Method** stackTraceSample;
+ size_t stackTraceSampleLength;
+
/* memory allocation profiling state */
AllocProfState allocProf;
@@ -358,6 +362,7 @@ enum SuspendCause {
SUSPEND_FOR_DEX_OPT,
SUSPEND_FOR_VERIFY,
SUSPEND_FOR_HPROF,
+ SUSPEND_FOR_SAMPLING,
#if defined(WITH_JIT)
SUSPEND_FOR_TBL_RESIZE, // jit-table resize
SUSPEND_FOR_IC_PATCH, // polymorphic callsite inline-cache patch
diff --git a/vm/alloc/Alloc.cpp b/vm/alloc/Alloc.cpp
index d3f8dae93..4eb1b855b 100644
--- a/vm/alloc/Alloc.cpp
+++ b/vm/alloc/Alloc.cpp
@@ -17,9 +17,12 @@
* Garbage-collecting memory allocator.
*/
#include "Dalvik.h"
+#include "Globals.h"
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/HeapSource.h"
+#include "cutils/atomic.h"
+#include "cutils/atomic-inline.h"
/*
* Initialize the GC universe.
@@ -310,6 +313,17 @@ void dvmCollectGarbage()
dvmUnlockHeap();
}
+/*
+ * Run finalization.
+ */
+void dvmRunFinalization() {
+ Thread *self = dvmThreadSelf();
+ assert(self != NULL);
+ JValue unusedResult;
+ assert(gDvm.methJavaLangSystem_runFinalization != NULL);
+ dvmCallMethod(self, gDvm.methJavaLangSystem_runFinalization, NULL, &unusedResult);
+}
+
struct CountContext {
const ClassObject *clazz;
size_t count;
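
[Editor's note] dvmRunFinalization, added above, simply invokes the static method java.lang.System.runFinalization through the VM's internal call bridge. As a rough external analogue, the JNI-level equivalent looks like the sketch below; it assumes a host JVM reachable through the JNI invocation API, whereas the VM-internal path goes through dvmCallMethod.

    /*
     * Sketch: JNI-level equivalent of calling System.runFinalization().
     * Assumes jni.h and a JVM library are available on the host.
     */
    #include <jni.h>
    #include <cstdio>

    int main() {
        JavaVM* vm = nullptr;
        JNIEnv* env = nullptr;
        JavaVMInitArgs args;
        args.version = JNI_VERSION_1_6;
        args.nOptions = 0;
        args.options = nullptr;
        args.ignoreUnrecognized = JNI_TRUE;
        if (JNI_CreateJavaVM(&vm, (void**)&env, &args) != JNI_OK) {
            fprintf(stderr, "could not create VM\n");
            return 1;
        }
        jclass system = env->FindClass("java/lang/System");
        jmethodID runFin = env->GetStaticMethodID(system, "runFinalization", "()V");
        env->CallStaticVoidMethod(system, runFin);   // what dvmRunFinalization triggers
        vm->DestroyJavaVM();
        return 0;
    }
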
diff --git a/vm/alloc/Alloc.h b/vm/alloc/Alloc.h
index 4bcd45834..c9a23f97d 100644
--- a/vm/alloc/Alloc.h
+++ b/vm/alloc/Alloc.h
@@ -145,6 +145,11 @@ int dvmGetTargetHeapConcurrentStart();
void dvmCollectGarbage(void);
/*
+ * Calls System.runFinalization().
+ */
+void dvmRunFinalization();
+
+/*
* Returns a count of the direct instances of a class.
*/
size_t dvmCountInstancesOfClass(const ClassObject *clazz);
diff --git a/vm/alloc/CardTable.cpp b/vm/alloc/CardTable.cpp
index 2c81fd149..87143dc67 100644
--- a/vm/alloc/CardTable.cpp
+++ b/vm/alloc/CardTable.cpp
@@ -54,9 +54,12 @@ bool dvmCardTableStartup(size_t heapMaximumSize, size_t growthLimit)
void *allocBase;
u1 *biasedBase;
GcHeap *gcHeap = gDvm.gcHeap;
+ int offset;
void *heapBase = dvmHeapSourceGetBase();
assert(gcHeap != NULL);
assert(heapBase != NULL);
+ /* All zeros is the correct initial value; all clean. */
+ assert(GC_CARD_CLEAN == 0);
/* Set up the card table */
length = heapMaximumSize / GC_CARD_SIZE;
@@ -69,17 +72,11 @@ bool dvmCardTableStartup(size_t heapMaximumSize, size_t growthLimit)
gcHeap->cardTableBase = (u1*)allocBase;
gcHeap->cardTableLength = growthLimit / GC_CARD_SIZE;
gcHeap->cardTableMaxLength = length;
- gcHeap->cardTableOffset = 0;
- /* All zeros is the correct initial value; all clean. */
- assert(GC_CARD_CLEAN == 0);
-
biasedBase = (u1 *)((uintptr_t)allocBase -
- ((uintptr_t)heapBase >> GC_CARD_SHIFT));
- if (((uintptr_t)biasedBase & 0xff) != GC_CARD_DIRTY) {
- int offset = GC_CARD_DIRTY - ((uintptr_t)biasedBase & 0xff);
- gcHeap->cardTableOffset = offset + (offset < 0 ? 0x100 : 0);
- biasedBase += gcHeap->cardTableOffset;
- }
+ ((uintptr_t)heapBase >> GC_CARD_SHIFT));
+ offset = GC_CARD_DIRTY - ((uintptr_t)biasedBase & 0xff);
+ gcHeap->cardTableOffset = offset + (offset < 0 ? 0x100 : 0);
+ biasedBase += gcHeap->cardTableOffset;
assert(((uintptr_t)biasedBase & 0xff) == GC_CARD_DIRTY);
gDvm.biasedCardTableBase = biasedBase;
@@ -136,21 +133,20 @@ void dvmClearCardTable()
*/
assert(gDvm.gcHeap->cardTableBase != NULL);
-#if 1
- // zero out cards with memset(), using liveBits as an estimate
- const HeapBitmap* liveBits = dvmHeapSourceGetLiveBits();
- size_t maxLiveCard = (liveBits->max - liveBits->base) / GC_CARD_SIZE;
- maxLiveCard = ALIGN_UP_TO_PAGE_SIZE(maxLiveCard);
- if (maxLiveCard > gDvm.gcHeap->cardTableLength) {
- maxLiveCard = gDvm.gcHeap->cardTableLength;
- }
+ if (gDvm.lowMemoryMode) {
+ // zero out cards with madvise(), discarding all pages in the card table
+ madvise(gDvm.gcHeap->cardTableBase, gDvm.gcHeap->cardTableLength, MADV_DONTNEED);
+ } else {
+ // zero out cards with memset(), using liveBits as an estimate
+ const HeapBitmap* liveBits = dvmHeapSourceGetLiveBits();
+ size_t maxLiveCard = (liveBits->max - liveBits->base) / GC_CARD_SIZE;
+ maxLiveCard = ALIGN_UP_TO_PAGE_SIZE(maxLiveCard);
+ if (maxLiveCard > gDvm.gcHeap->cardTableLength) {
+ maxLiveCard = gDvm.gcHeap->cardTableLength;
+ }
- memset(gDvm.gcHeap->cardTableBase, GC_CARD_CLEAN, maxLiveCard);
-#else
- // zero out cards with madvise(), discarding all pages in the card table
- madvise(gDvm.gcHeap->cardTableBase, gDvm.gcHeap->cardTableLength,
- MADV_DONTNEED);
-#endif
+ memset(gDvm.gcHeap->cardTableBase, GC_CARD_CLEAN, maxLiveCard);
+ }
}
/*
@@ -165,7 +161,7 @@ bool dvmIsValidCard(const u1 *cardAddr)
}
/*
- * Returns the address of the relevent byte in the card table, given
+ * Returns the address of the relevant byte in the card table, given
* an address on the heap.
*/
u1 *dvmCardFromAddr(const void *addr)
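
[Editor's note] The dvmClearCardTable change above picks between two clearing strategies: in low-memory mode it madvises the whole table so the kernel discards the dirty pages, otherwise it memsets only the prefix that can hold live cards. The sketch below demonstrates the two strategies on an anonymous mapping; the table size, the live-bytes estimate, and the lowMemoryMode flag are illustrative stand-ins, not the VM's values.

    /*
     * Sketch of the two card-clearing strategies. On a private anonymous
     * mapping, MADV_DONTNEED pages read back as zero (i.e. GC_CARD_CLEAN).
     */
    #include <sys/mman.h>
    #include <cstring>
    #include <cstdio>

    static const size_t kTableLen = 1 << 20;    // 1 MiB stand-in card table

    int main() {
        unsigned char* table = (unsigned char*)mmap(nullptr, kTableLen,
                PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (table == MAP_FAILED) return 1;
        table[0] = 0x70;                        // pretend a card was dirtied

        bool lowMemoryMode = true;
        size_t liveBytes = 4096;                // estimate of cards in use
        if (lowMemoryMode) {
            // Return the pages to the kernel; they come back zeroed on next touch.
            madvise(table, kTableLen, MADV_DONTNEED);
        } else {
            // Only clear the prefix that can actually hold live cards.
            memset(table, 0, liveBytes);
        }
        printf("card[0] after clear: %d\n", table[0]);
        munmap(table, kTableLen);
        return 0;
    }
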
diff --git a/vm/alloc/Heap.cpp b/vm/alloc/Heap.cpp
index 5e5dd9a8a..5f860790c 100644
--- a/vm/alloc/Heap.cpp
+++ b/vm/alloc/Heap.cpp
@@ -30,8 +30,9 @@
#include "alloc/MarkSweep.h"
#include "os/os.h"
-#include <sys/time.h>
+#include <sys/mman.h>
#include <sys/resource.h>
+#include <sys/time.h>
#include <limits.h>
#include <errno.h>
diff --git a/vm/alloc/HeapSource.cpp b/vm/alloc/HeapSource.cpp
index f6c2464bc..8f1c4cbe2 100644
--- a/vm/alloc/HeapSource.cpp
+++ b/vm/alloc/HeapSource.cpp
@@ -17,6 +17,7 @@
#include <stdint.h>
#include <sys/mman.h>
#include <errno.h>
+#include <cutils/ashmem.h>
#define SIZE_MAX UINT_MAX // TODO: get SIZE_MAX from stdint.h
@@ -28,6 +29,7 @@
#include "alloc/HeapBitmap.h"
#include "alloc/HeapBitmapInlines.h"
+static void dvmHeapSourceUpdateMaxNativeFootprint();
static void snapIdealFootprint();
static void setIdealFootprint(size_t max);
static size_t getMaximumSize(const HeapSource *hs);
@@ -179,6 +181,14 @@ struct HeapSource {
HeapBitmap markBits;
/*
+ * Native allocations.
+ */
+ int32_t nativeBytesAllocated;
+ size_t nativeFootprintGCWatermark;
+ size_t nativeFootprintLimit;
+ bool nativeNeedToRunFinalization;
+
+ /*
* State for the GC daemon.
*/
bool hasGcThread;
@@ -379,6 +389,36 @@ static bool addInitialHeap(HeapSource *hs, mspace msp, size_t maximumSize)
}
/*
+ * A helper for addNewHeap(). Remap the new heap so that it will have
+ * a separate ashmem region with possibly a different name, etc. In
+ * practice, this is used to give the app heap a separate ashmem
+ * region from the zygote heap's.
+ */
+static bool remapNewHeap(HeapSource* hs, Heap* newHeap)
+{
+ char* newHeapBase = newHeap->base;
+ size_t rem_size = hs->heapBase + hs->heapLength - newHeapBase;
+ munmap(newHeapBase, rem_size);
+ int fd = ashmem_create_region("dalvik-heap", rem_size);
+ if (fd == -1) {
+ ALOGE("Unable to create an ashmem region for the new heap");
+ return false;
+ }
+ void* addr = mmap(newHeapBase, rem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
+ int ret = close(fd);
+ if (addr == MAP_FAILED) {
+ ALOGE("Unable to map an ashmem region for the new heap");
+ return false;
+ }
+ if (ret == -1) {
+ ALOGE("Unable to close fd for the ashmem region for the new heap");
+ munmap(newHeapBase, rem_size);
+ return false;
+ }
+ return true;
+}
+
+/*
* Adds an additional heap to the heap source. Returns false if there
* are too many heaps or insufficient free space to add another heap.
*/
@@ -416,6 +456,9 @@ static bool addNewHeap(HeapSource *hs)
heap.base = base;
heap.limit = heap.base + heap.maximumSize;
heap.brk = heap.base + morecoreStart;
+ if (!remapNewHeap(hs, &heap)) {
+ return false;
+ }
heap.msp = createMspace(base, morecoreStart, hs->minFree);
if (heap.msp == NULL) {
return false;
@@ -570,7 +613,7 @@ GcHeap* dvmHeapSourceStartup(size_t startSize, size_t maximumSize,
* among the heaps managed by the garbage collector.
*/
length = ALIGN_UP_TO_PAGE_SIZE(maximumSize);
- base = dvmAllocRegion(length, PROT_NONE, "dalvik-heap");
+ base = dvmAllocRegion(length, PROT_NONE, gDvm.zygote ? "dalvik-zygote" : "dalvik-heap");
if (base == NULL) {
return NULL;
}
@@ -606,6 +649,10 @@ GcHeap* dvmHeapSourceStartup(size_t startSize, size_t maximumSize,
hs->softLimit = SIZE_MAX; // no soft limit at first
hs->numHeaps = 0;
hs->sawZygote = gDvm.zygote;
+ hs->nativeBytesAllocated = 0;
+ hs->nativeFootprintGCWatermark = startSize;
+ hs->nativeFootprintLimit = startSize * 2;
+ hs->nativeNeedToRunFinalization = false;
hs->hasGcThread = false;
hs->heapBase = (char *)base;
hs->heapLength = length;
@@ -892,10 +939,45 @@ void* dvmHeapSourceAlloc(size_t n)
FRACTIONAL_MB(hs->softLimit), n);
return NULL;
}
- void* ptr = mspace_calloc(heap->msp, 1, n);
- if (ptr == NULL) {
- return NULL;
+ void* ptr;
+ if (gDvm.lowMemoryMode) {
+ /* This is only necessary because mspace_calloc always memsets the
+ * allocated memory to 0. This is bad for memory usage since it leads
+ * to dirty zero pages. If low memory mode is enabled, we use
+ * mspace_malloc which doesn't memset the allocated memory and madvise
+ * the page aligned region back to the kernel.
+ */
+ ptr = mspace_malloc(heap->msp, n);
+ if (ptr == NULL) {
+ return NULL;
+ }
+ uintptr_t zero_begin = (uintptr_t)ptr;
+ uintptr_t zero_end = (uintptr_t)ptr + n;
+ /* Calculate the page aligned region.
+ */
+ uintptr_t begin = ALIGN_UP_TO_PAGE_SIZE(zero_begin);
+ uintptr_t end = zero_end & ~(uintptr_t)(SYSTEM_PAGE_SIZE - 1);
+ /* If our allocation spans more than one page, we attempt to madvise.
+ */
+ if (begin < end) {
+ /* madvise the page aligned region to kernel.
+ */
+ madvise((void*)begin, end - begin, MADV_DONTNEED);
+ /* Zero the region after the page aligned region.
+ */
+ memset((void*)end, 0, zero_end - end);
+ /* Zero out the region before the page aligned region.
+ */
+ zero_end = begin;
+ }
+ memset((void*)zero_begin, 0, zero_end - zero_begin);
+ } else {
+ ptr = mspace_calloc(heap->msp, 1, n);
+ if (ptr == NULL) {
+ return NULL;
+ }
}
+
countAllocation(heap, ptr);
/*
* Check to see if a concurrent GC should be initiated.
@@ -1384,6 +1466,11 @@ void dvmHeapSourceGrowForUtilization()
//of free to start concurrent GC
heap->concurrentStartBytes = freeBytes - MIN(freeBytes * (float)(0.2), concurrentStart);
}
+
+ /* Mark that we need to run finalizers and update the native watermarks
+ * next time we attempt to register a native allocation.
+ */
+ gHs->nativeNeedToRunFinalization = true;
}
/*
@@ -1478,3 +1565,95 @@ void *dvmHeapSourceGetImmuneLimit(bool isPartial)
return NULL;
}
}
+
+static void dvmHeapSourceUpdateMaxNativeFootprint()
+{
+ /* Use the current target utilization ratio to determine the new native GC
+ * watermarks.
+ */
+ size_t nativeSize = gHs->nativeBytesAllocated;
+ size_t targetSize =
+ (nativeSize / gHs->targetUtilization) * HEAP_UTILIZATION_MAX;
+
+ if (targetSize > nativeSize + gHs->maxFree) {
+ targetSize = nativeSize + gHs->maxFree;
+ } else if (targetSize < nativeSize + gHs->minFree) {
+ targetSize = nativeSize + gHs->minFree;
+ }
+ gHs->nativeFootprintGCWatermark = targetSize;
+ gHs->nativeFootprintLimit = 2 * targetSize - nativeSize;
+}
+
+void dvmHeapSourceRegisterNativeAllocation(int bytes)
+{
+ /* If we have just done a GC, ensure that the finalizers are done and update
+ * the native watermarks.
+ */
+ if (gHs->nativeNeedToRunFinalization) {
+ dvmRunFinalization();
+ dvmHeapSourceUpdateMaxNativeFootprint();
+ gHs->nativeNeedToRunFinalization = false;
+ }
+
+ android_atomic_add(bytes, &gHs->nativeBytesAllocated);
+
+ if ((size_t)gHs->nativeBytesAllocated > gHs->nativeFootprintGCWatermark) {
+ /* The second watermark is higher than the gc watermark. If you hit
+ * this it means you are allocating native objects faster than the GC
+ * can keep up with. If this occurs, we do a GC for alloc.
+ */
+ if ((size_t)gHs->nativeBytesAllocated > gHs->nativeFootprintLimit) {
+ Thread* self = dvmThreadSelf();
+ dvmRunFinalization();
+ if (dvmCheckException(self)) {
+ return;
+ }
+ dvmLockHeap();
+ bool waited = dvmWaitForConcurrentGcToComplete();
+ dvmUnlockHeap();
+ if (waited) {
+ // Just finished a GC, attempt to run finalizers.
+ dvmRunFinalization();
+ if (dvmCheckException(self)) {
+ return;
+ }
+ }
+
+ // If we still are over the watermark, attempt a GC for alloc and run finalizers.
+ if ((size_t)gHs->nativeBytesAllocated > gHs->nativeFootprintLimit) {
+ dvmLockHeap();
+ dvmWaitForConcurrentGcToComplete();
+ dvmCollectGarbageInternal(GC_FOR_MALLOC);
+ dvmUnlockHeap();
+ dvmRunFinalization();
+ gHs->nativeNeedToRunFinalization = false;
+ if (dvmCheckException(self)) {
+ return;
+ }
+ }
+ /* We have just run finalizers, update the native watermark since
+ * it is very likely that finalizers released native managed
+ * allocations.
+ */
+ dvmHeapSourceUpdateMaxNativeFootprint();
+ } else {
+ dvmSignalCond(&gHs->gcThreadCond);
+ }
+ }
+}
+
+/*
+ * Called from VMRuntime.registerNativeFree.
+ */
+void dvmHeapSourceRegisterNativeFree(int bytes)
+{
+ int expected_size, new_size;
+ do {
+ expected_size = gHs->nativeBytesAllocated;
+ new_size = expected_size - bytes;
+ if (new_size < 0) {
+ break;
+ }
+ } while (android_atomic_cas(expected_size, new_size,
+ &gHs->nativeBytesAllocated));
+}
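
[Editor's note] The new native-allocation bookkeeping above uses an atomic add on registration and a CAS loop that refuses to drive the counter negative on free. The sketch below mirrors that clamp-at-zero loop using std::atomic in place of the cutils atomics; the names and the omitted watermark checks are assumptions for illustration.

    /*
     * Sketch of the native-byte accounting: atomic add on alloc, CAS loop on
     * free that ignores an over-free rather than going negative.
     */
    #include <atomic>
    #include <cstdio>

    static std::atomic<int> gNativeBytes(0);

    void registerNativeAllocation(int bytes) {
        gNativeBytes.fetch_add(bytes, std::memory_order_relaxed);
        // The real code compares the new total against GC watermarks here.
    }

    void registerNativeFree(int bytes) {
        int expected = gNativeBytes.load(std::memory_order_relaxed);
        int desired;
        do {
            desired = expected - bytes;
            if (desired < 0) return;             // never let the counter go negative
        } while (!gNativeBytes.compare_exchange_weak(expected, desired,
                                                     std::memory_order_relaxed));
    }

    int main() {
        registerNativeAllocation(4096);
        registerNativeFree(1024);
        registerNativeFree(1 << 20);             // over-free is ignored, not applied
        printf("tracked native bytes: %d\n", gNativeBytes.load());
        return 0;
    }
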
diff --git a/vm/alloc/HeapSource.h b/vm/alloc/HeapSource.h
index e1f682075..42fccb266 100644
--- a/vm/alloc/HeapSource.h
+++ b/vm/alloc/HeapSource.h
@@ -200,4 +200,14 @@ void *dvmHeapSourceGetImmuneLimit(bool isPartial);
*/
size_t dvmHeapSourceGetMaximumSize(void);
+/*
+ * Called from VMRuntime.registerNativeAllocation.
+ */
+void dvmHeapSourceRegisterNativeAllocation(int bytes);
+
+/*
+ * Called from VMRuntime.registerNativeFree.
+ */
+void dvmHeapSourceRegisterNativeFree(int bytes);
+
#endif // DALVIK_HEAP_SOURCE_H_
diff --git a/vm/alloc/MarkSweep.cpp b/vm/alloc/MarkSweep.cpp
index eb739e552..2781a7cfa 100644
--- a/vm/alloc/MarkSweep.cpp
+++ b/vm/alloc/MarkSweep.cpp
@@ -558,8 +558,9 @@ static void scanGrayObjects(GcMarkContext *ctx)
const u1 *base, *limit, *ptr, *dirty;
base = &h->cardTableBase[0];
- limit = dvmCardFromAddr((u1 *)dvmHeapSourceGetLimit());
- assert(limit <= &h->cardTableBase[h->cardTableLength]);
+ // The limit is the card one after the last accessible card.
+ limit = dvmCardFromAddr((u1 *)dvmHeapSourceGetLimit() - GC_CARD_SIZE) + 1;
+ assert(limit <= &base[h->cardTableOffset + h->cardTableLength]);
ptr = base;
for (;;) {
diff --git a/vm/analysis/Liveness.cpp b/vm/analysis/Liveness.cpp
index 18d5a1584..361d3cb39 100644
--- a/vm/analysis/Liveness.cpp
+++ b/vm/analysis/Liveness.cpp
@@ -62,7 +62,7 @@ bool dvmComputeLiveness(VerifierData* vdata)
const InsnFlags* insnFlags = vdata->insnFlags;
InstructionWidth* backwardWidth;
VfyBasicBlock* startGuess = NULL;
- BitVector* workBits;
+ BitVector* workBits = NULL;
bool result = false;
bool verbose = false; //= dvmWantVerboseVerification(vdata->method);
@@ -273,6 +273,7 @@ bool dvmComputeLiveness(VerifierData* vdata)
bail:
free(backwardWidth);
+ dvmFreeBitVector(workBits);
return result;
}
diff --git a/vm/analysis/Optimize.cpp b/vm/analysis/Optimize.cpp
index 953924eb4..b61b82c18 100644
--- a/vm/analysis/Optimize.cpp
+++ b/vm/analysis/Optimize.cpp
@@ -83,6 +83,7 @@ bool dvmCreateInlineSubsTable()
ALOGE("Unable to find method for inlining: %s.%s:%s",
ops[i].classDescriptor, ops[i].methodName,
ops[i].methodSignature);
+ free(table);
return false;
}
@@ -374,16 +375,16 @@ void dvmUpdateCodeUnit(const Method* meth, u2* ptr, u2 newVal)
* 16-bit op, we convert the opcode from "packed" form (e.g. 0x0108) to
* bytecode form (e.g. 0x08ff).
*/
-static inline void updateOpcode(const Method* meth, u2* ptr, Opcode opcode)
+static inline void updateOpcode(const Method* meth, u2* ptr, u2 opcode)
{
if (opcode >= 256) {
/* opcode low byte becomes high byte, low byte becomes 0xff */
assert((ptr[0] & 0xff) == 0xff);
- dvmUpdateCodeUnit(meth, ptr, (u2) (opcode << 8) | 0x00ff);
+ dvmUpdateCodeUnit(meth, ptr, (opcode << 8) | 0x00ff);
} else {
/* 8-bit op, just replace the low byte */
assert((ptr[0] & 0xff) != 0xff);
- dvmUpdateCodeUnit(meth, ptr, (ptr[0] & 0xff00) | (u2) opcode);
+ dvmUpdateCodeUnit(meth, ptr, (ptr[0] & 0xff00) | opcode);
}
}
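
[Editor's note] updateOpcode, shown above, encodes "packed" 16-bit opcodes (>= 0x100) as (opcode << 8) | 0xff in the code unit, while 8-bit opcodes just replace the low byte. A small stand-alone check of that bit manipulation, with no VM types involved:

    /*
     * Sketch of the code-unit rewrite: packed 0x0108 becomes bytecode form
     * 0x08ff; an 8-bit opcode keeps the existing high byte of the code unit.
     */
    #include <cstdint>
    #include <cstdio>

    static uint16_t encodeOpcode(uint16_t codeUnit, uint16_t opcode) {
        if (opcode >= 256) {
            // opcode low byte moves to the high byte, low byte becomes 0xff
            return (uint16_t)((opcode << 8) | 0x00ff);
        }
        // keep the high byte (register fields), swap in the 8-bit opcode
        return (uint16_t)((codeUnit & 0xff00) | opcode);
    }

    int main() {
        printf("0x%04x\n", encodeOpcode(0x12ff, 0x0108));  // -> 0x08ff
        printf("0x%04x\n", encodeOpcode(0x1262, 0x006f));  // -> 0x126f
        return 0;
    }
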
diff --git a/vm/analysis/RegisterMap.cpp b/vm/analysis/RegisterMap.cpp
index c62ec4760..197fb7ab4 100644
--- a/vm/analysis/RegisterMap.cpp
+++ b/vm/analysis/RegisterMap.cpp
@@ -508,6 +508,8 @@ static bool verifyMap(VerifierData* vdata, const RegisterMap* pMap)
/* shouldn't happen */
ALOGE("GLITCH: bad format (%d)", format);
dvmAbort();
+ /* Make compiler happy */
+ addr = 0;
}
const RegType* regs = vdata->registerLines[addr].regTypes;
diff --git a/vm/analysis/VfyBasicBlock.cpp b/vm/analysis/VfyBasicBlock.cpp
index d6c4b79df..55aa6d486 100644
--- a/vm/analysis/VfyBasicBlock.cpp
+++ b/vm/analysis/VfyBasicBlock.cpp
@@ -544,6 +544,9 @@ void dvmFreeVfyBasicBlocks(VerifierData* vdata)
continue;
dvmPointerSetFree(block->predecessors);
+ dvmFreeBitVector(block->liveRegs);
free(block);
}
+
+ free(vdata->basicBlocks);
}
diff --git a/vm/compiler/Compiler.cpp b/vm/compiler/Compiler.cpp
index 188027f3d..f5b96b1ef 100644
--- a/vm/compiler/Compiler.cpp
+++ b/vm/compiler/Compiler.cpp
@@ -27,7 +27,7 @@
#endif
extern "C" void dvmCompilerTemplateStart(void);
-extern "C" void dmvCompilerTemplateEnd(void);
+extern "C" void dvmCompilerTemplateEnd(void);
static inline bool workQueueLength(void)
{
@@ -182,7 +182,7 @@ bool dvmCompilerSetupCodeCache(void)
MAP_PRIVATE , fd, 0);
close(fd);
if (gDvmJit.codeCache == MAP_FAILED) {
- ALOGE("Failed to mmap the JIT code cache: %s", strerror(errno));
+ ALOGE("Failed to mmap the JIT code cache of size %d: %s", gDvmJit.codeCacheSize, strerror(errno));
return false;
}
@@ -193,7 +193,7 @@ bool dvmCompilerSetupCodeCache(void)
#ifndef ARCH_IA32
/* Copy the template code into the beginning of the code cache */
- int templateSize = (intptr_t) dmvCompilerTemplateEnd -
+ int templateSize = (intptr_t) dvmCompilerTemplateEnd -
(intptr_t) dvmCompilerTemplateStart;
memcpy((void *) gDvmJit.codeCache,
(void *) dvmCompilerTemplateStart,
@@ -213,14 +213,6 @@ bool dvmCompilerSetupCodeCache(void)
/* Only flush the part in the code cache that is being used now */
dvmCompilerCacheFlush((intptr_t) gDvmJit.codeCache,
(intptr_t) gDvmJit.codeCache + templateSize, 0);
-
- int result = mprotect(gDvmJit.codeCache, gDvmJit.codeCacheSize,
- PROTECT_CODE_CACHE_ATTRS);
-
- if (result == -1) {
- ALOGE("Failed to remove the write permission for the code cache");
- dvmAbort();
- }
#else
gDvmJit.codeCacheByteUsed = 0;
stream = (char*)gDvmJit.codeCache + gDvmJit.codeCacheByteUsed;
@@ -232,6 +224,14 @@ bool dvmCompilerSetupCodeCache(void)
ALOGV("stream = %p after initJIT", stream);
#endif
+ int result = mprotect(gDvmJit.codeCache, gDvmJit.codeCacheSize,
+ PROTECT_CODE_CACHE_ATTRS);
+
+ if (result == -1) {
+ ALOGE("Failed to remove the write permission for the code cache");
+ dvmAbort();
+ }
+
return true;
}
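
[Editor's note] The Compiler.cpp hunk above moves the mprotect call so the write permission on the JIT code cache is dropped after both the ARM template-copy path and the IA32 init path. The sketch below shows that generic protect-after-setup pattern on an anonymous mapping; the size and the NOP payload are illustrative only.

    /*
     * Sketch of the code-cache protection pattern: map writable, emit code,
     * then mprotect the whole region to read+exec.
     */
    #include <sys/mman.h>
    #include <cstdio>
    #include <cstring>

    int main() {
        const size_t cacheSize = 64 * 1024;
        void* cache = mmap(nullptr, cacheSize, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (cache == MAP_FAILED) return 1;

        // "Emit" something while the cache is still writable.
        memset(cache, 0x90, 64);                 // x86 NOPs as a stand-in payload

        // Drop the write permission once setup is done (the step the patch
        // relocates so it also covers the IA32 path).
        if (mprotect(cache, cacheSize, PROT_READ | PROT_EXEC) == -1) {
            perror("mprotect");
            return 1;
        }
        puts("code cache is now read+exec");
        munmap(cache, cacheSize);
        return 0;
    }
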
diff --git a/vm/compiler/Loop.cpp b/vm/compiler/Loop.cpp
index f82668628..dc04a1135 100644
--- a/vm/compiler/Loop.cpp
+++ b/vm/compiler/Loop.cpp
@@ -352,7 +352,7 @@ static bool doLoopBodyCodeMotion(CompilationUnit *cUnit)
dvmCompilerDataFlowAttributes[mir->dalvikInsn.opcode];
/* Skip extended MIR instructions */
- if (dInsn->opcode >= kNumPackedOpcodes) continue;
+ if ((u2) dInsn->opcode >= kNumPackedOpcodes) continue;
int instrFlags = dexGetFlagsFromOpcode(dInsn->opcode);
diff --git a/vm/compiler/codegen/arm/ArmLIR.h b/vm/compiler/codegen/arm/ArmLIR.h
index cbd4c70d9..e159aecdb 100644
--- a/vm/compiler/codegen/arm/ArmLIR.h
+++ b/vm/compiler/codegen/arm/ArmLIR.h
@@ -627,6 +627,8 @@ typedef enum ArmOpcode {
kThumb2Dmb, /* dmb [1111001110111111100011110101] option[3-0] */
kThumb2LdrPcReln12, /* ldr rd,[pc,-#imm12] [1111100011011111] rt[15-12]
imm12[11-0] */
+ kThumb2RsbRRR, /* rsb [111010111101] rn[19..16] [0000] rd[11..8]
+ [0000] rm[3..0] */
kThumbUndefined, /* undefined [11011110xxxxxxxx] */
kArmLast,
} ArmOpcode;
diff --git a/vm/compiler/codegen/arm/Assemble.cpp b/vm/compiler/codegen/arm/Assemble.cpp
index 7406d3e88..10572eb5f 100644
--- a/vm/compiler/codegen/arm/Assemble.cpp
+++ b/vm/compiler/codegen/arm/Assemble.cpp
@@ -881,6 +881,11 @@ ArmEncodingMap EncodingMap[kArmLast] = {
kFmtUnused, -1, -1,
IS_BINARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD,
"ldr", "r!0d, [r15pc, -#!1d]", 2),
+ ENCODING_MAP(kThumb2RsbRRR, 0xebd00000, /* setflags encoding */
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+ kFmtShift, -1, -1,
+ IS_QUAD_OP | REG_DEF0_USE12 | SETS_CCODES,
+ "rsb", "r!0d, r!1d, r!2d!3H", 2),
ENCODING_MAP(kThumbUndefined, 0xde00,
kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, NO_OPERAND,
@@ -2148,6 +2153,8 @@ void dvmCompilerSortAndPrintTraceProfiles()
}
ALOGD("JIT: Average execution count -> %d",(int)(sum / numTraces));
+ // How efficiently are we using code cache memory? Bigger is better.
+ ALOGD("JIT: CodeCache efficiency -> %.2f",(float)sum / (float)gDvmJit.codeCacheByteUsed);
/* Dump the sorted entries. The count of each trace will be reset to 0. */
for (i=0; i < gDvmJit.jitTableSize; i++) {
diff --git a/vm/compiler/codegen/arm/CodegenDriver.cpp b/vm/compiler/codegen/arm/CodegenDriver.cpp
index 1585aa0d0..de53b00fb 100644
--- a/vm/compiler/codegen/arm/CodegenDriver.cpp
+++ b/vm/compiler/codegen/arm/CodegenDriver.cpp
@@ -2257,10 +2257,7 @@ static bool handleEasyMultiply(CompilationUnit *cUnit,
} else {
// Reverse subtract: (src << (shift + 1)) - src.
assert(powerOfTwoMinusOne);
- // TODO: rsb dst, src, src lsl#lowestSetBit(lit + 1)
- int tReg = dvmCompilerAllocTemp(cUnit);
- opRegRegImm(cUnit, kOpLsl, tReg, rlSrc.lowReg, lowestSetBit(lit + 1));
- opRegRegReg(cUnit, kOpSub, rlResult.lowReg, tReg, rlSrc.lowReg);
+ genMultiplyByShiftAndReverseSubtract(cUnit, rlSrc, rlResult, lowestSetBit(lit + 1));
}
storeValue(cUnit, rlDest, rlResult);
return true;
diff --git a/vm/compiler/codegen/arm/Thumb/Gen.cpp b/vm/compiler/codegen/arm/Thumb/Gen.cpp
index abc4420f7..622f47eff 100644
--- a/vm/compiler/codegen/arm/Thumb/Gen.cpp
+++ b/vm/compiler/codegen/arm/Thumb/Gen.cpp
@@ -274,3 +274,11 @@ static void genMultiplyByTwoBitMultiplier(CompilationUnit *cUnit,
// to do a regular multiply.
opRegRegImm(cUnit, kOpMul, rlResult.lowReg, rlSrc.lowReg, lit);
}
+
+static void genMultiplyByShiftAndReverseSubtract(CompilationUnit *cUnit,
+ RegLocation rlSrc, RegLocation rlResult, int lit)
+{
+ int tReg = dvmCompilerAllocTemp(cUnit);
+ opRegRegImm(cUnit, kOpLsl, tReg, rlSrc.lowReg, lit);
+ opRegRegReg(cUnit, kOpSub, rlResult.lowReg, tReg, rlSrc.lowReg);
+}
diff --git a/vm/compiler/codegen/arm/Thumb2/Gen.cpp b/vm/compiler/codegen/arm/Thumb2/Gen.cpp
index aca99e717..df37478e3 100644
--- a/vm/compiler/codegen/arm/Thumb2/Gen.cpp
+++ b/vm/compiler/codegen/arm/Thumb2/Gen.cpp
@@ -452,3 +452,10 @@ static void genMultiplyByTwoBitMultiplier(CompilationUnit *cUnit,
opRegRegImm(cUnit, kOpLsl, rlResult.lowReg, rlResult.lowReg, firstBit);
}
}
+
+static void genMultiplyByShiftAndReverseSubtract(CompilationUnit *cUnit,
+ RegLocation rlSrc, RegLocation rlResult, int lit)
+{
+ newLIR4(cUnit, kThumb2RsbRRR, rlResult.lowReg, rlSrc.lowReg, rlSrc.lowReg,
+ encodeShift(kArmLsl, lit));
+}
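
[Editor's note] The new genMultiplyByShiftAndReverseSubtract helpers above strength-reduce a multiply by a literal of the form (2^n - 1) into (src << n) - src, which Thumb2 can express as a single "rsb dst, src, src lsl #n". A plain C++ check of the equivalence, with no JIT types:

    /*
     * Sketch: multiplying by (2^n - 1) as (src << n) - src.
     */
    #include <cstdint>
    #include <cstdio>

    static int32_t mulByPowerOfTwoMinusOne(int32_t src, int shift) {
        // equivalent to src * ((1 << shift) - 1)
        return (int32_t)((uint32_t)src << shift) - src;
    }

    int main() {
        for (int shift = 1; shift <= 8; ++shift) {
            int32_t lit = (1 << shift) - 1;      // 1, 3, 7, 15, ...
            printf("1234 * %d: rsb form=%d, mul form=%d\n",
                   lit, mulByPowerOfTwoMinusOne(1234, shift), 1234 * lit);
        }
        return 0;
    }
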
diff --git a/vm/compiler/codegen/arm/armv5te-vfp/ArchVariant.cpp b/vm/compiler/codegen/arm/armv5te-vfp/ArchVariant.cpp
index 713ecfa10..6c89b11e9 100644
--- a/vm/compiler/codegen/arm/armv5te-vfp/ArchVariant.cpp
+++ b/vm/compiler/codegen/arm/armv5te-vfp/ArchVariant.cpp
@@ -55,7 +55,13 @@ bool dvmCompilerArchVariantInit(void)
if (gDvmJit.threshold == 0) {
gDvmJit.threshold = 200;
}
- gDvmJit.codeCacheSize = 512*1024;
+ if (gDvmJit.codeCacheSize == DEFAULT_CODE_CACHE_SIZE) {
+ gDvmJit.codeCacheSize = 512 * 1024;
+ } else if ((gDvmJit.codeCacheSize == 0) && (gDvm.executionMode == kExecutionModeJit)) {
+ gDvm.executionMode = kExecutionModeInterpFast;
+ }
+ /* Hard limit for Arm of 2M */
+ assert(gDvmJit.codeCacheSize <= 2 * 1024 * 1024);
#if defined(WITH_SELF_VERIFICATION)
/* Force into blocking mode */
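
[Editor's note] This ArchVariant change (repeated below for the other ARM, MIPS, and x86 variants) stops unconditionally overwriting gDvmJit.codeCacheSize: a DEFAULT sentinel means "no override, apply the per-arch default", and an explicit 0 disables the JIT by falling back to the fast interpreter. A sketch of that sentinel pattern; kDefaultCodeCacheSize and the enum values are stand-ins, not the real DEFAULT_CODE_CACHE_SIZE.

    /*
     * Sketch of the code-cache-size configuration pattern.
     */
    #include <cstdio>

    enum ExecutionMode { kModeInterpFast, kModeJit };

    static const int kDefaultCodeCacheSize = -1;   // stand-in sentinel

    struct JitConfig { int codeCacheSize; ExecutionMode mode; };

    static void applyArchDefaults(JitConfig* cfg, int archDefaultBytes) {
        if (cfg->codeCacheSize == kDefaultCodeCacheSize) {
            cfg->codeCacheSize = archDefaultBytes;  // e.g. 512 KiB or 1500 KiB
        } else if (cfg->codeCacheSize == 0 && cfg->mode == kModeJit) {
            cfg->mode = kModeInterpFast;            // size 0 disables the JIT
        }
    }

    int main() {
        JitConfig cfg = { kDefaultCodeCacheSize, kModeJit };
        applyArchDefaults(&cfg, 512 * 1024);
        printf("cache=%d bytes, jit=%s\n", cfg.codeCacheSize,
               cfg.mode == kModeJit ? "on" : "off");
        return 0;
    }
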
diff --git a/vm/compiler/codegen/arm/armv5te/ArchVariant.cpp b/vm/compiler/codegen/arm/armv5te/ArchVariant.cpp
index 25d650e6d..5c7fbbe41 100644
--- a/vm/compiler/codegen/arm/armv5te/ArchVariant.cpp
+++ b/vm/compiler/codegen/arm/armv5te/ArchVariant.cpp
@@ -55,7 +55,13 @@ bool dvmCompilerArchVariantInit(void)
if (gDvmJit.threshold == 0) {
gDvmJit.threshold = 200;
}
- gDvmJit.codeCacheSize = 512*1024;
+ if (gDvmJit.codeCacheSize == DEFAULT_CODE_CACHE_SIZE) {
+ gDvmJit.codeCacheSize = 512 * 1024;
+ } else if ((gDvmJit.codeCacheSize == 0) && (gDvm.executionMode == kExecutionModeJit)) {
+ gDvm.executionMode = kExecutionModeInterpFast;
+ }
+ /* Hard limit for Arm of 2M */
+ assert(gDvmJit.codeCacheSize <= 2 * 1024 * 1024);
#if defined(WITH_SELF_VERIFICATION)
/* Force into blocking mode */
diff --git a/vm/compiler/codegen/arm/armv7-a-neon/ArchVariant.cpp b/vm/compiler/codegen/arm/armv7-a-neon/ArchVariant.cpp
index 40ee04072..a81a2e735 100644
--- a/vm/compiler/codegen/arm/armv7-a-neon/ArchVariant.cpp
+++ b/vm/compiler/codegen/arm/armv7-a-neon/ArchVariant.cpp
@@ -27,7 +27,7 @@ JitInstructionSetType dvmCompilerInstructionSet(void)
/* First, declare dvmCompiler_TEMPLATE_XXX for each template */
#define JIT_TEMPLATE(X) extern "C" void dvmCompiler_TEMPLATE_##X();
-#include "../../../template/armv5te-vfp/TemplateOpList.h"
+#include "../../../template/armv7-a-neon/TemplateOpList.h"
#undef JIT_TEMPLATE
/* Architecture-specific initializations and checks go here */
@@ -41,7 +41,7 @@ bool dvmCompilerArchVariantInit(void)
*/
#define JIT_TEMPLATE(X) templateEntryOffsets[i++] = \
(intptr_t) dvmCompiler_TEMPLATE_##X - (intptr_t) dvmCompilerTemplateStart;
-#include "../../../template/armv5te-vfp/TemplateOpList.h"
+#include "../../../template/armv7-a-neon/TemplateOpList.h"
#undef JIT_TEMPLATE
/* Target-specific configuration */
@@ -50,7 +50,13 @@ bool dvmCompilerArchVariantInit(void)
if (gDvmJit.threshold == 0) {
gDvmJit.threshold = 40;
}
- gDvmJit.codeCacheSize = 1024*1024;
+ if (gDvmJit.codeCacheSize == DEFAULT_CODE_CACHE_SIZE) {
+ gDvmJit.codeCacheSize = 1500 * 1024;
+ } else if ((gDvmJit.codeCacheSize == 0) && (gDvm.executionMode == kExecutionModeJit)) {
+ gDvm.executionMode = kExecutionModeInterpFast;
+ }
+ /* Hard limit for Arm of 2M */
+ assert(gDvmJit.codeCacheSize <= 2 * 1024 * 1024);
#if defined(WITH_SELF_VERIFICATION)
/* Force into blocking */
diff --git a/vm/compiler/codegen/arm/armv7-a-neon/ArchVariant.h b/vm/compiler/codegen/arm/armv7-a-neon/ArchVariant.h
index 33e262cec..0cb82f654 100644
--- a/vm/compiler/codegen/arm/armv7-a-neon/ArchVariant.h
+++ b/vm/compiler/codegen/arm/armv7-a-neon/ArchVariant.h
@@ -20,7 +20,7 @@
/* Create the TemplateOpcode enum */
#define JIT_TEMPLATE(X) TEMPLATE_##X,
typedef enum {
-#include "../../../template/armv5te-vfp/TemplateOpList.h"
+#include "../../../template/armv7-a-neon/TemplateOpList.h"
/*
* For example,
* TEMPLATE_CMP_LONG,
diff --git a/vm/compiler/codegen/arm/armv7-a/ArchVariant.cpp b/vm/compiler/codegen/arm/armv7-a/ArchVariant.cpp
index 40ee04072..72ae3ce9c 100644
--- a/vm/compiler/codegen/arm/armv7-a/ArchVariant.cpp
+++ b/vm/compiler/codegen/arm/armv7-a/ArchVariant.cpp
@@ -27,7 +27,7 @@ JitInstructionSetType dvmCompilerInstructionSet(void)
/* First, declare dvmCompiler_TEMPLATE_XXX for each template */
#define JIT_TEMPLATE(X) extern "C" void dvmCompiler_TEMPLATE_##X();
-#include "../../../template/armv5te-vfp/TemplateOpList.h"
+#include "../../../template/armv7-a/TemplateOpList.h"
#undef JIT_TEMPLATE
/* Architecture-specific initializations and checks go here */
@@ -41,7 +41,7 @@ bool dvmCompilerArchVariantInit(void)
*/
#define JIT_TEMPLATE(X) templateEntryOffsets[i++] = \
(intptr_t) dvmCompiler_TEMPLATE_##X - (intptr_t) dvmCompilerTemplateStart;
-#include "../../../template/armv5te-vfp/TemplateOpList.h"
+#include "../../../template/armv7-a/TemplateOpList.h"
#undef JIT_TEMPLATE
/* Target-specific configuration */
@@ -50,7 +50,13 @@ bool dvmCompilerArchVariantInit(void)
if (gDvmJit.threshold == 0) {
gDvmJit.threshold = 40;
}
- gDvmJit.codeCacheSize = 1024*1024;
+ if (gDvmJit.codeCacheSize == DEFAULT_CODE_CACHE_SIZE) {
+ gDvmJit.codeCacheSize = 1500 * 1024;
+ } else if ((gDvmJit.codeCacheSize == 0) && (gDvm.executionMode == kExecutionModeJit)) {
+ gDvm.executionMode = kExecutionModeInterpFast;
+ }
+ /* Hard limit for Arm of 2M */
+ assert(gDvmJit.codeCacheSize <= 2 * 1024 * 1024);
#if defined(WITH_SELF_VERIFICATION)
/* Force into blocking */
diff --git a/vm/compiler/codegen/arm/armv7-a/ArchVariant.h b/vm/compiler/codegen/arm/armv7-a/ArchVariant.h
index b4f4eb7b7..8003f734f 100644
--- a/vm/compiler/codegen/arm/armv7-a/ArchVariant.h
+++ b/vm/compiler/codegen/arm/armv7-a/ArchVariant.h
@@ -20,7 +20,7 @@
/* Create the TemplateOpcode enum */
#define JIT_TEMPLATE(X) TEMPLATE_##X,
enum TemplateOpcode {
-#include "../../../template/armv5te-vfp/TemplateOpList.h"
+#include "../../../template/armv7-a/TemplateOpList.h"
/*
* For example,
* TEMPLATE_CMP_LONG,
diff --git a/vm/compiler/codegen/mips/mips/ArchVariant.cpp b/vm/compiler/codegen/mips/mips/ArchVariant.cpp
index d720f85af..10f5da10d 100644
--- a/vm/compiler/codegen/mips/mips/ArchVariant.cpp
+++ b/vm/compiler/codegen/mips/mips/ArchVariant.cpp
@@ -55,7 +55,11 @@ bool dvmCompilerArchVariantInit(void)
if (gDvmJit.threshold == 0) {
gDvmJit.threshold = 200;
}
- gDvmJit.codeCacheSize = 512*1024;
+ if (gDvmJit.codeCacheSize == DEFAULT_CODE_CACHE_SIZE) {
+ gDvmJit.codeCacheSize = 512 * 1024;
+ } else if ((gDvmJit.codeCacheSize == 0) && (gDvm.executionMode == kExecutionModeJit)) {
+ gDvm.executionMode = kExecutionModeInterpFast;
+ }
#if defined(WITH_SELF_VERIFICATION)
/* Force into blocking mode */
diff --git a/vm/compiler/codegen/x86/CodegenInterface.cpp b/vm/compiler/codegen/x86/CodegenInterface.cpp
index 46f097971..337bd61ae 100644
--- a/vm/compiler/codegen/x86/CodegenInterface.cpp
+++ b/vm/compiler/codegen/x86/CodegenInterface.cpp
@@ -67,7 +67,11 @@ bool dvmCompilerArchInit() {
if (gDvmJit.threshold == 0) {
gDvmJit.threshold = 255;
}
- gDvmJit.codeCacheSize = 512*1024;
+ if (gDvmJit.codeCacheSize == DEFAULT_CODE_CACHE_SIZE) {
+ gDvmJit.codeCacheSize = 512 * 1024;
+ } else if ((gDvmJit.codeCacheSize == 0) && (gDvm.executionMode == kExecutionModeJit)) {
+ gDvm.executionMode = kExecutionModeInterpFast;
+ }
gDvmJit.optLevel = kJitOptLevelO1;
//Disable Method-JIT
@@ -1081,14 +1085,14 @@ void dvmCompilerMIR2LIR(CompilationUnit *cUnit, JitTranslationInfo *info)
info->codeAddress = NULL;
stream = (char*)gDvmJit.codeCache + gDvmJit.codeCacheByteUsed;
+ streamStart = stream; /* trace start before alignment */
// TODO: compile into a temporary buffer and then copy into the code cache.
// That would let us leave the code cache unprotected for a shorter time.
size_t unprotected_code_cache_bytes =
- gDvmJit.codeCacheSize - gDvmJit.codeCacheByteUsed - CODE_CACHE_PADDING;
- UNPROTECT_CODE_CACHE(stream, unprotected_code_cache_bytes);
+ gDvmJit.codeCacheSize - gDvmJit.codeCacheByteUsed;
+ UNPROTECT_CODE_CACHE(streamStart, unprotected_code_cache_bytes);
- streamStart = stream; /* trace start before alignment */
stream += EXTRA_BYTES_FOR_CHAINING; /* This is needed for chaining. Add the bytes before the alignment */
stream = (char*)(((unsigned int)stream + 0xF) & ~0xF); /* Align trace to 16-bytes */
streamMethodStart = stream; /* code start */
@@ -1248,7 +1252,7 @@ void dvmCompilerMIR2LIR(CompilationUnit *cUnit, JitTranslationInfo *info)
if(cg_ret < 0) {
endOfTrace(true/*freeOnly*/);
cUnit->baseAddr = NULL;
- PROTECT_CODE_CACHE(stream, unprotected_code_cache_bytes);
+ PROTECT_CODE_CACHE(streamStart, unprotected_code_cache_bytes);
return;
}
} else {
@@ -1289,7 +1293,7 @@ void dvmCompilerMIR2LIR(CompilationUnit *cUnit, JitTranslationInfo *info)
gDvmJit.codeCacheFull = true;
cUnit->baseAddr = NULL;
endOfTrace(true/*freeOnly*/);
- PROTECT_CODE_CACHE(stream, unprotected_code_cache_bytes);
+ PROTECT_CODE_CACHE(streamStart, unprotected_code_cache_bytes);
return;
}
}
@@ -1383,7 +1387,7 @@ gen_fallthrough:
gDvmJit.codeCacheFull = true;
cUnit->baseAddr = NULL;
endOfTrace(true); /* need to free structures */
- PROTECT_CODE_CACHE(stream, unprotected_code_cache_bytes);
+ PROTECT_CODE_CACHE(streamStart, unprotected_code_cache_bytes);
return;
}
}
@@ -1399,7 +1403,7 @@ gen_fallthrough:
*/
ALOGI("JIT code cache full after endOfTrace (trace uses %uB)", (stream - streamStart));
cUnit->baseAddr = NULL;
- PROTECT_CODE_CACHE(stream, unprotected_code_cache_bytes);
+ PROTECT_CODE_CACHE(streamStart, unprotected_code_cache_bytes);
return;
}
@@ -1421,7 +1425,7 @@ gen_fallthrough:
ALOGI("JIT code cache full after ChainingCellCounts (trace uses %uB)", (stream - streamStart));
gDvmJit.codeCacheFull = true;
cUnit->baseAddr = NULL;
- PROTECT_CODE_CACHE(stream, unprotected_code_cache_bytes);
+ PROTECT_CODE_CACHE(streamStart, unprotected_code_cache_bytes);
return;
}
@@ -1430,7 +1434,7 @@ gen_fallthrough:
*pOffset = streamCountStart - streamMethodStart; /* from codeAddr */
pOffset[1] = streamChainingStart - streamMethodStart;
- PROTECT_CODE_CACHE(stream, unprotected_code_cache_bytes);
+ PROTECT_CODE_CACHE(streamStart, unprotected_code_cache_bytes);
gDvmJit.codeCacheByteUsed += (stream - streamStart);
if (cUnit->printMe) {
diff --git a/vm/compiler/codegen/x86/libenc/enc_wrapper.cpp b/vm/compiler/codegen/x86/libenc/enc_wrapper.cpp
index cf53cead3..71e0e0697 100644
--- a/vm/compiler/codegen/x86/libenc/enc_wrapper.cpp
+++ b/vm/compiler/codegen/x86/libenc/enc_wrapper.cpp
@@ -17,10 +17,10 @@
#include <stdio.h>
#include <assert.h>
#include <limits.h>
+#include "cutils/log.h"
#include "enc_base.h"
#include "enc_wrapper.h"
#include "dec_base.h"
-#include "utils/Log.h"
//#define PRINT_ENCODER_STREAM
bool dump_x86_inst = false;
diff --git a/vm/compiler/template/armv5te/TEMPLATE_MUL_LONG.S b/vm/compiler/template/armv5te/TEMPLATE_MUL_LONG.S
index 8a9b11574..6652b71a8 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_MUL_LONG.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_MUL_LONG.S
@@ -22,7 +22,6 @@
mul ip, r2, r1 @ ip<- ZxW
umull r9, r10, r2, r0 @ r9/r10 <- ZxX
mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
- add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX))
- mov r0,r9
- mov r1,r10
+ mov r0, r9
+ add r1, r2, r10 @ r1<- r10 + low(ZxW + (YxX))
bx lr
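
[Editor's note] The MUL_LONG template computes a 64x64->64 multiply from 32-bit partial products; the change above only folds the final add directly into the result register instead of going through r10. The partial-product decomposition the template implements, written out in C++ for reference (variable names follow the comments in the assembly):

    /*
     * Sketch: with v1 = W:X and v2 = Y:Z (high:low 32-bit halves),
     *   low 32  = low(X*Z)
     *   high 32 = high(X*Z) + X*Y + Z*W      (all mod 2^32)
     */
    #include <cstdint>
    #include <cstdio>

    static uint64_t mulLong(uint64_t v1, uint64_t v2) {
        uint32_t X = (uint32_t)v1, W = (uint32_t)(v1 >> 32);
        uint32_t Z = (uint32_t)v2, Y = (uint32_t)(v2 >> 32);
        uint64_t xz = (uint64_t)X * Z;               // umull r9, r10, r2, r0
        uint32_t hi = (uint32_t)(xz >> 32)
                    + X * Y                          // mla: YxX ...
                    + Z * W;                         //      ... + ZxW
        return ((uint64_t)hi << 32) | (uint32_t)xz;
    }

    int main() {
        uint64_t a = 0x123456789abcdef0ULL, b = 0x0fedcba987654321ULL;
        printf("template form: %016llx\n", (unsigned long long)mulLong(a, b));
        printf("native form  : %016llx\n", (unsigned long long)(a * b));
        return 0;
    }
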
diff --git a/vm/compiler/template/armv5te/TEMPLATE_RETURN.S b/vm/compiler/template/armv5te/TEMPLATE_RETURN.S
index b10afcf3f..e8e2d5273 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_RETURN.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_RETURN.S
@@ -17,12 +17,12 @@
ldr r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
ldr rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
+ ldr r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
#if !defined(WITH_SELF_VERIFICATION)
ldr r9, [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
#else
mov r9, #0 @ disable chaining
#endif
- ldr r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
@ r2<- method we're returning to
cmp r2, #0 @ break frame?
#if !defined(WITH_SELF_VERIFICATION)
diff --git a/vm/compiler/template/armv5te/TEMPLATE_STRING_INDEXOF.S b/vm/compiler/template/armv5te/TEMPLATE_STRING_INDEXOF.S
index bdfdf28f5..d97037218 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_STRING_INDEXOF.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_STRING_INDEXOF.S
@@ -15,22 +15,23 @@
* r2: Starting offset in string data
*/
+ ldr r3, [r0, #STRING_FIELDOFF_VALUE]
ldr r7, [r0, #STRING_FIELDOFF_OFFSET]
ldr r8, [r0, #STRING_FIELDOFF_COUNT]
- ldr r0, [r0, #STRING_FIELDOFF_VALUE]
+
/*
* At this point, we have:
- * r0: object pointer
* r1: char to match
* r2: starting offset
+ * r3: object pointer (final result -> r0)
* r7: offset
* r8: string length
*/
/* Build pointer to start of string data */
- add r0, #16
- add r0, r0, r7, lsl #1
+ add r3, #16
+ add r0, r3, r7, lsl #1
/* Save a copy of starting data in r7 */
mov r7, r0
diff --git a/vm/compiler/template/armv5te/footer.S b/vm/compiler/template/armv5te/footer.S
index b567a907c..e6740d22f 100644
--- a/vm/compiler/template/armv5te/footer.S
+++ b/vm/compiler/template/armv5te/footer.S
@@ -123,7 +123,7 @@
.L__aeabi_cfcmple:
.word __aeabi_cfcmple
- .global dmvCompilerTemplateEnd
-dmvCompilerTemplateEnd:
+ .global dvmCompilerTemplateEnd
+dvmCompilerTemplateEnd:
#endif /* WITH_JIT */
diff --git a/vm/compiler/template/armv7-a-neon/TemplateOpList.h b/vm/compiler/template/armv7-a-neon/TemplateOpList.h
index 0365ba4d2..8d0f8d6f7 100644
--- a/vm/compiler/template/armv7-a-neon/TemplateOpList.h
+++ b/vm/compiler/template/armv7-a-neon/TemplateOpList.h
@@ -31,25 +31,6 @@ JIT_TEMPLATE(MUL_LONG)
JIT_TEMPLATE(SHL_LONG)
JIT_TEMPLATE(SHR_LONG)
JIT_TEMPLATE(USHR_LONG)
-JIT_TEMPLATE(ADD_FLOAT_VFP)
-JIT_TEMPLATE(SUB_FLOAT_VFP)
-JIT_TEMPLATE(MUL_FLOAT_VFP)
-JIT_TEMPLATE(DIV_FLOAT_VFP)
-JIT_TEMPLATE(ADD_DOUBLE_VFP)
-JIT_TEMPLATE(SUB_DOUBLE_VFP)
-JIT_TEMPLATE(MUL_DOUBLE_VFP)
-JIT_TEMPLATE(DIV_DOUBLE_VFP)
-JIT_TEMPLATE(DOUBLE_TO_FLOAT_VFP)
-JIT_TEMPLATE(DOUBLE_TO_INT_VFP)
-JIT_TEMPLATE(FLOAT_TO_DOUBLE_VFP)
-JIT_TEMPLATE(FLOAT_TO_INT_VFP)
-JIT_TEMPLATE(INT_TO_DOUBLE_VFP)
-JIT_TEMPLATE(INT_TO_FLOAT_VFP)
-JIT_TEMPLATE(CMPG_DOUBLE_VFP)
-JIT_TEMPLATE(CMPL_DOUBLE_VFP)
-JIT_TEMPLATE(CMPG_FLOAT_VFP)
-JIT_TEMPLATE(CMPL_FLOAT_VFP)
-JIT_TEMPLATE(SQRT_DOUBLE_VFP)
JIT_TEMPLATE(THROW_EXCEPTION_COMMON)
JIT_TEMPLATE(MEM_OP_DECODE)
JIT_TEMPLATE(STRING_COMPARETO)
diff --git a/vm/compiler/template/armv7-a/TemplateOpList.h b/vm/compiler/template/armv7-a/TemplateOpList.h
index 0365ba4d2..8d0f8d6f7 100644
--- a/vm/compiler/template/armv7-a/TemplateOpList.h
+++ b/vm/compiler/template/armv7-a/TemplateOpList.h
@@ -31,25 +31,6 @@ JIT_TEMPLATE(MUL_LONG)
JIT_TEMPLATE(SHL_LONG)
JIT_TEMPLATE(SHR_LONG)
JIT_TEMPLATE(USHR_LONG)
-JIT_TEMPLATE(ADD_FLOAT_VFP)
-JIT_TEMPLATE(SUB_FLOAT_VFP)
-JIT_TEMPLATE(MUL_FLOAT_VFP)
-JIT_TEMPLATE(DIV_FLOAT_VFP)
-JIT_TEMPLATE(ADD_DOUBLE_VFP)
-JIT_TEMPLATE(SUB_DOUBLE_VFP)
-JIT_TEMPLATE(MUL_DOUBLE_VFP)
-JIT_TEMPLATE(DIV_DOUBLE_VFP)
-JIT_TEMPLATE(DOUBLE_TO_FLOAT_VFP)
-JIT_TEMPLATE(DOUBLE_TO_INT_VFP)
-JIT_TEMPLATE(FLOAT_TO_DOUBLE_VFP)
-JIT_TEMPLATE(FLOAT_TO_INT_VFP)
-JIT_TEMPLATE(INT_TO_DOUBLE_VFP)
-JIT_TEMPLATE(INT_TO_FLOAT_VFP)
-JIT_TEMPLATE(CMPG_DOUBLE_VFP)
-JIT_TEMPLATE(CMPL_DOUBLE_VFP)
-JIT_TEMPLATE(CMPG_FLOAT_VFP)
-JIT_TEMPLATE(CMPL_FLOAT_VFP)
-JIT_TEMPLATE(SQRT_DOUBLE_VFP)
JIT_TEMPLATE(THROW_EXCEPTION_COMMON)
JIT_TEMPLATE(MEM_OP_DECODE)
JIT_TEMPLATE(STRING_COMPARETO)
diff --git a/vm/compiler/template/config-armv7-a b/vm/compiler/template/config-armv7-a
index 9d66e55df..6bc2e6de8 100644
--- a/vm/compiler/template/config-armv7-a
+++ b/vm/compiler/template/config-armv7-a
@@ -31,7 +31,7 @@ import armv5te-vfp/platform.S
#import c/opcommon.c
# opcode list; argument to op-start is default directory
-op-start armv5te-vfp
+op-start armv7-a
op TEMPLATE_CMP_LONG armv5te
op TEMPLATE_INVOKE_METHOD_CHAIN armv5te
op TEMPLATE_INVOKE_METHOD_NATIVE armv5te
@@ -54,6 +54,7 @@ op-start armv5te-vfp
op TEMPLATE_INVOKE_METHOD_NATIVE_PROF armv5te
op TEMPLATE_INVOKE_METHOD_NO_OPT_PROF armv5te
op TEMPLATE_RETURN_PROF armv5te
+ op TEMPLATE_MEM_OP_DECODE armv5te-vfp
op-end
# "helper" code for C; include if you use any of the C stubs (this generates
diff --git a/vm/compiler/template/config-armv7-a-neon b/vm/compiler/template/config-armv7-a-neon
index 9d66e55df..72bdf8694 100644
--- a/vm/compiler/template/config-armv7-a-neon
+++ b/vm/compiler/template/config-armv7-a-neon
@@ -31,7 +31,7 @@ import armv5te-vfp/platform.S
#import c/opcommon.c
# opcode list; argument to op-start is default directory
-op-start armv5te-vfp
+op-start armv7-a-neon
op TEMPLATE_CMP_LONG armv5te
op TEMPLATE_INVOKE_METHOD_CHAIN armv5te
op TEMPLATE_INVOKE_METHOD_NATIVE armv5te
@@ -54,6 +54,7 @@ op-start armv5te-vfp
op TEMPLATE_INVOKE_METHOD_NATIVE_PROF armv5te
op TEMPLATE_INVOKE_METHOD_NO_OPT_PROF armv5te
op TEMPLATE_RETURN_PROF armv5te
+ op TEMPLATE_MEM_OP_DECODE armv5te-vfp
op-end
# "helper" code for C; include if you use any of the C stubs (this generates
diff --git a/vm/compiler/template/ia32/footer.S b/vm/compiler/template/ia32/footer.S
index 226e928a9..23384a567 100644
--- a/vm/compiler/template/ia32/footer.S
+++ b/vm/compiler/template/ia32/footer.S
@@ -7,7 +7,7 @@
.section .data.rel.ro
.align 4
- .global dmvCompilerTemplateEnd
-dmvCompilerTemplateEnd:
+ .global dvmCompilerTemplateEnd
+dvmCompilerTemplateEnd:
#endif /* WITH_JIT */
diff --git a/vm/compiler/template/mips/footer.S b/vm/compiler/template/mips/footer.S
index 91442dd4c..436e76aef 100644
--- a/vm/compiler/template/mips/footer.S
+++ b/vm/compiler/template/mips/footer.S
@@ -134,7 +134,7 @@
.word dvmSelfVerificationMemOpDecode
#endif
- .global dmvCompilerTemplateEnd
-dmvCompilerTemplateEnd:
+ .global dvmCompilerTemplateEnd
+dvmCompilerTemplateEnd:
#endif /* WITH_JIT */
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S b/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
index 27319e722..d4578bc55 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
@@ -178,12 +178,12 @@ dvmCompiler_TEMPLATE_RETURN:
ldr r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
ldr rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
+ ldr r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
#if !defined(WITH_SELF_VERIFICATION)
ldr r9, [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
#else
mov r9, #0 @ disable chaining
#endif
- ldr r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
@ r2<- method we're returning to
cmp r2, #0 @ break frame?
#if !defined(WITH_SELF_VERIFICATION)
@@ -520,9 +520,8 @@ dvmCompiler_TEMPLATE_MUL_LONG:
mul ip, r2, r1 @ ip<- ZxW
umull r9, r10, r2, r0 @ r9/r10 <- ZxX
mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
- add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX))
- mov r0,r9
- mov r1,r10
+ mov r0, r9
+ add r1, r2, r10 @ r1<- r10 + low(ZxW + (YxX))
bx lr
/* ------------------------------ */
@@ -1265,22 +1264,23 @@ dvmCompiler_TEMPLATE_STRING_INDEXOF:
* r2: Starting offset in string data
*/
+ ldr r3, [r0, #STRING_FIELDOFF_VALUE]
ldr r7, [r0, #STRING_FIELDOFF_OFFSET]
ldr r8, [r0, #STRING_FIELDOFF_COUNT]
- ldr r0, [r0, #STRING_FIELDOFF_VALUE]
+
/*
* At this point, we have:
- * r0: object pointer
* r1: char to match
* r2: starting offset
+ * r3: object pointer (final result -> r0)
* r7: offset
* r8: string length
*/
/* Build pointer to start of string data */
- add r0, #16
- add r0, r0, r7, lsl #1
+ add r3, #16
+ add r0, r3, r7, lsl #1
/* Save a copy of starting data in r7 */
mov r7, r0
@@ -1516,12 +1516,12 @@ dvmCompiler_TEMPLATE_RETURN_PROF:
ldr r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
ldr rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
+ ldr r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
#if !defined(WITH_SELF_VERIFICATION)
ldr r9, [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
#else
mov r9, #0 @ disable chaining
#endif
- ldr r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
@ r2<- method we're returning to
cmp r2, #0 @ break frame?
#if !defined(WITH_SELF_VERIFICATION)
@@ -1974,8 +1974,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
.L__aeabi_cfcmple:
.word __aeabi_cfcmple
- .global dmvCompilerTemplateEnd
-dmvCompilerTemplateEnd:
+ .global dvmCompilerTemplateEnd
+dvmCompilerTemplateEnd:
#endif /* WITH_JIT */
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S b/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
index 68f644185..3efcdd609 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
@@ -178,12 +178,12 @@ dvmCompiler_TEMPLATE_RETURN:
ldr r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
ldr rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
+ ldr r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
#if !defined(WITH_SELF_VERIFICATION)
ldr r9, [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
#else
mov r9, #0 @ disable chaining
#endif
- ldr r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
@ r2<- method we're returning to
cmp r2, #0 @ break frame?
#if !defined(WITH_SELF_VERIFICATION)
@@ -732,9 +732,8 @@ dvmCompiler_TEMPLATE_MUL_LONG:
mul ip, r2, r1 @ ip<- ZxW
umull r9, r10, r2, r0 @ r9/r10 <- ZxX
mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
- add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX))
- mov r0,r9
- mov r1,r10
+ mov r0, r9
+ add r1, r2, r10 @ r1<- r10 + low(ZxW + (YxX))
bx lr
/* ------------------------------ */
@@ -996,22 +995,23 @@ dvmCompiler_TEMPLATE_STRING_INDEXOF:
* r2: Starting offset in string data
*/
+ ldr r3, [r0, #STRING_FIELDOFF_VALUE]
ldr r7, [r0, #STRING_FIELDOFF_OFFSET]
ldr r8, [r0, #STRING_FIELDOFF_COUNT]
- ldr r0, [r0, #STRING_FIELDOFF_VALUE]
+
/*
* At this point, we have:
- * r0: object pointer
* r1: char to match
* r2: starting offset
+ * r3: object pointer (final result -> r0)
* r7: offset
* r8: string length
*/
/* Build pointer to start of string data */
- add r0, #16
- add r0, r0, r7, lsl #1
+ add r3, #16
+ add r0, r3, r7, lsl #1
/* Save a copy of starting data in r7 */
mov r7, r0
@@ -1247,12 +1247,12 @@ dvmCompiler_TEMPLATE_RETURN_PROF:
ldr r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
ldr rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
+ ldr r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
#if !defined(WITH_SELF_VERIFICATION)
ldr r9, [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
#else
mov r9, #0 @ disable chaining
#endif
- ldr r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
@ r2<- method we're returning to
cmp r2, #0 @ break frame?
#if !defined(WITH_SELF_VERIFICATION)
@@ -1705,8 +1705,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
.L__aeabi_cfcmple:
.word __aeabi_cfcmple
- .global dmvCompilerTemplateEnd
-dmvCompilerTemplateEnd:
+ .global dvmCompilerTemplateEnd
+dvmCompilerTemplateEnd:
#endif /* WITH_JIT */
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S
index 7573bd8bc..4b2c15ce6 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S
@@ -178,12 +178,12 @@ dvmCompiler_TEMPLATE_RETURN:
ldr r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
ldr rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
+ ldr r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
#if !defined(WITH_SELF_VERIFICATION)
ldr r9, [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
#else
mov r9, #0 @ disable chaining
#endif
- ldr r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
@ r2<- method we're returning to
cmp r2, #0 @ break frame?
#if !defined(WITH_SELF_VERIFICATION)
@@ -520,9 +520,8 @@ dvmCompiler_TEMPLATE_MUL_LONG:
mul ip, r2, r1 @ ip<- ZxW
umull r9, r10, r2, r0 @ r9/r10 <- ZxX
mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
- add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX))
- mov r0,r9
- mov r1,r10
+ mov r0, r9
+ add r1, r2, r10 @ r1<- r10 + low(ZxW + (YxX))
bx lr
/* ------------------------------ */
@@ -590,485 +589,6 @@ dvmCompiler_TEMPLATE_USHR_LONG:
/* ------------------------------ */
.balign 4
- .global dvmCompiler_TEMPLATE_ADD_FLOAT_VFP
-dvmCompiler_TEMPLATE_ADD_FLOAT_VFP:
-/* File: armv5te-vfp/TEMPLATE_ADD_FLOAT_VFP.S */
-/* File: armv5te-vfp/fbinop.S */
- /*
- * Generic 32-bit floating point operation. Provide an "instr" line that
- * specifies an instruction that performs s2 = s0 op s1.
- *
- * On entry:
- * r0 = target dalvik register address
- * r1 = op1 address
- * r2 = op2 address
- */
- flds s0,[r1]
- flds s1,[r2]
- fadds s2, s0, s1
- fsts s2,[r0]
- bx lr
-
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_SUB_FLOAT_VFP
-dvmCompiler_TEMPLATE_SUB_FLOAT_VFP:
-/* File: armv5te-vfp/TEMPLATE_SUB_FLOAT_VFP.S */
-/* File: armv5te-vfp/fbinop.S */
- /*
- * Generic 32-bit floating point operation. Provide an "instr" line that
- * specifies an instruction that performs s2 = s0 op s1.
- *
- * On entry:
- * r0 = target dalvik register address
- * r1 = op1 address
- * r2 = op2 address
- */
- flds s0,[r1]
- flds s1,[r2]
- fsubs s2, s0, s1
- fsts s2,[r0]
- bx lr
-
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_MUL_FLOAT_VFP
-dvmCompiler_TEMPLATE_MUL_FLOAT_VFP:
-/* File: armv5te-vfp/TEMPLATE_MUL_FLOAT_VFP.S */
-/* File: armv5te-vfp/fbinop.S */
- /*
- * Generic 32-bit floating point operation. Provide an "instr" line that
- * specifies an instruction that performs s2 = s0 op s1.
- *
- * On entry:
- * r0 = target dalvik register address
- * r1 = op1 address
- * r2 = op2 address
- */
- flds s0,[r1]
- flds s1,[r2]
- fmuls s2, s0, s1
- fsts s2,[r0]
- bx lr
-
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_DIV_FLOAT_VFP
-dvmCompiler_TEMPLATE_DIV_FLOAT_VFP:
-/* File: armv5te-vfp/TEMPLATE_DIV_FLOAT_VFP.S */
-/* File: armv5te-vfp/fbinop.S */
- /*
- * Generic 32-bit floating point operation. Provide an "instr" line that
- * specifies an instruction that performs s2 = s0 op s1.
- *
- * On entry:
- * r0 = target dalvik register address
- * r1 = op1 address
- * r2 = op2 address
- */
- flds s0,[r1]
- flds s1,[r2]
- fdivs s2, s0, s1
- fsts s2,[r0]
- bx lr
-
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_ADD_DOUBLE_VFP
-dvmCompiler_TEMPLATE_ADD_DOUBLE_VFP:
-/* File: armv5te-vfp/TEMPLATE_ADD_DOUBLE_VFP.S */
-/* File: armv5te-vfp/fbinopWide.S */
- /*
- * Generic 64-bit floating point operation. Provide an "instr" line that
- * specifies an instruction that performs s2 = s0 op s1.
- *
- * On entry:
- * r0 = target dalvik register address
- * r1 = op1 address
- * r2 = op2 address
- */
- fldd d0,[r1]
- fldd d1,[r2]
- faddd d2, d0, d1
- fstd d2,[r0]
- bx lr
-
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_SUB_DOUBLE_VFP
-dvmCompiler_TEMPLATE_SUB_DOUBLE_VFP:
-/* File: armv5te-vfp/TEMPLATE_SUB_DOUBLE_VFP.S */
-/* File: armv5te-vfp/fbinopWide.S */
- /*
- * Generic 64-bit floating point operation. Provide an "instr" line that
- * specifies an instruction that performs s2 = s0 op s1.
- *
- * On entry:
- * r0 = target dalvik register address
- * r1 = op1 address
- * r2 = op2 address
- */
- fldd d0,[r1]
- fldd d1,[r2]
- fsubd d2, d0, d1
- fstd d2,[r0]
- bx lr
-
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_MUL_DOUBLE_VFP
-dvmCompiler_TEMPLATE_MUL_DOUBLE_VFP:
-/* File: armv5te-vfp/TEMPLATE_MUL_DOUBLE_VFP.S */
-/* File: armv5te-vfp/fbinopWide.S */
- /*
- * Generic 64-bit floating point operation. Provide an "instr" line that
- * specifies an instruction that performs s2 = s0 op s1.
- *
- * On entry:
- * r0 = target dalvik register address
- * r1 = op1 address
- * r2 = op2 address
- */
- fldd d0,[r1]
- fldd d1,[r2]
- fmuld d2, d0, d1
- fstd d2,[r0]
- bx lr
-
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_DIV_DOUBLE_VFP
-dvmCompiler_TEMPLATE_DIV_DOUBLE_VFP:
-/* File: armv5te-vfp/TEMPLATE_DIV_DOUBLE_VFP.S */
-/* File: armv5te-vfp/fbinopWide.S */
- /*
- * Generic 64-bit floating point operation. Provide an "instr" line that
- * specifies an instruction that performs s2 = s0 op s1.
- *
- * On entry:
- * r0 = target dalvik register address
- * r1 = op1 address
- * r2 = op2 address
- */
- fldd d0,[r1]
- fldd d1,[r2]
- fdivd d2, d0, d1
- fstd d2,[r0]
- bx lr
-
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_DOUBLE_TO_FLOAT_VFP
-dvmCompiler_TEMPLATE_DOUBLE_TO_FLOAT_VFP:
-/* File: armv5te-vfp/TEMPLATE_DOUBLE_TO_FLOAT_VFP.S */
-/* File: armv5te-vfp/funopNarrower.S */
- /*
- * Generic 64bit-to-32bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "s0 = op d0".
- *
- * For: double-to-int, double-to-float
- *
- * On entry:
- * r0 = target dalvik register address
- * r1 = src dalvik register address
- */
- /* unop vA, vB */
- fldd d0, [r1] @ d0<- vB
- fcvtsd s0, d0 @ s0<- op d0
- fsts s0, [r0] @ vA<- s0
- bx lr
-
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_DOUBLE_TO_INT_VFP
-dvmCompiler_TEMPLATE_DOUBLE_TO_INT_VFP:
-/* File: armv5te-vfp/TEMPLATE_DOUBLE_TO_INT_VFP.S */
-/* File: armv5te-vfp/funopNarrower.S */
- /*
- * Generic 64bit-to-32bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "s0 = op d0".
- *
- * For: double-to-int, double-to-float
- *
- * On entry:
- * r0 = target dalvik register address
- * r1 = src dalvik register address
- */
- /* unop vA, vB */
- fldd d0, [r1] @ d0<- vB
- ftosizd s0, d0 @ s0<- op d0
- fsts s0, [r0] @ vA<- s0
- bx lr
-
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_FLOAT_TO_DOUBLE_VFP
-dvmCompiler_TEMPLATE_FLOAT_TO_DOUBLE_VFP:
-/* File: armv5te-vfp/TEMPLATE_FLOAT_TO_DOUBLE_VFP.S */
-/* File: armv5te-vfp/funopWider.S */
- /*
- * Generic 32bit-to-64bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "d0 = op s0".
- *
- * For: int-to-double, float-to-double
- *
- * On entry:
- * r0 = target dalvik register address
- * r1 = src dalvik register address
- */
- /* unop vA, vB */
- flds s0, [r1] @ s0<- vB
- fcvtds d0, s0 @ d0<- op s0
- fstd d0, [r0] @ vA<- d0
- bx lr
-
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_FLOAT_TO_INT_VFP
-dvmCompiler_TEMPLATE_FLOAT_TO_INT_VFP:
-/* File: armv5te-vfp/TEMPLATE_FLOAT_TO_INT_VFP.S */
-/* File: armv5te-vfp/funop.S */
- /*
- * Generic 32bit-to-32bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "s1 = op s0".
- *
- * For: float-to-int, int-to-float
- *
- * On entry:
- * r0 = target dalvik register address
- * r1 = src dalvik register address
- */
- /* unop vA, vB */
- flds s0, [r1] @ s0<- vB
- ftosizs s1, s0 @ s1<- op s0
- fsts s1, [r0] @ vA<- s1
- bx lr
-
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_INT_TO_DOUBLE_VFP
-dvmCompiler_TEMPLATE_INT_TO_DOUBLE_VFP:
-/* File: armv5te-vfp/TEMPLATE_INT_TO_DOUBLE_VFP.S */
-/* File: armv5te-vfp/funopWider.S */
- /*
- * Generic 32bit-to-64bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "d0 = op s0".
- *
- * For: int-to-double, float-to-double
- *
- * On entry:
- * r0 = target dalvik register address
- * r1 = src dalvik register address
- */
- /* unop vA, vB */
- flds s0, [r1] @ s0<- vB
- fsitod d0, s0 @ d0<- op s0
- fstd d0, [r0] @ vA<- d0
- bx lr
-
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_INT_TO_FLOAT_VFP
-dvmCompiler_TEMPLATE_INT_TO_FLOAT_VFP:
-/* File: armv5te-vfp/TEMPLATE_INT_TO_FLOAT_VFP.S */
-/* File: armv5te-vfp/funop.S */
- /*
- * Generic 32bit-to-32bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "s1 = op s0".
- *
- * For: float-to-int, int-to-float
- *
- * On entry:
- * r0 = target dalvik register address
- * r1 = src dalvik register address
- */
- /* unop vA, vB */
- flds s0, [r1] @ s0<- vB
- fsitos s1, s0 @ s1<- op s0
- fsts s1, [r0] @ vA<- s1
- bx lr
-
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_CMPG_DOUBLE_VFP
-dvmCompiler_TEMPLATE_CMPG_DOUBLE_VFP:
-/* File: armv5te-vfp/TEMPLATE_CMPG_DOUBLE_VFP.S */
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x < y) {
- * return -1;
- * } else if (x > y) {
- * return 1;
- * } else {
- * return 1;
- * }
- * }
- *
- * On entry:
- * r0 = &op1 [vBB]
- * r1 = &op2 [vCC]
- */
- /* op vAA, vBB, vCC */
- fldd d0, [r0] @ d0<- vBB
- fldd d1, [r1] @ d1<- vCC
- fcmpd d0, d1 @ compare (vBB, vCC)
- mov r0, #1 @ r0<- 1 (default)
- fmstat @ export status flags
- mvnmi r0, #0 @ (less than) r0<- -1
- moveq r0, #0 @ (equal) r0<- 0
- bx lr
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_CMPL_DOUBLE_VFP
-dvmCompiler_TEMPLATE_CMPL_DOUBLE_VFP:
-/* File: armv5te-vfp/TEMPLATE_CMPL_DOUBLE_VFP.S */
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x > y) {
- * return 1;
- * } else if (x < y) {
- * return -1;
- * } else {
- * return -1;
- * }
- * }
- * On entry:
- * r0 = &op1 [vBB]
- * r1 = &op2 [vCC]
- */
- /* op vAA, vBB, vCC */
- fldd d0, [r0] @ d0<- vBB
- fldd d1, [r1] @ d1<- vCC
- fcmped d0, d1 @ compare (vBB, vCC)
- mvn r0, #0 @ r0<- -1 (default)
- fmstat @ export status flags
- movgt r0, #1 @ (greater than) r0<- 1
- moveq r0, #0 @ (equal) r0<- 0
- bx lr
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_CMPG_FLOAT_VFP
-dvmCompiler_TEMPLATE_CMPG_FLOAT_VFP:
-/* File: armv5te-vfp/TEMPLATE_CMPG_FLOAT_VFP.S */
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x < y) {
- * return -1;
- * } else if (x > y) {
- * return 1;
- * } else {
- * return 1;
- * }
- * }
- * On entry:
- * r0 = &op1 [vBB]
- * r1 = &op2 [vCC]
- */
- /* op vAA, vBB, vCC */
- flds s0, [r0] @ d0<- vBB
- flds s1, [r1] @ d1<- vCC
- fcmps s0, s1 @ compare (vBB, vCC)
- mov r0, #1 @ r0<- 1 (default)
- fmstat @ export status flags
- mvnmi r0, #0 @ (less than) r0<- -1
- moveq r0, #0 @ (equal) r0<- 0
- bx lr
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_CMPL_FLOAT_VFP
-dvmCompiler_TEMPLATE_CMPL_FLOAT_VFP:
-/* File: armv5te-vfp/TEMPLATE_CMPL_FLOAT_VFP.S */
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x > y) {
- * return 1;
- * } else if (x < y) {
- * return -1;
- * } else {
- * return -1;
- * }
- * }
- * On entry:
- * r0 = &op1 [vBB]
- * r1 = &op2 [vCC]
- */
- /* op vAA, vBB, vCC */
- flds s0, [r0] @ d0<- vBB
- flds s1, [r1] @ d1<- vCC
- fcmps s0, s1 @ compare (vBB, vCC)
- mvn r0, #0 @ r0<- -1 (default)
- fmstat @ export status flags
- movgt r0, #1 @ (greater than) r0<- 1
- moveq r0, #0 @ (equal) r0<- 0
- bx lr
-
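The four compare templates in this block implement the gt-bias (CMPG) and lt-bias (CMPL) conventions sketched in their comments; they differ only in the unordered (NaN) case. A minimal C++ restatement of the double variants (illustrative only, these names are not VM code):

    // Gt-bias: an unordered compare (NaN operand) yields +1.
    static int cmpgDouble(double x, double y) {
        if (x == y) return 0;
        if (x < y)  return -1;
        if (x > y)  return 1;
        return 1;               // unordered
    }
    // Lt-bias: an unordered compare (NaN operand) yields -1.
    static int cmplDouble(double x, double y) {
        if (x == y) return 0;
        if (x > y)  return 1;
        if (x < y)  return -1;
        return -1;              // unordered
    }

In the assembly the same effect falls out of the default loaded into r0 before fmstat: an unordered result satisfies neither the eq nor the mi/gt condition, so the preloaded 1 (CMPG) or -1 (CMPL) survives.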
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_SQRT_DOUBLE_VFP
-dvmCompiler_TEMPLATE_SQRT_DOUBLE_VFP:
-/* File: armv5te-vfp/TEMPLATE_SQRT_DOUBLE_VFP.S */
- /*
- * 64-bit floating point vfp sqrt operation.
- * If the result is a NaN, bail out to library code to do
- * the right thing.
- *
- * On entry:
- * r2 src addr of op1
- * On exit:
- * r0,r1 = res
- */
- fldd d0, [r2]
- fsqrtd d1, d0
- fcmpd d1, d1
- fmstat
- fmrrd r0, r1, d1
- bxeq lr @ Result OK - return
- ldr r2, .Lsqrt
- fmrrd r0, r1, d0 @ reload orig operand
- bx r2 @ tail call to sqrt library routine
-
-.Lsqrt:
- .word sqrt
-
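The sqrt template's bail-out amounts to: keep the hardware result unless it is a NaN, otherwise redo the operation in libm. A C++ sketch under that reading (hwResult stands in for the value fsqrtd left in d1; it is not a VM API):

    #include <cmath>
    static double sqrtOrFallback(double x, double hwResult) {
        if (hwResult == hwResult)   // d1 compared equal to itself => not a NaN
            return hwResult;        // fast path: return the hardware result
        return sqrt(x);             // slow path: tail call into the C library
    }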
-/* ------------------------------ */
- .balign 4
.global dvmCompiler_TEMPLATE_THROW_EXCEPTION_COMMON
dvmCompiler_TEMPLATE_THROW_EXCEPTION_COMMON:
/* File: armv5te/TEMPLATE_THROW_EXCEPTION_COMMON.S */
@@ -1265,22 +785,23 @@ dvmCompiler_TEMPLATE_STRING_INDEXOF:
* r2: Starting offset in string data
*/
+ ldr r3, [r0, #STRING_FIELDOFF_VALUE]
ldr r7, [r0, #STRING_FIELDOFF_OFFSET]
ldr r8, [r0, #STRING_FIELDOFF_COUNT]
- ldr r0, [r0, #STRING_FIELDOFF_VALUE]
+
/*
* At this point, we have:
- * r0: object pointer
* r1: char to match
* r2: starting offset
+ * r3: object pointer (final result -> r0)
* r7: offset
* r8: string length
*/
/* Build pointer to start of string data */
- add r0, #16
- add r0, r0, r7, lsl #1
+ add r3, #16
+ add r0, r3, r7, lsl #1
/* Save a copy of starting data in r7 */
mov r7, r0
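The rewritten adds above rebuild the character-data pointer from the value array now held in r3. A rough C++ equivalent of that address computation (the 16-byte array-header offset and 2-byte char size are taken from the constants used above):

    #include <stdint.h>
    static const uint16_t* stringDataStart(const uint8_t* valueArray, uint32_t offset) {
        // skip the array object header, then 'offset' UTF-16 code units
        return (const uint16_t*)(valueArray + 16) + offset;
    }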
@@ -1516,12 +1037,12 @@ dvmCompiler_TEMPLATE_RETURN_PROF:
ldr r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
ldr rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
+ ldr r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
#if !defined(WITH_SELF_VERIFICATION)
ldr r9, [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
#else
mov r9, #0 @ disable chaining
#endif
- ldr r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
@ r2<- method we're returning to
cmp r2, #0 @ break frame?
#if !defined(WITH_SELF_VERIFICATION)
@@ -1974,8 +1495,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
.L__aeabi_cfcmple:
.word __aeabi_cfcmple
- .global dmvCompilerTemplateEnd
-dmvCompilerTemplateEnd:
+ .global dvmCompilerTemplateEnd
+dvmCompilerTemplateEnd:
#endif /* WITH_JIT */
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
index fd21a0e91..9f85e1f66 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
@@ -178,12 +178,12 @@ dvmCompiler_TEMPLATE_RETURN:
ldr r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
ldr rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
+ ldr r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
#if !defined(WITH_SELF_VERIFICATION)
ldr r9, [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
#else
mov r9, #0 @ disable chaining
#endif
- ldr r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
@ r2<- method we're returning to
cmp r2, #0 @ break frame?
#if !defined(WITH_SELF_VERIFICATION)
@@ -520,9 +520,8 @@ dvmCompiler_TEMPLATE_MUL_LONG:
mul ip, r2, r1 @ ip<- ZxW
umull r9, r10, r2, r0 @ r9/r10 <- ZxX
mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
- add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX))
- mov r0,r9
- mov r1,r10
+ mov r0, r9
+ add r1, r2, r10 @ r1<- r10 + low(ZxW + (YxX))
bx lr
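The reordered tail still computes the usual 32x32 decomposition of a 64-bit multiply; it just writes the high word straight into r1 instead of staging it in r10. What the umull/mla sequence produces, as plain C++:

    #include <stdint.h>
    static uint64_t mulLong(uint64_t a, uint64_t b) {
        uint32_t aLo = (uint32_t)a, aHi = (uint32_t)(a >> 32);
        uint32_t bLo = (uint32_t)b, bHi = (uint32_t)(b >> 32);
        uint64_t low   = (uint64_t)aLo * bLo;       // umull: full 64-bit low product
        uint32_t cross = aLo * bHi + aHi * bLo;     // mla: cross terms, low 32 bits only
        return low + ((uint64_t)cross << 32);       // high word absorbs the cross terms
    }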
/* ------------------------------ */
@@ -590,485 +589,6 @@ dvmCompiler_TEMPLATE_USHR_LONG:
/* ------------------------------ */
.balign 4
- .global dvmCompiler_TEMPLATE_ADD_FLOAT_VFP
-dvmCompiler_TEMPLATE_ADD_FLOAT_VFP:
-/* File: armv5te-vfp/TEMPLATE_ADD_FLOAT_VFP.S */
-/* File: armv5te-vfp/fbinop.S */
- /*
- * Generic 32-bit floating point operation. Provide an "instr" line that
- * specifies an instruction that performs s2 = s0 op s1.
- *
- * On entry:
- * r0 = target dalvik register address
- * r1 = op1 address
- * r2 = op2 address
- */
- flds s0,[r1]
- flds s1,[r2]
- fadds s2, s0, s1
- fsts s2,[r0]
- bx lr
-
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_SUB_FLOAT_VFP
-dvmCompiler_TEMPLATE_SUB_FLOAT_VFP:
-/* File: armv5te-vfp/TEMPLATE_SUB_FLOAT_VFP.S */
-/* File: armv5te-vfp/fbinop.S */
- /*
- * Generic 32-bit floating point operation. Provide an "instr" line that
- * specifies an instruction that performs s2 = s0 op s1.
- *
- * On entry:
- * r0 = target dalvik register address
- * r1 = op1 address
- * r2 = op2 address
- */
- flds s0,[r1]
- flds s1,[r2]
- fsubs s2, s0, s1
- fsts s2,[r0]
- bx lr
-
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_MUL_FLOAT_VFP
-dvmCompiler_TEMPLATE_MUL_FLOAT_VFP:
-/* File: armv5te-vfp/TEMPLATE_MUL_FLOAT_VFP.S */
-/* File: armv5te-vfp/fbinop.S */
- /*
- * Generic 32-bit floating point operation. Provide an "instr" line that
- * specifies an instruction that performs s2 = s0 op s1.
- *
- * On entry:
- * r0 = target dalvik register address
- * r1 = op1 address
- * r2 = op2 address
- */
- flds s0,[r1]
- flds s1,[r2]
- fmuls s2, s0, s1
- fsts s2,[r0]
- bx lr
-
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_DIV_FLOAT_VFP
-dvmCompiler_TEMPLATE_DIV_FLOAT_VFP:
-/* File: armv5te-vfp/TEMPLATE_DIV_FLOAT_VFP.S */
-/* File: armv5te-vfp/fbinop.S */
- /*
- * Generic 32-bit floating point operation. Provide an "instr" line that
- * specifies an instruction that performs s2 = s0 op s1.
- *
- * On entry:
- * r0 = target dalvik register address
- * r1 = op1 address
- * r2 = op2 address
- */
- flds s0,[r1]
- flds s1,[r2]
- fdivs s2, s0, s1
- fsts s2,[r0]
- bx lr
-
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_ADD_DOUBLE_VFP
-dvmCompiler_TEMPLATE_ADD_DOUBLE_VFP:
-/* File: armv5te-vfp/TEMPLATE_ADD_DOUBLE_VFP.S */
-/* File: armv5te-vfp/fbinopWide.S */
- /*
- * Generic 64-bit floating point operation. Provide an "instr" line that
- * specifies an instruction that performs s2 = s0 op s1.
- *
- * On entry:
- * r0 = target dalvik register address
- * r1 = op1 address
- * r2 = op2 address
- */
- fldd d0,[r1]
- fldd d1,[r2]
- faddd d2, d0, d1
- fstd d2,[r0]
- bx lr
-
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_SUB_DOUBLE_VFP
-dvmCompiler_TEMPLATE_SUB_DOUBLE_VFP:
-/* File: armv5te-vfp/TEMPLATE_SUB_DOUBLE_VFP.S */
-/* File: armv5te-vfp/fbinopWide.S */
- /*
- * Generic 64-bit floating point operation. Provide an "instr" line that
- * specifies an instruction that performs s2 = s0 op s1.
- *
- * On entry:
- * r0 = target dalvik register address
- * r1 = op1 address
- * r2 = op2 address
- */
- fldd d0,[r1]
- fldd d1,[r2]
- fsubd d2, d0, d1
- fstd d2,[r0]
- bx lr
-
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_MUL_DOUBLE_VFP
-dvmCompiler_TEMPLATE_MUL_DOUBLE_VFP:
-/* File: armv5te-vfp/TEMPLATE_MUL_DOUBLE_VFP.S */
-/* File: armv5te-vfp/fbinopWide.S */
- /*
- * Generic 64-bit floating point operation. Provide an "instr" line that
- * specifies an instruction that performs s2 = s0 op s1.
- *
- * On entry:
- * r0 = target dalvik register address
- * r1 = op1 address
- * r2 = op2 address
- */
- fldd d0,[r1]
- fldd d1,[r2]
- fmuld d2, d0, d1
- fstd d2,[r0]
- bx lr
-
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_DIV_DOUBLE_VFP
-dvmCompiler_TEMPLATE_DIV_DOUBLE_VFP:
-/* File: armv5te-vfp/TEMPLATE_DIV_DOUBLE_VFP.S */
-/* File: armv5te-vfp/fbinopWide.S */
- /*
- * Generic 64-bit floating point operation. Provide an "instr" line that
- * specifies an instruction that performs s2 = s0 op s1.
- *
- * On entry:
- * r0 = target dalvik register address
- * r1 = op1 address
- * r2 = op2 address
- */
- fldd d0,[r1]
- fldd d1,[r2]
- fdivd d2, d0, d1
- fstd d2,[r0]
- bx lr
-
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_DOUBLE_TO_FLOAT_VFP
-dvmCompiler_TEMPLATE_DOUBLE_TO_FLOAT_VFP:
-/* File: armv5te-vfp/TEMPLATE_DOUBLE_TO_FLOAT_VFP.S */
-/* File: armv5te-vfp/funopNarrower.S */
- /*
- * Generic 64bit-to-32bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "s0 = op d0".
- *
- * For: double-to-int, double-to-float
- *
- * On entry:
- * r0 = target dalvik register address
- * r1 = src dalvik register address
- */
- /* unop vA, vB */
- fldd d0, [r1] @ d0<- vB
- fcvtsd s0, d0 @ s0<- op d0
- fsts s0, [r0] @ vA<- s0
- bx lr
-
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_DOUBLE_TO_INT_VFP
-dvmCompiler_TEMPLATE_DOUBLE_TO_INT_VFP:
-/* File: armv5te-vfp/TEMPLATE_DOUBLE_TO_INT_VFP.S */
-/* File: armv5te-vfp/funopNarrower.S */
- /*
- * Generic 64bit-to-32bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "s0 = op d0".
- *
- * For: double-to-int, double-to-float
- *
- * On entry:
- * r0 = target dalvik register address
- * r1 = src dalvik register address
- */
- /* unop vA, vB */
- fldd d0, [r1] @ d0<- vB
- ftosizd s0, d0 @ s0<- op d0
- fsts s0, [r0] @ vA<- s0
- bx lr
-
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_FLOAT_TO_DOUBLE_VFP
-dvmCompiler_TEMPLATE_FLOAT_TO_DOUBLE_VFP:
-/* File: armv5te-vfp/TEMPLATE_FLOAT_TO_DOUBLE_VFP.S */
-/* File: armv5te-vfp/funopWider.S */
- /*
- * Generic 32bit-to-64bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "d0 = op s0".
- *
- * For: int-to-double, float-to-double
- *
- * On entry:
- * r0 = target dalvik register address
- * r1 = src dalvik register address
- */
- /* unop vA, vB */
- flds s0, [r1] @ s0<- vB
- fcvtds d0, s0 @ d0<- op s0
- fstd d0, [r0] @ vA<- d0
- bx lr
-
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_FLOAT_TO_INT_VFP
-dvmCompiler_TEMPLATE_FLOAT_TO_INT_VFP:
-/* File: armv5te-vfp/TEMPLATE_FLOAT_TO_INT_VFP.S */
-/* File: armv5te-vfp/funop.S */
- /*
- * Generic 32bit-to-32bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "s1 = op s0".
- *
- * For: float-to-int, int-to-float
- *
- * On entry:
- * r0 = target dalvik register address
- * r1 = src dalvik register address
- */
- /* unop vA, vB */
- flds s0, [r1] @ s0<- vB
- ftosizs s1, s0 @ s1<- op s0
- fsts s1, [r0] @ vA<- s1
- bx lr
-
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_INT_TO_DOUBLE_VFP
-dvmCompiler_TEMPLATE_INT_TO_DOUBLE_VFP:
-/* File: armv5te-vfp/TEMPLATE_INT_TO_DOUBLE_VFP.S */
-/* File: armv5te-vfp/funopWider.S */
- /*
- * Generic 32bit-to-64bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "d0 = op s0".
- *
- * For: int-to-double, float-to-double
- *
- * On entry:
- * r0 = target dalvik register address
- * r1 = src dalvik register address
- */
- /* unop vA, vB */
- flds s0, [r1] @ s0<- vB
- fsitod d0, s0 @ d0<- op s0
- fstd d0, [r0] @ vA<- d0
- bx lr
-
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_INT_TO_FLOAT_VFP
-dvmCompiler_TEMPLATE_INT_TO_FLOAT_VFP:
-/* File: armv5te-vfp/TEMPLATE_INT_TO_FLOAT_VFP.S */
-/* File: armv5te-vfp/funop.S */
- /*
- * Generic 32bit-to-32bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "s1 = op s0".
- *
- * For: float-to-int, int-to-float
- *
- * On entry:
- * r0 = target dalvik register address
- * r1 = src dalvik register address
- */
- /* unop vA, vB */
- flds s0, [r1] @ s0<- vB
- fsitos s1, s0 @ s1<- op s0
- fsts s1, [r0] @ vA<- s1
- bx lr
-
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_CMPG_DOUBLE_VFP
-dvmCompiler_TEMPLATE_CMPG_DOUBLE_VFP:
-/* File: armv5te-vfp/TEMPLATE_CMPG_DOUBLE_VFP.S */
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x < y) {
- * return -1;
- * } else if (x > y) {
- * return 1;
- * } else {
- * return 1;
- * }
- * }
- *
- * On entry:
- * r0 = &op1 [vBB]
- * r1 = &op2 [vCC]
- */
- /* op vAA, vBB, vCC */
- fldd d0, [r0] @ d0<- vBB
- fldd d1, [r1] @ d1<- vCC
- fcmpd d0, d1 @ compare (vBB, vCC)
- mov r0, #1 @ r0<- 1 (default)
- fmstat @ export status flags
- mvnmi r0, #0 @ (less than) r0<- -1
- moveq r0, #0 @ (equal) r0<- 0
- bx lr
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_CMPL_DOUBLE_VFP
-dvmCompiler_TEMPLATE_CMPL_DOUBLE_VFP:
-/* File: armv5te-vfp/TEMPLATE_CMPL_DOUBLE_VFP.S */
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x > y) {
- * return 1;
- * } else if (x < y) {
- * return -1;
- * } else {
- * return -1;
- * }
- * }
- * On entry:
- * r0 = &op1 [vBB]
- * r1 = &op2 [vCC]
- */
- /* op vAA, vBB, vCC */
- fldd d0, [r0] @ d0<- vBB
- fldd d1, [r1] @ d1<- vCC
- fcmped d0, d1 @ compare (vBB, vCC)
- mvn r0, #0 @ r0<- -1 (default)
- fmstat @ export status flags
- movgt r0, #1 @ (greater than) r0<- 1
- moveq r0, #0 @ (equal) r0<- 0
- bx lr
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_CMPG_FLOAT_VFP
-dvmCompiler_TEMPLATE_CMPG_FLOAT_VFP:
-/* File: armv5te-vfp/TEMPLATE_CMPG_FLOAT_VFP.S */
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x < y) {
- * return -1;
- * } else if (x > y) {
- * return 1;
- * } else {
- * return 1;
- * }
- * }
- * On entry:
- * r0 = &op1 [vBB]
- * r1 = &op2 [vCC]
- */
- /* op vAA, vBB, vCC */
- flds s0, [r0] @ d0<- vBB
- flds s1, [r1] @ d1<- vCC
- fcmps s0, s1 @ compare (vBB, vCC)
- mov r0, #1 @ r0<- 1 (default)
- fmstat @ export status flags
- mvnmi r0, #0 @ (less than) r0<- -1
- moveq r0, #0 @ (equal) r0<- 0
- bx lr
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_CMPL_FLOAT_VFP
-dvmCompiler_TEMPLATE_CMPL_FLOAT_VFP:
-/* File: armv5te-vfp/TEMPLATE_CMPL_FLOAT_VFP.S */
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x > y) {
- * return 1;
- * } else if (x < y) {
- * return -1;
- * } else {
- * return -1;
- * }
- * }
- * On entry:
- * r0 = &op1 [vBB]
- * r1 = &op2 [vCC]
- */
- /* op vAA, vBB, vCC */
- flds s0, [r0] @ d0<- vBB
- flds s1, [r1] @ d1<- vCC
- fcmps s0, s1 @ compare (vBB, vCC)
- mvn r0, #0 @ r0<- -1 (default)
- fmstat @ export status flags
- movgt r0, #1 @ (greater than) r0<- 1
- moveq r0, #0 @ (equal) r0<- 0
- bx lr
-
-/* ------------------------------ */
- .balign 4
- .global dvmCompiler_TEMPLATE_SQRT_DOUBLE_VFP
-dvmCompiler_TEMPLATE_SQRT_DOUBLE_VFP:
-/* File: armv5te-vfp/TEMPLATE_SQRT_DOUBLE_VFP.S */
- /*
- * 64-bit floating point vfp sqrt operation.
- * If the result is a NaN, bail out to library code to do
- * the right thing.
- *
- * On entry:
- * r2 src addr of op1
- * On exit:
- * r0,r1 = res
- */
- fldd d0, [r2]
- fsqrtd d1, d0
- fcmpd d1, d1
- fmstat
- fmrrd r0, r1, d1
- bxeq lr @ Result OK - return
- ldr r2, .Lsqrt
- fmrrd r0, r1, d0 @ reload orig operand
- bx r2 @ tail call to sqrt library routine
-
-.Lsqrt:
- .word sqrt
-
-/* ------------------------------ */
- .balign 4
.global dvmCompiler_TEMPLATE_THROW_EXCEPTION_COMMON
dvmCompiler_TEMPLATE_THROW_EXCEPTION_COMMON:
/* File: armv5te/TEMPLATE_THROW_EXCEPTION_COMMON.S */
@@ -1265,22 +785,23 @@ dvmCompiler_TEMPLATE_STRING_INDEXOF:
* r2: Starting offset in string data
*/
+ ldr r3, [r0, #STRING_FIELDOFF_VALUE]
ldr r7, [r0, #STRING_FIELDOFF_OFFSET]
ldr r8, [r0, #STRING_FIELDOFF_COUNT]
- ldr r0, [r0, #STRING_FIELDOFF_VALUE]
+
/*
* At this point, we have:
- * r0: object pointer
* r1: char to match
* r2: starting offset
+ * r3: object pointer (final result -> r0)
* r7: offset
* r8: string length
*/
/* Build pointer to start of string data */
- add r0, #16
- add r0, r0, r7, lsl #1
+ add r3, #16
+ add r0, r3, r7, lsl #1
/* Save a copy of starting data in r7 */
mov r7, r0
@@ -1516,12 +1037,12 @@ dvmCompiler_TEMPLATE_RETURN_PROF:
ldr r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
ldr rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
+ ldr r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
#if !defined(WITH_SELF_VERIFICATION)
ldr r9, [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
#else
mov r9, #0 @ disable chaining
#endif
- ldr r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
@ r2<- method we're returning to
cmp r2, #0 @ break frame?
#if !defined(WITH_SELF_VERIFICATION)
@@ -1974,8 +1495,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
.L__aeabi_cfcmple:
.word __aeabi_cfcmple
- .global dmvCompilerTemplateEnd
-dmvCompilerTemplateEnd:
+ .global dvmCompilerTemplateEnd
+dvmCompilerTemplateEnd:
#endif /* WITH_JIT */
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-ia32.S b/vm/compiler/template/out/CompilerTemplateAsm-ia32.S
index eef9d65b1..267b2a785 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-ia32.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-ia32.S
@@ -106,8 +106,8 @@ dvmCompiler_TEMPLATE_INTERPRET:
.section .data.rel.ro
.align 4
- .global dmvCompilerTemplateEnd
-dmvCompilerTemplateEnd:
+ .global dvmCompilerTemplateEnd
+dvmCompilerTemplateEnd:
#endif /* WITH_JIT */
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-mips.S b/vm/compiler/template/out/CompilerTemplateAsm-mips.S
index fb8402e91..d7f9d62a9 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-mips.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-mips.S
@@ -3400,8 +3400,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
.word dvmSelfVerificationMemOpDecode
#endif
- .global dmvCompilerTemplateEnd
-dmvCompilerTemplateEnd:
+ .global dvmCompilerTemplateEnd
+dvmCompilerTemplateEnd:
#endif /* WITH_JIT */
diff --git a/vm/dalvik b/vm/dalvik
index cb46775f0..9229e5a1a 100644
--- a/vm/dalvik
+++ b/vm/dalvik
@@ -23,6 +23,8 @@ LD_LIBRARY_PATH=$ANDROID_BUILD_TOP/out/host/linux-x86/lib \
exec $ANDROID_BUILD_TOP/out/host/linux-x86/bin/dalvikvm \
-Xbootclasspath\
:$ANDROID_BUILD_TOP/out/host/linux-x86/framework/core-hostdex.jar\
+:$ANDROID_BUILD_TOP/out/host/linux-x86/framework/conscrypt-hostdex.jar\
+:$ANDROID_BUILD_TOP/out/host/linux-x86/framework/okhttp-hostdex.jar\
:$ANDROID_BUILD_TOP/out/host/linux-x86/framework/bouncycastle-hostdex.jar\
:$ANDROID_BUILD_TOP/out/host/linux-x86/framework/apache-xml-hostdex.jar \
$*
diff --git a/vm/hprof/Hprof.h b/vm/hprof/Hprof.h
index 3ee8c61a3..c8075291a 100644
--- a/vm/hprof/Hprof.h
+++ b/vm/hprof/Hprof.h
@@ -176,8 +176,8 @@ int hprofFinishHeapDump(hprof_context_t *ctx);
int hprofSetGcScanState(hprof_context_t *ctx,
hprof_heap_tag_t state, u4 threadSerialNumber);
-int hprofMarkRootObject(hprof_context_t *ctx,
- const Object *obj, jobject jniObj);
+void hprofMarkRootObject(hprof_context_t *ctx,
+ const Object *obj, jobject jniObj);
int hprofDumpHeapObject(hprof_context_t *ctx, const Object *obj);
diff --git a/vm/hprof/HprofHeap.cpp b/vm/hprof/HprofHeap.cpp
index 40e773bb8..8c34ff346 100644
--- a/vm/hprof/HprofHeap.cpp
+++ b/vm/hprof/HprofHeap.cpp
@@ -119,14 +119,14 @@ static hprof_basic_type primitiveToBasicTypeAndSize(PrimitiveType prim,
* only true when marking the root set or unreachable
* objects. Used to add rootset references to obj.
*/
-int hprofMarkRootObject(hprof_context_t *ctx, const Object *obj, jobject jniObj)
+void hprofMarkRootObject(hprof_context_t *ctx, const Object *obj,
+ jobject jniObj)
{
hprof_record_t *rec = &ctx->curRec;
- int err;
hprof_heap_tag_t heapTag = (hprof_heap_tag_t)ctx->gcScanState;
if (heapTag == 0) {
- return 0;
+ return;
}
if (ctx->objectsInSegment >= OBJECTS_PER_SEGMENT ||
@@ -197,13 +197,10 @@ int hprofMarkRootObject(hprof_context_t *ctx, const Object *obj, jobject jniObj)
break;
default:
- err = 0;
break;
}
ctx->objectsInSegment++;
-
- return err;
}
static int stackTraceSerialNumber(const void *obj)
diff --git a/vm/interp/Interp.cpp b/vm/interp/Interp.cpp
index fa77523ac..42e2ecad7 100644
--- a/vm/interp/Interp.cpp
+++ b/vm/interp/Interp.cpp
@@ -1660,8 +1660,13 @@ void dvmInitializeInterpBreak(Thread* thread)
if (gDvm.instructionCountEnableCount > 0) {
dvmEnableSubMode(thread, kSubModeInstCounting);
}
- if (dvmIsMethodTraceActive()) {
- dvmEnableSubMode(thread, kSubModeMethodTrace);
+ TracingMode mode = dvmGetMethodTracingMode();
+ if (mode != TRACING_INACTIVE) {
+ if (mode == SAMPLE_PROFILING_ACTIVE) {
+ dvmEnableSubMode(thread, kSubModeSampleTrace);
+ } else {
+ dvmEnableSubMode(thread, kSubModeMethodTrace);
+ }
}
if (gDvm.emulatorTraceEnableCount > 0) {
dvmEnableSubMode(thread, kSubModeEmulatorTrace);
diff --git a/vm/interp/InterpState.h b/vm/interp/InterpState.h
index cc0a13fb9..8d2c22430 100644
--- a/vm/interp/InterpState.h
+++ b/vm/interp/InterpState.h
@@ -63,6 +63,7 @@ enum ExecutionSubModes {
kSubModeCallbackPending = 0x0020,
kSubModeCountedStep = 0x0040,
kSubModeCheckAlways = 0x0080,
+ kSubModeSampleTrace = 0x0100,
kSubModeJitTraceBuild = 0x4000,
kSubModeJitSV = 0x8000,
kSubModeDebugProfile = (kSubModeMethodTrace |
@@ -191,7 +192,7 @@ enum SelfVerificationState {
/* Number of entries in the 2nd level JIT profiler filter cache */
#define JIT_TRACE_THRESH_FILTER_SIZE 32
/* Number of low dalvik pc address bits to include in 2nd level filter key */
-#define JIT_TRACE_THRESH_FILTER_PC_BITS 4
+#define JIT_TRACE_THRESH_FILTER_PC_BITS 16
#define MAX_JIT_RUN_LEN 64
enum JitHint {
diff --git a/vm/jdwp/JdwpHandler.cpp b/vm/jdwp/JdwpHandler.cpp
index 9126584ed..112ac4a0d 100644
--- a/vm/jdwp/JdwpHandler.cpp
+++ b/vm/jdwp/JdwpHandler.cpp
@@ -1702,7 +1702,6 @@ static JdwpError handleDDM_Chunk(JdwpState* state,
* heap requirements is probably more valuable than the efficiency.
*/
if (dvmDbgDdmHandlePacket(buf, dataLen, &replyBuf, &replyLen)) {
- assert(replyLen > 0 && replyLen < 1*1024*1024);
memcpy(expandBufAddSpace(pReply, replyLen), replyBuf, replyLen);
free(replyBuf);
}
diff --git a/vm/native/dalvik_system_DexFile.cpp b/vm/native/dalvik_system_DexFile.cpp
index 69cb71d9b..af1deb6db 100644
--- a/vm/native/dalvik_system_DexFile.cpp
+++ b/vm/native/dalvik_system_DexFile.cpp
@@ -130,7 +130,7 @@ static void addToDexFileTable(DexOrJar* pDexOrJar) {
}
/*
- * private static int openDexFile(String sourceName, String outputName,
+ * private static int openDexFileNative(String sourceName, String outputName,
* int flags) throws IOException
*
* Open a DEX file, returning a pointer to our internal data structure.
@@ -148,7 +148,7 @@ static void addToDexFileTable(DexOrJar* pDexOrJar) {
*
* TODO: should be using "long" for a pointer.
*/
-static void Dalvik_dalvik_system_DexFile_openDexFile(const u4* args,
+static void Dalvik_dalvik_system_DexFile_openDexFileNative(const u4* args,
JValue* pResult)
{
StringObject* sourceNameObj = (StringObject*) args[0];
@@ -333,7 +333,7 @@ static void Dalvik_dalvik_system_DexFile_closeDexFile(const u4* args,
}
/*
- * private static Class defineClass(String name, ClassLoader loader,
+ * private static Class defineClassNative(String name, ClassLoader loader,
* int cookie)
*
* Load a class from a DEX file. This is roughly equivalent to defineClass()
@@ -346,7 +346,7 @@ static void Dalvik_dalvik_system_DexFile_closeDexFile(const u4* args,
* Returns a null pointer with no exception if the class was not found.
* Throws an exception on other failures.
*/
-static void Dalvik_dalvik_system_DexFile_defineClass(const u4* args,
+static void Dalvik_dalvik_system_DexFile_defineClassNative(const u4* args,
JValue* pResult)
{
StringObject* nameObj = (StringObject*) args[0];
@@ -517,14 +517,14 @@ static void Dalvik_dalvik_system_DexFile_isDexOptNeeded(const u4* args,
}
const DalvikNativeMethod dvm_dalvik_system_DexFile[] = {
- { "openDexFile", "(Ljava/lang/String;Ljava/lang/String;I)I",
- Dalvik_dalvik_system_DexFile_openDexFile },
+ { "openDexFileNative", "(Ljava/lang/String;Ljava/lang/String;I)I",
+ Dalvik_dalvik_system_DexFile_openDexFileNative },
{ "openDexFile", "([B)I",
Dalvik_dalvik_system_DexFile_openDexFile_bytearray },
{ "closeDexFile", "(I)V",
Dalvik_dalvik_system_DexFile_closeDexFile },
- { "defineClass", "(Ljava/lang/String;Ljava/lang/ClassLoader;I)Ljava/lang/Class;",
- Dalvik_dalvik_system_DexFile_defineClass },
+ { "defineClassNative", "(Ljava/lang/String;Ljava/lang/ClassLoader;I)Ljava/lang/Class;",
+ Dalvik_dalvik_system_DexFile_defineClassNative },
{ "getClassNameList", "(I)[Ljava/lang/String;",
Dalvik_dalvik_system_DexFile_getClassNameList },
{ "isDexOptNeeded", "(Ljava/lang/String;)Z",
diff --git a/vm/native/dalvik_system_VMDebug.cpp b/vm/native/dalvik_system_VMDebug.cpp
index f6d91a24a..53773575f 100644
--- a/vm/native/dalvik_system_VMDebug.cpp
+++ b/vm/native/dalvik_system_VMDebug.cpp
@@ -18,6 +18,7 @@
* dalvik.system.VMDebug
*/
#include "Dalvik.h"
+#include "alloc/HeapSource.h"
#include "native/InternalNativePriv.h"
#include "hprof/Hprof.h"
@@ -55,6 +56,7 @@ static void Dalvik_dalvik_system_VMDebug_getVmFeatureList(const u4* args, JValue
std::vector<std::string> features;
features.push_back("method-trace-profiling");
features.push_back("method-trace-profiling-streaming");
+ features.push_back("method-sample-profiling");
features.push_back("hprof-heap-dump");
features.push_back("hprof-heap-dump-streaming");
@@ -223,16 +225,30 @@ static void Dalvik_dalvik_system_VMDebug_resetAllocCount(const u4* args,
}
/*
- * static void startMethodTracingNative(String traceFileName,
- * FileDescriptor fd, int bufferSize, int flags)
+ * static void startMethodTracingDdmsImpl(int bufferSize, int flags,
+ * boolean samplingEnabled, int intervalUs)
*
- * Start method trace profiling.
+ * Start method trace profiling, sending results directly to DDMS.
+ */
+static void Dalvik_dalvik_system_VMDebug_startMethodTracingDdmsImpl(const u4* args,
+ JValue* pResult)
+{
+ int bufferSize = args[0];
+ int flags = args[1];
+ bool samplingEnabled = args[2];
+ int intervalUs = args[3];
+ dvmMethodTraceStart("[DDMS]", -1, bufferSize, flags, true, samplingEnabled,
+ intervalUs);
+ RETURN_VOID();
+}
+
+/*
+ * static void startMethodTracingFd(String traceFileName, FileDescriptor fd,
+ * int bufferSize, int flags)
*
- * If both "traceFileName" and "fd" are null, the result will be sent
- * directly to DDMS. (The non-DDMS versions of the calls are expected
- * to enforce non-NULL filenames.)
+ * Start method trace profiling, sending results to a file descriptor.
*/
-static void Dalvik_dalvik_system_VMDebug_startMethodTracingNative(const u4* args,
+static void Dalvik_dalvik_system_VMDebug_startMethodTracingFd(const u4* args,
JValue* pResult)
{
StringObject* traceFileStr = (StringObject*) args[0];
@@ -240,51 +256,61 @@ static void Dalvik_dalvik_system_VMDebug_startMethodTracingNative(const u4* args
int bufferSize = args[2];
int flags = args[3];
- if (bufferSize == 0) {
- // Default to 8MB per the documentation.
- bufferSize = 8 * 1024 * 1024;
+ int origFd = getFileDescriptor(traceFd);
+ if (origFd < 0)
+ RETURN_VOID();
+
+ int fd = dup(origFd);
+ if (fd < 0) {
+ dvmThrowExceptionFmt(gDvm.exRuntimeException,
+ "dup(%d) failed: %s", origFd, strerror(errno));
+ RETURN_VOID();
}
- if (bufferSize < 1024) {
- dvmThrowIllegalArgumentException(NULL);
+ char* traceFileName = dvmCreateCstrFromString(traceFileStr);
+ if (traceFileName == NULL) {
RETURN_VOID();
}
- char* traceFileName = NULL;
- if (traceFileStr != NULL)
- traceFileName = dvmCreateCstrFromString(traceFileStr);
+ dvmMethodTraceStart(traceFileName, fd, bufferSize, flags, false, false, 0);
+ free(traceFileName);
+ RETURN_VOID();
+}
- int fd = -1;
- if (traceFd != NULL) {
- int origFd = getFileDescriptor(traceFd);
- if (origFd < 0)
- RETURN_VOID();
+/*
+ * static void startMethodTracingFilename(String traceFileName, int bufferSize,
+ * int flags)
+ *
+ * Start method trace profiling, sending results to a file.
+ */
+static void Dalvik_dalvik_system_VMDebug_startMethodTracingFilename(const u4* args,
+ JValue* pResult)
+{
+ StringObject* traceFileStr = (StringObject*) args[0];
+ int bufferSize = args[1];
+ int flags = args[2];
- fd = dup(origFd);
- if (fd < 0) {
- dvmThrowExceptionFmt(gDvm.exRuntimeException,
- "dup(%d) failed: %s", origFd, strerror(errno));
- RETURN_VOID();
- }
+ char* traceFileName = dvmCreateCstrFromString(traceFileStr);
+ if (traceFileName == NULL) {
+ RETURN_VOID();
}
- dvmMethodTraceStart(traceFileName != NULL ? traceFileName : "[DDMS]",
- fd, bufferSize, flags, (traceFileName == NULL && fd == -1));
+ dvmMethodTraceStart(traceFileName, -1, bufferSize, flags, false, false, 0);
free(traceFileName);
RETURN_VOID();
}
/*
- * static boolean isMethodTracingActive()
+ * static int getMethodTracingMode()
*
- * Determine whether method tracing is currently active.
+ * Determine whether method tracing is currently active and what type is active.
*/
-static void Dalvik_dalvik_system_VMDebug_isMethodTracingActive(const u4* args,
+static void Dalvik_dalvik_system_VMDebug_getMethodTracingMode(const u4* args,
JValue* pResult)
{
UNUSED_PARAMETER(args);
- RETURN_BOOLEAN(dvmIsMethodTraceActive());
+ RETURN_INT(dvmGetMethodTracingMode());
}
/*
@@ -745,21 +771,64 @@ static void Dalvik_dalvik_system_VMDebug_countInstancesOfClass(const u4* args,
}
}
+/*
+ * public static native void getHeapSpaceStats(long[] data)
+ */
+static void Dalvik_dalvik_system_VMDebug_getHeapSpaceStats(const u4* args,
+ JValue* pResult)
+{
+ ArrayObject* dataArray = (ArrayObject*) args[0];
+
+ if (dataArray == NULL || dataArray->length < 6) {
+ RETURN_VOID();
+ }
+
+ jlong* arr = (jlong*)(void*)dataArray->contents;
+
+ int j = 0;
+ size_t per_heap_allocated[2];
+ size_t per_heap_size[2];
+ memset(per_heap_allocated, 0, sizeof(per_heap_allocated));
+ memset(per_heap_size, 0, sizeof(per_heap_size));
+ dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, (size_t*) &per_heap_allocated, 2);
+ dvmHeapSourceGetValue(HS_FOOTPRINT, (size_t*) &per_heap_size, 2);
+ jlong heapSize = per_heap_size[0];
+ jlong heapUsed = per_heap_allocated[0];
+ jlong heapFree = heapSize - heapUsed;
+ jlong zygoteSize = per_heap_size[1];
+ jlong zygoteUsed = per_heap_allocated[1];
+ jlong zygoteFree = zygoteSize - zygoteUsed;
+ arr[j++] = heapSize;
+ arr[j++] = heapUsed;
+ arr[j++] = heapFree;
+ arr[j++] = zygoteSize;
+ arr[j++] = zygoteUsed;
+ arr[j++] = zygoteFree;
+
+ RETURN_VOID();
+}
+
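The six slots filled above describe the active heap first and the zygote heap second; a descriptive struct for the long[] layout (not a type the VM defines):

    struct HeapSpaceStats {        // long[] index
        long long heapSize;        // [0] footprint of the active heap
        long long heapUsed;        // [1] bytes allocated in the active heap
        long long heapFree;        // [2] heapSize - heapUsed
        long long zygoteSize;      // [3] footprint of the zygote heap
        long long zygoteUsed;      // [4] bytes allocated in the zygote heap
        long long zygoteFree;      // [5] zygoteSize - zygoteUsed
    };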
const DalvikNativeMethod dvm_dalvik_system_VMDebug[] = {
{ "getVmFeatureList", "()[Ljava/lang/String;",
Dalvik_dalvik_system_VMDebug_getVmFeatureList },
{ "getAllocCount", "(I)I",
Dalvik_dalvik_system_VMDebug_getAllocCount },
+ { "getHeapSpaceStats", "([J)V",
+ Dalvik_dalvik_system_VMDebug_getHeapSpaceStats },
{ "resetAllocCount", "(I)V",
Dalvik_dalvik_system_VMDebug_resetAllocCount },
{ "startAllocCounting", "()V",
Dalvik_dalvik_system_VMDebug_startAllocCounting },
{ "stopAllocCounting", "()V",
Dalvik_dalvik_system_VMDebug_stopAllocCounting },
- { "startMethodTracingNative", "(Ljava/lang/String;Ljava/io/FileDescriptor;II)V",
- Dalvik_dalvik_system_VMDebug_startMethodTracingNative },
- { "isMethodTracingActive", "()Z",
- Dalvik_dalvik_system_VMDebug_isMethodTracingActive },
+ { "startMethodTracingDdmsImpl", "(IIZI)V",
+ Dalvik_dalvik_system_VMDebug_startMethodTracingDdmsImpl },
+ { "startMethodTracingFd", "(Ljava/lang/String;Ljava/io/FileDescriptor;II)V",
+ Dalvik_dalvik_system_VMDebug_startMethodTracingFd },
+ { "startMethodTracingFilename", "(Ljava/lang/String;II)V",
+ Dalvik_dalvik_system_VMDebug_startMethodTracingFilename },
+ { "getMethodTracingMode", "()I",
+ Dalvik_dalvik_system_VMDebug_getMethodTracingMode },
{ "stopMethodTracing", "()V",
Dalvik_dalvik_system_VMDebug_stopMethodTracing },
{ "startEmulatorTracing", "()V",
diff --git a/vm/native/dalvik_system_VMRuntime.cpp b/vm/native/dalvik_system_VMRuntime.cpp
index ef95ea3f2..69f9adb43 100644
--- a/vm/native/dalvik_system_VMRuntime.cpp
+++ b/vm/native/dalvik_system_VMRuntime.cpp
@@ -19,11 +19,15 @@
*/
#include "Dalvik.h"
#include "ScopedPthreadMutexLock.h"
+#include "UniquePtr.h"
+#include "alloc/HeapSource.h"
+#include "alloc/Visit.h"
+#include "libdex/DexClass.h"
#include "native/InternalNativePriv.h"
-#include <cutils/array.h>
#include <limits.h>
+#include <map>
/*
* public native float getTargetHeapUtilization()
@@ -217,6 +221,12 @@ static void Dalvik_dalvik_system_VMRuntime_vmVersion(const u4* args,
returnCString(pResult, buf);
}
+static void Dalvik_dalvik_system_VMRuntime_vmLibrary(const u4* args,
+ JValue* pResult)
+{
+ returnCString(pResult, "libdvm.so");
+}
+
static void Dalvik_dalvik_system_VMRuntime_setTargetSdkVersion(const u4* args,
JValue* pResult)
{
@@ -225,10 +235,347 @@ static void Dalvik_dalvik_system_VMRuntime_setTargetSdkVersion(const u4* args,
// Note that this value may be 0, meaning "current".
int targetSdkVersion = args[1];
if (targetSdkVersion > 0 && targetSdkVersion <= 13 /* honeycomb-mr2 */) {
- // TODO: running with CheckJNI should override this and force you to obey the strictest rules.
- ALOGI("Turning on JNI app bug workarounds for target SDK version %i...", targetSdkVersion);
- gDvmJni.workAroundAppJniBugs = true;
+ if (gDvmJni.useCheckJni) {
+ ALOGI("CheckJNI enabled: not enabling JNI app bug workarounds.");
+ } else {
+ ALOGI("Enabling JNI app bug workarounds for target SDK version %i...",
+ targetSdkVersion);
+ gDvmJni.workAroundAppJniBugs = true;
+ }
+ }
+ RETURN_VOID();
+}
+
+static void Dalvik_dalvik_system_VMRuntime_registerNativeAllocation(const u4* args,
+ JValue* pResult)
+{
+ int bytes = args[1];
+ if (bytes < 0) {
+ dvmThrowRuntimeException("allocation size negative");
+ } else {
+ dvmHeapSourceRegisterNativeAllocation(bytes);
+ }
+ RETURN_VOID();
+}
+
+static void Dalvik_dalvik_system_VMRuntime_registerNativeFree(const u4* args,
+ JValue* pResult)
+{
+ int bytes = args[1];
+ if (bytes < 0) {
+ dvmThrowRuntimeException("allocation size negative");
+ } else {
+ dvmHeapSourceRegisterNativeFree(bytes);
+ }
+ RETURN_VOID();
+}
+
+static DvmDex* getDvmDexFromClassPathEntry(ClassPathEntry* cpe) {
+ if (cpe->kind == kCpeDex) {
+ return ((RawDexFile*) cpe->ptr)->pDvmDex;
+ }
+ if (cpe->kind == kCpeJar) {
+ return ((JarFile*) cpe->ptr)->pDvmDex;
+ }
+ LOG_ALWAYS_FATAL("Unknown cpe->kind=%d", cpe->kind);
+}
+
+typedef std::map<std::string, StringObject*> StringTable;
+
+static void preloadDexCachesStringsVisitor(void* addr, u4 threadId, RootType type, void* arg) {
+ StringTable& table = *(StringTable*) arg;
+ StringObject* strObj = *(StringObject**) addr;
+ LOG_FATAL_IF(strObj->clazz != gDvm.classJavaLangString, "Unknown class for supposed string");
+ char* newStr = dvmCreateCstrFromString(strObj);
+ // ALOGI("VMRuntime.preloadDexCaches interned=%s", newStr);
+ table[newStr] = strObj;
+ free(newStr);
+}
+
+// Based on dvmResolveString.
+static void preloadDexCachesResolveString(DvmDex* pDvmDex,
+ uint32_t stringIdx,
+ StringTable& strings) {
+ StringObject* string = dvmDexGetResolvedString(pDvmDex, stringIdx);
+ if (string != NULL) {
+ return;
+ }
+ const DexFile* pDexFile = pDvmDex->pDexFile;
+ uint32_t utf16Size;
+ const char* utf8 = dexStringAndSizeById(pDexFile, stringIdx, &utf16Size);
+ string = strings[utf8];
+ if (string == NULL) {
+ return;
+ }
+ // ALOGI("VMRuntime.preloadDexCaches found string=%s", utf8);
+ dvmDexSetResolvedString(pDvmDex, stringIdx, string);
+}
+
+// Based on dvmResolveClass.
+static void preloadDexCachesResolveType(DvmDex* pDvmDex, uint32_t typeIdx) {
+ ClassObject* clazz = dvmDexGetResolvedClass(pDvmDex, typeIdx);
+ if (clazz != NULL) {
+ return;
+ }
+ const DexFile* pDexFile = pDvmDex->pDexFile;
+ const char* className = dexStringByTypeIdx(pDexFile, typeIdx);
+ if (className[0] != '\0' && className[1] == '\0') {
+ /* primitive type */
+ clazz = dvmFindPrimitiveClass(className[0]);
+ } else {
+ clazz = dvmLookupClass(className, NULL, true);
+ }
+ if (clazz == NULL) {
+ return;
+ }
+ // Skip uninitialized classes because filled cache entry implies it is initialized.
+ if (!dvmIsClassInitialized(clazz)) {
+ // ALOGI("VMRuntime.preloadDexCaches uninitialized clazz=%s", className);
+ return;
+ }
+ // ALOGI("VMRuntime.preloadDexCaches found clazz=%s", className);
+ dvmDexSetResolvedClass(pDvmDex, typeIdx, clazz);
+}
+
+// Based on dvmResolveInstField/dvmResolveStaticField.
+static void preloadDexCachesResolveField(DvmDex* pDvmDex, uint32_t fieldIdx, bool instance) {
+ Field* field = dvmDexGetResolvedField(pDvmDex, fieldIdx);
+ if (field != NULL) {
+ return;
+ }
+ const DexFile* pDexFile = pDvmDex->pDexFile;
+ const DexFieldId* pFieldId = dexGetFieldId(pDexFile, fieldIdx);
+ ClassObject* clazz = dvmDexGetResolvedClass(pDvmDex, pFieldId->classIdx);
+ if (clazz == NULL) {
+ return;
+ }
+ // Skip static fields for uninitialized classes because a filled
+ // cache entry implies the class is initialized.
+ if (!instance && !dvmIsClassInitialized(clazz)) {
+ return;
+ }
+ const char* fieldName = dexStringById(pDexFile, pFieldId->nameIdx);
+ const char* signature = dexStringByTypeIdx(pDexFile, pFieldId->typeIdx);
+ if (instance) {
+ field = dvmFindInstanceFieldHier(clazz, fieldName, signature);
+ } else {
+ field = dvmFindStaticFieldHier(clazz, fieldName, signature);
+ }
+ if (field == NULL) {
+ return;
+ }
+ // ALOGI("VMRuntime.preloadDexCaches found field %s %s.%s",
+ // signature, clazz->descriptor, fieldName);
+ dvmDexSetResolvedField(pDvmDex, fieldIdx, field);
+}
+
+// Based on dvmResolveMethod.
+static void preloadDexCachesResolveMethod(DvmDex* pDvmDex,
+ uint32_t methodIdx,
+ MethodType methodType) {
+ Method* method = dvmDexGetResolvedMethod(pDvmDex, methodIdx);
+ if (method != NULL) {
+ return;
+ }
+ const DexFile* pDexFile = pDvmDex->pDexFile;
+ const DexMethodId* pMethodId = dexGetMethodId(pDexFile, methodIdx);
+ ClassObject* clazz = dvmDexGetResolvedClass(pDvmDex, pMethodId->classIdx);
+ if (clazz == NULL) {
+ return;
+ }
+ // Skip static methods for uninitialized classes because a filled
+ // cache entry implies the class is initialized.
+ if ((methodType == METHOD_STATIC) && !dvmIsClassInitialized(clazz)) {
+ return;
+ }
+ const char* methodName = dexStringById(pDexFile, pMethodId->nameIdx);
+ DexProto proto;
+ dexProtoSetFromMethodId(&proto, pDexFile, pMethodId);
+
+ if (methodType == METHOD_DIRECT) {
+ method = dvmFindDirectMethod(clazz, methodName, &proto);
+ } else if (methodType == METHOD_STATIC) {
+ method = dvmFindDirectMethodHier(clazz, methodName, &proto);
+ } else {
+ method = dvmFindVirtualMethodHier(clazz, methodName, &proto);
}
+ if (method == NULL) {
+ return;
+ }
+ // ALOGI("VMRuntime.preloadDexCaches found method %s.%s",
+ // clazz->descriptor, methodName);
+ dvmDexSetResolvedMethod(pDvmDex, methodIdx, method);
+}
+
+struct DexCacheStats {
+ uint32_t numStrings;
+ uint32_t numTypes;
+ uint32_t numFields;
+ uint32_t numMethods;
+ DexCacheStats() : numStrings(0), numTypes(0), numFields(0), numMethods(0) {};
+};
+
+static const bool kPreloadDexCachesEnabled = true;
+
+// Disabled because it takes a long time (extra half second) but
+// gives almost no benefit in terms of saving private dirty pages.
+static const bool kPreloadDexCachesStrings = false;
+
+static const bool kPreloadDexCachesTypes = true;
+static const bool kPreloadDexCachesFieldsAndMethods = true;
+
+static const bool kPreloadDexCachesCollectStats = false;
+
+static void preloadDexCachesStatsTotal(DexCacheStats* total) {
+ if (!kPreloadDexCachesCollectStats) {
+ return;
+ }
+
+ for (ClassPathEntry* cpe = gDvm.bootClassPath; cpe->kind != kCpeLastEntry; cpe++) {
+ DvmDex* pDvmDex = getDvmDexFromClassPathEntry(cpe);
+ const DexHeader* pHeader = pDvmDex->pHeader;
+ total->numStrings += pHeader->stringIdsSize;
+ total->numFields += pHeader->fieldIdsSize;
+ total->numMethods += pHeader->methodIdsSize;
+ total->numTypes += pHeader->typeIdsSize;
+ }
+}
+
+static void preloadDexCachesStatsFilled(DexCacheStats* filled) {
+ if (!kPreloadDexCachesCollectStats) {
+ return;
+ }
+ for (ClassPathEntry* cpe = gDvm.bootClassPath; cpe->kind != kCpeLastEntry; cpe++) {
+ DvmDex* pDvmDex = getDvmDexFromClassPathEntry(cpe);
+ const DexHeader* pHeader = pDvmDex->pHeader;
+ for (size_t i = 0; i < pHeader->stringIdsSize; i++) {
+ StringObject* string = dvmDexGetResolvedString(pDvmDex, i);
+ if (string != NULL) {
+ filled->numStrings++;
+ }
+ }
+ for (size_t i = 0; i < pHeader->typeIdsSize; i++) {
+ ClassObject* clazz = dvmDexGetResolvedClass(pDvmDex, i);
+ if (clazz != NULL) {
+ filled->numTypes++;
+ }
+ }
+ for (size_t i = 0; i < pHeader->fieldIdsSize; i++) {
+ Field* field = dvmDexGetResolvedField(pDvmDex, i);
+ if (field != NULL) {
+ filled->numFields++;
+ }
+ }
+ for (size_t i = 0; i < pHeader->methodIdsSize; i++) {
+ Method* method = dvmDexGetResolvedMethod(pDvmDex, i);
+ if (method != NULL) {
+ filled->numMethods++;
+ }
+ }
+ }
+}
+
+static void Dalvik_dalvik_system_VMRuntime_preloadDexCaches(const u4* args, JValue* pResult)
+{
+ if (!kPreloadDexCachesEnabled) {
+ return;
+ }
+
+ DexCacheStats total;
+ DexCacheStats before;
+ if (kPreloadDexCachesCollectStats) {
+ ALOGI("VMRuntime.preloadDexCaches starting");
+ preloadDexCachesStatsTotal(&total);
+ preloadDexCachesStatsFilled(&before);
+ }
+
+ // We use a std::map to avoid heap allocating StringObjects to lookup in gDvm.literalStrings
+ StringTable strings;
+ if (kPreloadDexCachesStrings) {
+ dvmLockMutex(&gDvm.internLock);
+ dvmHashTableLock(gDvm.literalStrings);
+ for (int i = 0; i < gDvm.literalStrings->tableSize; ++i) {
+ HashEntry *entry = &gDvm.literalStrings->pEntries[i];
+ if (entry->data != NULL && entry->data != HASH_TOMBSTONE) {
+ preloadDexCachesStringsVisitor(&entry->data, 0, ROOT_INTERNED_STRING, &strings);
+ }
+ }
+ dvmHashTableUnlock(gDvm.literalStrings);
+ dvmUnlockMutex(&gDvm.internLock);
+ }
+
+ for (ClassPathEntry* cpe = gDvm.bootClassPath; cpe->kind != kCpeLastEntry; cpe++) {
+ DvmDex* pDvmDex = getDvmDexFromClassPathEntry(cpe);
+ const DexHeader* pHeader = pDvmDex->pHeader;
+ const DexFile* pDexFile = pDvmDex->pDexFile;
+
+ if (kPreloadDexCachesStrings) {
+ for (size_t i = 0; i < pHeader->stringIdsSize; i++) {
+ preloadDexCachesResolveString(pDvmDex, i, strings);
+ }
+ }
+
+ if (kPreloadDexCachesTypes) {
+ for (size_t i = 0; i < pHeader->typeIdsSize; i++) {
+ preloadDexCachesResolveType(pDvmDex, i);
+ }
+ }
+
+ if (kPreloadDexCachesFieldsAndMethods) {
+ for (size_t classDefIndex = 0;
+ classDefIndex < pHeader->classDefsSize;
+ classDefIndex++) {
+ const DexClassDef* pClassDef = dexGetClassDef(pDexFile, classDefIndex);
+ const u1* pEncodedData = dexGetClassData(pDexFile, pClassDef);
+ UniquePtr<DexClassData> pClassData(dexReadAndVerifyClassData(&pEncodedData, NULL));
+ if (pClassData.get() == NULL) {
+ continue;
+ }
+ for (uint32_t fieldIndex = 0;
+ fieldIndex < pClassData->header.staticFieldsSize;
+ fieldIndex++) {
+ const DexField* pField = &pClassData->staticFields[fieldIndex];
+ preloadDexCachesResolveField(pDvmDex, pField->fieldIdx, false);
+ }
+ for (uint32_t fieldIndex = 0;
+ fieldIndex < pClassData->header.instanceFieldsSize;
+ fieldIndex++) {
+ const DexField* pField = &pClassData->instanceFields[fieldIndex];
+ preloadDexCachesResolveField(pDvmDex, pField->fieldIdx, true);
+ }
+ for (uint32_t methodIndex = 0;
+ methodIndex < pClassData->header.directMethodsSize;
+ methodIndex++) {
+ const DexMethod* pDexMethod = &pClassData->directMethods[methodIndex];
+ MethodType methodType = (((pDexMethod->accessFlags & ACC_STATIC) != 0) ?
+ METHOD_STATIC :
+ METHOD_DIRECT);
+ preloadDexCachesResolveMethod(pDvmDex, pDexMethod->methodIdx, methodType);
+ }
+ for (uint32_t methodIndex = 0;
+ methodIndex < pClassData->header.virtualMethodsSize;
+ methodIndex++) {
+ const DexMethod* pDexMethod = &pClassData->virtualMethods[methodIndex];
+ preloadDexCachesResolveMethod(pDvmDex, pDexMethod->methodIdx, METHOD_VIRTUAL);
+ }
+ }
+ }
+ }
+
+ if (kPreloadDexCachesCollectStats) {
+ DexCacheStats after;
+ preloadDexCachesStatsFilled(&after);
+ ALOGI("VMRuntime.preloadDexCaches strings total=%d before=%d after=%d",
+ total.numStrings, before.numStrings, after.numStrings);
+ ALOGI("VMRuntime.preloadDexCaches types total=%d before=%d after=%d",
+ total.numTypes, before.numTypes, after.numTypes);
+ ALOGI("VMRuntime.preloadDexCaches fields total=%d before=%d after=%d",
+ total.numFields, before.numFields, after.numFields);
+ ALOGI("VMRuntime.preloadDexCaches methods total=%d before=%d after=%d",
+ total.numMethods, before.numMethods, after.numMethods);
+ ALOGI("VMRuntime.preloadDexCaches finished");
+ }
+
RETURN_VOID();
}
@@ -263,5 +610,13 @@ const DalvikNativeMethod dvm_dalvik_system_VMRuntime[] = {
Dalvik_dalvik_system_VMRuntime_startJitCompilation },
{ "vmVersion", "()Ljava/lang/String;",
Dalvik_dalvik_system_VMRuntime_vmVersion },
+ { "vmLibrary", "()Ljava/lang/String;",
+ Dalvik_dalvik_system_VMRuntime_vmLibrary },
+ { "registerNativeAllocation", "(I)V",
+ Dalvik_dalvik_system_VMRuntime_registerNativeAllocation },
+ { "registerNativeFree", "(I)V",
+ Dalvik_dalvik_system_VMRuntime_registerNativeFree },
+ { "preloadDexCaches", "()V",
+ Dalvik_dalvik_system_VMRuntime_preloadDexCaches },
{ NULL, NULL, NULL },
};
diff --git a/vm/native/dalvik_system_Zygote.cpp b/vm/native/dalvik_system_Zygote.cpp
index 6e6c8b742..cd69d3633 100644
--- a/vm/native/dalvik_system_Zygote.cpp
+++ b/vm/native/dalvik_system_Zygote.cpp
@@ -276,18 +276,14 @@ static int mountEmulatedStorage(uid_t uid, u4 mountMode) {
// Prepare source paths
char source_user[PATH_MAX];
- char source_obb[PATH_MAX];
char target_user[PATH_MAX];
// /mnt/shell/emulated/0
snprintf(source_user, PATH_MAX, "%s/%d", source, userid);
- // /mnt/shell/emulated/obb
- snprintf(source_obb, PATH_MAX, "%s/obb", source);
// /storage/emulated/0
snprintf(target_user, PATH_MAX, "%s/%d", target, userid);
if (fs_prepare_dir(source_user, 0000, 0, 0) == -1
- || fs_prepare_dir(source_obb, 0000, 0, 0) == -1
|| fs_prepare_dir(target_user, 0000, 0, 0) == -1) {
return -1;
}
@@ -306,23 +302,7 @@ static int mountEmulatedStorage(uid_t uid, u4 mountMode) {
}
}
- // Now that user is mounted, prepare and mount OBB storage
- // into place for current user
- char target_android[PATH_MAX];
- char target_obb[PATH_MAX];
-
- // /storage/emulated/0/Android
- snprintf(target_android, PATH_MAX, "%s/%d/Android", target, userid);
- // /storage/emulated/0/Android/obb
- snprintf(target_obb, PATH_MAX, "%s/%d/Android/obb", target, userid);
-
- if (fs_prepare_dir(target_android, 0000, 0, 0) == -1
- || fs_prepare_dir(target_obb, 0000, 0, 0) == -1
- || fs_prepare_dir(legacy, 0000, 0, 0) == -1) {
- return -1;
- }
- if (mount(source_obb, target_obb, NULL, MS_BIND, NULL) == -1) {
- ALOGE("Failed to mount %s to %s: %s", source_obb, target_obb, strerror(errno));
+ if (fs_prepare_dir(legacy, 0000, 0, 0) == -1) {
return -1;
}
diff --git a/vm/native/java_lang_Class.cpp b/vm/native/java_lang_Class.cpp
index 93455e764..9b3b2f0f5 100644
--- a/vm/native/java_lang_Class.cpp
+++ b/vm/native/java_lang_Class.cpp
@@ -19,7 +19,7 @@
*/
#include "Dalvik.h"
#include "native/InternalNativePriv.h"
-
+#include "ScopedPthreadMutexLock.h"
/*
* native public boolean desiredAssertionStatus()
@@ -757,6 +757,53 @@ static void Dalvik_java_lang_Class_getInnerClassName(const u4* args,
}
}
+JNIEXPORT jobject JNICALL Java_java_lang_Class_getDex(JNIEnv* env, jclass javaClass) {
+ Thread* self = dvmThreadSelf();
+ ClassObject* c = (ClassObject*) dvmDecodeIndirectRef(self, javaClass);
+
+ DvmDex* dvm_dex = c->pDvmDex;
+ if (dvm_dex == NULL) {
+ return NULL;
+ }
+ // Already cached?
+ if (dvm_dex->dex_object != NULL) {
+ return dvm_dex->dex_object;
+ }
+ jobject byte_buffer = env->NewDirectByteBuffer(dvm_dex->memMap.addr, dvm_dex->memMap.length);
+ if (byte_buffer == NULL) {
+ return NULL;
+ }
+
+ jclass com_android_dex_Dex = env->FindClass("com/android/dex/Dex");
+ if (com_android_dex_Dex == NULL) {
+ return NULL;
+ }
+
+ jmethodID com_android_dex_Dex_create =
+ env->GetStaticMethodID(com_android_dex_Dex,
+ "create", "(Ljava/nio/ByteBuffer;)Lcom/android/dex/Dex;");
+ if (com_android_dex_Dex_create == NULL) {
+ return NULL;
+ }
+
+ jvalue args[1];
+ args[0].l = byte_buffer;
+ jobject local_ref = env->CallStaticObjectMethodA(com_android_dex_Dex,
+ com_android_dex_Dex_create,
+ args);
+ if (local_ref == NULL) {
+ return NULL;
+ }
+
+  // Check that another thread didn't cache an object; if we've won, install the object.
+ ScopedPthreadMutexLock lock(&dvm_dex->modLock);
+
+ if (dvm_dex->dex_object == NULL) {
+ dvm_dex->dex_object = env->NewGlobalRef(local_ref);
+ }
+ return dvm_dex->dex_object;
+}
+
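The tail of Java_java_lang_Class_getDex is a double-checked publish of a JNI global reference: create the Dex object without holding the lock, then install it under dvm_dex->modLock so only one thread's global ref wins. The pattern in isolation (a sketch, not VM code):

    #include <jni.h>
    #include <pthread.h>
    static jobject cacheGlobalRef(JNIEnv* env, jobject* slot,
                                  pthread_mutex_t* lock, jobject local) {
        pthread_mutex_lock(lock);
        if (*slot == NULL) {
            *slot = env->NewGlobalRef(local);   // first thread to get here publishes
        }
        pthread_mutex_unlock(lock);
        return *slot;                           // everyone else returns the cached ref
    }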
const DalvikNativeMethod dvm_java_lang_Class[] = {
{ "desiredAssertionStatus", "()Z",
Dalvik_java_lang_Class_desiredAssertionStatus },
diff --git a/vm/native/java_lang_System.cpp b/vm/native/java_lang_System.cpp
index 29063cdc7..1b07f85ad 100644
--- a/vm/native/java_lang_System.cpp
+++ b/vm/native/java_lang_System.cpp
@@ -360,40 +360,6 @@ static void Dalvik_java_lang_System_arraycopy(const u4* args, JValue* pResult)
}
/*
- * static long currentTimeMillis()
- *
- * Current time, in milliseconds. This doesn't need to be internal to the
- * VM, but we're already handling java.lang.System here.
- */
-static void Dalvik_java_lang_System_currentTimeMillis(const u4* args,
- JValue* pResult)
-{
- struct timeval tv;
-
- UNUSED_PARAMETER(args);
-
- gettimeofday(&tv, (struct timezone *) NULL);
- long long when = tv.tv_sec * 1000LL + tv.tv_usec / 1000;
-
- RETURN_LONG(when);
-}
-
-/*
- * static long nanoTime()
- *
- * Current monotonically-increasing time, in nanoseconds. This doesn't
- * need to be internal to the VM, but we're already handling
- * java.lang.System here.
- */
-static void Dalvik_java_lang_System_nanoTime(const u4* args, JValue* pResult)
-{
- UNUSED_PARAMETER(args);
-
- u8 when = dvmGetRelativeTimeNsec();
- RETURN_LONG(when);
-}
-
-/*
* static int identityHashCode(Object x)
*
* Returns that hash code that the default hashCode()
@@ -407,41 +373,10 @@ static void Dalvik_java_lang_System_identityHashCode(const u4* args,
RETURN_INT(dvmIdentityHashCode(thisPtr));
}
-static void Dalvik_java_lang_System_mapLibraryName(const u4* args,
- JValue* pResult)
-{
- StringObject* nameObj = (StringObject*) args[0];
- StringObject* result = NULL;
- char* name;
- char* mappedName;
-
- if (nameObj == NULL) {
- dvmThrowNullPointerException("userLibName == null");
- RETURN_VOID();
- }
-
- name = dvmCreateCstrFromString(nameObj);
- mappedName = dvmCreateSystemLibraryName(name);
- if (mappedName != NULL) {
- result = dvmCreateStringFromCstr(mappedName);
- dvmReleaseTrackedAlloc((Object*) result, NULL);
- }
-
- free(name);
- free(mappedName);
- RETURN_PTR(result);
-}
-
const DalvikNativeMethod dvm_java_lang_System[] = {
{ "arraycopy", "(Ljava/lang/Object;ILjava/lang/Object;II)V",
Dalvik_java_lang_System_arraycopy },
- { "currentTimeMillis", "()J",
- Dalvik_java_lang_System_currentTimeMillis },
{ "identityHashCode", "(Ljava/lang/Object;)I",
Dalvik_java_lang_System_identityHashCode },
- { "mapLibraryName", "(Ljava/lang/String;)Ljava/lang/String;",
- Dalvik_java_lang_System_mapLibraryName },
- { "nanoTime", "()J",
- Dalvik_java_lang_System_nanoTime },
{ NULL, NULL, NULL },
};
diff --git a/vm/oo/Array.cpp b/vm/oo/Array.cpp
index 345dac3cb..ce4a5e2ca 100644
--- a/vm/oo/Array.cpp
+++ b/vm/oo/Array.cpp
@@ -438,20 +438,22 @@ static ClassObject* createArrayClass(const char* descriptor, Object* loader)
/*
* Inherit access flags from the element. Arrays can't be used as a
- * superclass or interface, so we want to add "final" and remove
+ * superclass or interface, so we want to add "abstract final" and remove
* "interface".
- *
- * Don't inherit any non-standard flags (e.g., CLASS_FINALIZABLE)
- * from elementClass. We assume that the array class does not
- * override finalize().
*/
- newClass->accessFlags = ((newClass->elementClass->accessFlags &
- ~ACC_INTERFACE) | ACC_FINAL) & JAVA_FLAGS_MASK;
+ int accessFlags = elementClass->accessFlags;
+ if (!gDvm.optimizing) {
+ // If the element class is an inner class, make sure we get the correct access flags.
+ StringObject* className = NULL;
+ dvmGetInnerClass(elementClass, &className, &accessFlags);
+ dvmReleaseTrackedAlloc((Object*) className, NULL);
+ }
+ accessFlags &= JAVA_FLAGS_MASK;
+ accessFlags &= ~ACC_INTERFACE;
+ accessFlags |= ACC_ABSTRACT | ACC_FINAL;
- /* Set the flags we determined above.
- * This must happen after accessFlags is set.
- */
- SET_CLASS_FLAG(newClass, extraFlags);
+ // Set the flags we determined above.
+ SET_CLASS_FLAG(newClass, accessFlags | extraFlags);
if (!dvmAddClassToHash(newClass)) {
/*
diff --git a/vm/oo/Class.cpp b/vm/oo/Class.cpp
index 5a94484b8..2a23a9f9a 100644
--- a/vm/oo/Class.cpp
+++ b/vm/oo/Class.cpp
@@ -1746,6 +1746,9 @@ static ClassObject* loadClassFromDex0(DvmDex* pDvmDex,
* Make sure there aren't any "bonus" flags set, since we use them for
* runtime state.
*/
+ /* bits we can reasonably expect to see set in a DEX access flags field */
+ const uint32_t EXPECTED_FILE_FLAGS = (ACC_CLASS_MASK | CLASS_ISPREVERIFIED |
+ CLASS_ISOPTIMIZED);
if ((pClassDef->accessFlags & ~EXPECTED_FILE_FLAGS) != 0) {
ALOGW("Invalid file flags in class %s: %04x",
descriptor, pClassDef->accessFlags);
diff --git a/vm/oo/Object.h b/vm/oo/Object.h
index 4e6103ade..92438ba61 100644
--- a/vm/oo/Object.h
+++ b/vm/oo/Object.h
@@ -82,10 +82,6 @@ enum ClassFlags {
CLASS_ISPREVERIFIED = (1<<16), // class has been pre-verified
};
-/* bits we can reasonably expect to see set in a DEX access flags field */
-#define EXPECTED_FILE_FLAGS \
- (ACC_CLASS_MASK | CLASS_ISPREVERIFIED | CLASS_ISOPTIMIZED)
-
/*
* Get/set class flags.
*/
@@ -306,6 +302,8 @@ struct Field {
u4 accessFlags;
};
+u4 dvmGetFieldIdx(const Field* field);
+
/*
* Static field.
*/
@@ -582,6 +580,8 @@ struct Method {
bool inProfile;
};
+u4 dvmGetMethodIdx(const Method* method);
+
/*
* Find a method within a class. The superclass is not searched.
diff --git a/vm/os/android.cpp b/vm/os/android.cpp
index 24ebd5a05..b37bb70f2 100644
--- a/vm/os/android.cpp
+++ b/vm/os/android.cpp
@@ -23,8 +23,8 @@
#include <limits.h>
#include <errno.h>
+#include <system/thread_defs.h>
#include <cutils/sched_policy.h>
-#include <utils/threads.h>
/*
* Conversion map for "nice" values.
diff --git a/vm/reflect/Annotation.cpp b/vm/reflect/Annotation.cpp
index 7ef0bdedd..942027c7b 100644
--- a/vm/reflect/Annotation.cpp
+++ b/vm/reflect/Annotation.cpp
@@ -666,9 +666,7 @@ static Object* createAnnotationMember(const ClassObject* clazz,
valueObj = (Object*)avalue.value.l;
/* new member to hold the element */
- newMember =
- dvmAllocObject(gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMember,
- ALLOC_DEFAULT);
+ newMember = dvmAllocObject(gDvm.classLibcoreReflectAnnotationMember, ALLOC_DEFAULT);
name = dexStringById(pDexFile, elementNameIdx);
nameObj = dvmCreateStringFromCstr(name);
@@ -775,9 +773,8 @@ static Object* processEncodedAnnotation(const ClassObject* clazz,
JValue result;
if (size > 0) {
- elementArray = dvmAllocArrayByClass(
- gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMemberArray,
- size, ALLOC_DEFAULT);
+ elementArray = dvmAllocArrayByClass(gDvm.classLibcoreReflectAnnotationMemberArray,
+ size, ALLOC_DEFAULT);
if (elementArray == NULL) {
ALOGE("failed to allocate annotation member array (%d elements)",
size);
@@ -833,10 +830,10 @@ static ArrayObject* processAnnotationSet(const ClassObject* clazz,
const DexAnnotationItem* pAnnoItem;
/* we need these later; make sure they're initialized */
- if (!dvmIsClassInitialized(gDvm.classOrgApacheHarmonyLangAnnotationAnnotationFactory))
- dvmInitClass(gDvm.classOrgApacheHarmonyLangAnnotationAnnotationFactory);
- if (!dvmIsClassInitialized(gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMember))
- dvmInitClass(gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMember);
+ if (!dvmIsClassInitialized(gDvm.classLibcoreReflectAnnotationFactory))
+ dvmInitClass(gDvm.classLibcoreReflectAnnotationFactory);
+ if (!dvmIsClassInitialized(gDvm.classLibcoreReflectAnnotationMember))
+ dvmInitClass(gDvm.classLibcoreReflectAnnotationMember);
/* count up the number of visible elements */
size_t count = 0;
@@ -907,10 +904,10 @@ static const DexAnnotationItem* getAnnotationItemFromAnnotationSet(
u4 typeIdx;
/* we need these later; make sure they're initialized */
- if (!dvmIsClassInitialized(gDvm.classOrgApacheHarmonyLangAnnotationAnnotationFactory))
- dvmInitClass(gDvm.classOrgApacheHarmonyLangAnnotationAnnotationFactory);
- if (!dvmIsClassInitialized(gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMember))
- dvmInitClass(gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMember);
+ if (!dvmIsClassInitialized(gDvm.classLibcoreReflectAnnotationFactory))
+ dvmInitClass(gDvm.classLibcoreReflectAnnotationFactory);
+ if (!dvmIsClassInitialized(gDvm.classLibcoreReflectAnnotationMember))
+ dvmInitClass(gDvm.classLibcoreReflectAnnotationMember);
for (i = 0; i < (int) pAnnoSet->size; i++) {
pAnnoItem = dexGetAnnotationItem(pDexFile, pAnnoSet, i);
@@ -1613,8 +1610,10 @@ static int compareMethodStr(DexFile* pDexFile, u4 methodIdx,
* out reasonably well because it's in sorted order, though we're still left
* doing a fair number of string comparisons.
*/
-static u4 getMethodIdx(const Method* method)
+u4 dvmGetMethodIdx(const Method* method)
{
+ if (method->clazz->pDvmDex == NULL) return 0;
+
DexFile* pDexFile = method->clazz->pDvmDex->pDexFile;
u4 hi = pDexFile->pHeader->methodIdsSize -1;
u4 lo = 0;
@@ -1679,7 +1678,7 @@ static const DexAnnotationSetItem* findAnnotationSetForMethod(
* find the method definition in the DEX file and perform string
* comparisons on class name, method name, and signature.
*/
- u4 methodIdx = getMethodIdx(method);
+ u4 methodIdx = dvmGetMethodIdx(method);
u4 count = dexGetMethodAnnotationsSize(pDexFile, pAnnoDir);
u4 idx;
@@ -1921,10 +1920,12 @@ static int compareFieldStr(DexFile* pDexFile, u4 idx, const Field* field)
/*
* Given a field, determine the field's index.
*
- * This has the same tradeoffs as getMethodIdx.
+ * This has the same tradeoffs as dvmGetMethodIdx.
*/
-static u4 getFieldIdx(const Field* field)
+u4 dvmGetFieldIdx(const Field* field)
{
+ if (field->clazz->pDvmDex == NULL) return 0;
+
DexFile* pDexFile = field->clazz->pDvmDex->pDexFile;
u4 hi = pDexFile->pHeader->fieldIdsSize -1;
u4 lo = 0;
@@ -1990,7 +1991,7 @@ static const DexAnnotationSetItem* findAnnotationSetForField(const Field* field)
* find the field definition in the DEX file and perform string
* comparisons on class name, field name, and signature.
*/
- u4 fieldIdx = getFieldIdx(field);
+ u4 fieldIdx = dvmGetFieldIdx(field);
u4 count = dexGetFieldAnnotationsSize(pDexFile, pAnnoDir);
u4 idx;
@@ -2172,7 +2173,7 @@ static const DexParameterAnnotationsItem* findAnnotationsItemForMethod(
* find the method definition in the DEX file and perform string
* comparisons on class name, method name, and signature.
*/
- u4 methodIdx = getMethodIdx(method);
+ u4 methodIdx = dvmGetMethodIdx(method);
u4 count = dexGetParameterAnnotationsSize(pDexFile, pAnnoDir);
u4 idx;
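
In the Annotation.cpp changes above, getMethodIdx and getFieldIdx are renamed to dvmGetMethodIdx and dvmGetFieldIdx, exported (see the declarations added to Object.h earlier), and guarded so that a class with no backing DvmDex returns index 0. As the surrounding comments note, both are binary searches over the sorted method_ids/field_ids tables with string comparisons as the tie-breaker. Below is a generic sketch of that lookup pattern, using hypothetical types and a plain strcmp comparator rather than the real DexFile structures.

// Generic sketch of the dvmGetMethodIdx/dvmGetFieldIdx lookup pattern:
// binary search over a sorted id table, falling back to string comparison.
#include <cstdint>
#include <cstdio>
#include <cstring>

struct Entry { const char* name; };   // hypothetical stand-in for a DexMethodId/DexFieldId

// Returns the index of `target` in the sorted table, or 0 if not found
// (mirroring the "return 0 when there is no backing DEX" fallback above).
static uint32_t findIdx(const Entry* table, uint32_t count, const char* target) {
    if (table == nullptr || count == 0) return 0;
    uint32_t lo = 0, hi = count - 1;
    while (lo <= hi) {
        uint32_t mid = lo + (hi - lo) / 2;
        int cmp = strcmp(target, table[mid].name);   // stands in for compareMethodStr/compareFieldStr
        if (cmp == 0) return mid;
        if (cmp < 0) {
            if (mid == 0) break;                     // avoid unsigned underflow
            hi = mid - 1;
        } else {
            lo = mid + 1;
        }
    }
    return 0;
}

int main() {
    Entry table[] = {{"alpha"}, {"beta"}, {"gamma"}};
    printf("%u\n", findIdx(table, 3, "beta"));       // -> 1
    return 0;
}
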
diff --git a/vm/reflect/Reflect.cpp b/vm/reflect/Reflect.cpp
index 1d055c13d..9cb9fc7ce 100644
--- a/vm/reflect/Reflect.cpp
+++ b/vm/reflect/Reflect.cpp
@@ -226,7 +226,7 @@ static Object* createFieldObject(Field* field, const ClassObject* clazz)
ClassObject* type;
char* mangle;
char* cp;
- int slot;
+ int slot, field_idx;
assert(dvmIsClassInitialized(gDvm.classJavaLangReflectField));
@@ -245,10 +245,11 @@ static Object* createFieldObject(Field* field, const ClassObject* clazz)
goto bail;
slot = fieldToSlot(field, clazz);
+ field_idx = dvmGetFieldIdx(field);
JValue unused;
dvmCallMethod(dvmThreadSelf(), gDvm.methJavaLangReflectField_init,
- fieldObj, &unused, clazz, type, nameObj, slot);
+ fieldObj, &unused, clazz, type, nameObj, slot, field_idx);
if (dvmCheckException(dvmThreadSelf())) {
ALOGD("Field class init threw exception");
goto bail;
@@ -393,7 +394,7 @@ static Object* createConstructorObject(Method* meth)
Object* consObj;
DexStringCache mangle;
char* cp;
- int slot;
+ int slot, method_idx;
dexStringCacheInit(&mangle);
@@ -425,10 +426,11 @@ static Object* createConstructorObject(Method* meth)
goto bail;
slot = methodToSlot(meth);
+ method_idx = dvmGetMethodIdx(meth);
JValue unused;
dvmCallMethod(dvmThreadSelf(), gDvm.methJavaLangReflectConstructor_init,
- consObj, &unused, meth->clazz, params, exceptions, slot);
+ consObj, &unused, meth->clazz, params, exceptions, slot, method_idx);
if (dvmCheckException(dvmThreadSelf())) {
ALOGD("Constructor class init threw exception");
goto bail;
@@ -532,7 +534,7 @@ Object* dvmCreateReflectMethodObject(const Method* meth)
ClassObject* returnType;
DexStringCache mangle;
char* cp;
- int slot;
+ int slot, method_idx;
if (dvmCheckException(dvmThreadSelf())) {
ALOGW("WARNING: dvmCreateReflectMethodObject called with "
@@ -577,11 +579,12 @@ Object* dvmCreateReflectMethodObject(const Method* meth)
goto bail;
slot = methodToSlot(meth);
+ method_idx = dvmGetMethodIdx(meth);
JValue unused;
dvmCallMethod(dvmThreadSelf(), gDvm.methJavaLangReflectMethod_init,
methObj, &unused, meth->clazz, params, exceptions, returnType,
- nameObj, slot);
+ nameObj, slot, method_idx);
if (dvmCheckException(dvmThreadSelf())) {
ALOGD("Method class init threw exception");
goto bail;